[cylc] 32/33: Upstream release 6.7.4

Alastair McKinstry mckinstry at moszumanska.debian.org
Sat Dec 12 14:24:50 UTC 2015


This is an automated email from the git hooks/post-receive script.

mckinstry pushed a commit to branch debian/master
in repository cylc.

commit 7babe6bd64b261337e4dc356c76e7817d075e4ce
Author: Alastair McKinstry <mckinstry at debian.org>
Date:   Sat Dec 12 13:53:29 2015 +0000

    Upstream release 6.7.4
---
 admin/get-repo-version                             |   10 +-
 bin/cylc                                           |  192 +-
 bin/cylc-5to6                                      |    2 +-
 bin/cylc-broadcast                                 |  323 ++--
 bin/cylc-cat-log                                   |  476 +++--
 bin/cylc-cat-state                                 |   56 +-
 bin/cylc-check-software                            |    2 +-
 bin/cylc-check-triggering                          |   45 +-
 bin/cylc-check-versions                            |  166 +-
 bin/cylc-conditions                                |    2 +-
 bin/cylc-copy                                      |  122 +-
 bin/cylc-cycle-point                               |  338 ++--
 bin/cylc-depend                                    |   78 -
 bin/cylc-diff                                      |  180 +-
 bin/cylc-documentation                             |  247 +--
 bin/cylc-dump                                      |  127 +-
 bin/cylc-edit                                      |  269 ++-
 bin/cylc-email-suite                               |    2 +-
 bin/cylc-email-task                                |    2 +-
 bin/cylc-ext-trigger                               |   86 +
 bin/cylc-failed                                    |   73 -
 bin/cylc-get-directory                             |   32 +-
 bin/cylc-get-gui-config                            |   73 +-
 bin/cylc-get-site-config                           |   76 +-
 bin/cylc-get-suite-config                          |  127 +-
 bin/cylc-get-suite-version                         |   51 +-
 bin/cylc-gpanel                                    |   53 +-
 bin/cylc-graph                                     |  398 ++--
 bin/cylc-graph-diff                                |    2 +-
 bin/{cylc-gsummary => cylc-gscan}                  |   42 +-
 bin/cylc-gsummary                                  |   36 +-
 bin/cylc-gui                                       |  139 +-
 bin/cylc-hold                                      |  104 +-
 bin/cylc-import-examples                           |    2 +-
 bin/cylc-insert                                    |  104 +-
 bin/cylc-job-logs-retrieve                         |   81 +
 bin/cylc-job-submit                                |   10 +-
 bin/{cylc-version => cylc-jobs-kill}               |   39 +-
 bin/{cylc-reregister => cylc-jobs-poll}            |   41 +-
 bin/{cylc-job-submit => cylc-jobs-submit}          |   36 +-
 bin/cylc-jobscript                                 |   43 +-
 bin/cylc-kill                                      |   90 +-
 bin/cylc-list                                      |  162 +-
 bin/cylc-message                                   |   93 +-
 bin/cylc-monitor                                   |  418 ++---
 bin/cylc-nudge                                     |   70 +-
 bin/cylc-ping                                      |  103 +-
 bin/cylc-poll                                      |  108 +-
 bin/cylc-print                                     |  117 +-
 bin/cylc-purge                                     |   91 -
 bin/cylc-random                                    |   38 +-
 bin/cylc-refresh                                   |   77 +-
 bin/cylc-register                                  |   92 +-
 bin/cylc-release                                   |   91 +-
 bin/cylc-reload                                    |   63 +-
 bin/cylc-remove                                    |  105 +-
 bin/cylc-reregister                                |   38 +-
 bin/cylc-reset                                     |  107 +-
 bin/cylc-restart                                   |  275 ++-
 bin/cylc-run                                       |  145 +-
 bin/cylc-scan                                      |  336 +++-
 bin/cylc-search                                    |  110 +-
 bin/cylc-set-runahead                              |   77 +-
 bin/cylc-set-verbosity                             |   87 +-
 bin/cylc-show                                      |   57 +-
 bin/cylc-started                                   |   68 -
 bin/cylc-stop                                      |  266 ++-
 bin/cylc-submit                                    |  256 +--
 bin/cylc-succeeded                                 |   69 -
 bin/cylc-suite-state                               |  260 ++-
 bin/cylc-test-battery                              |   65 +-
 bin/cylc-test-db                                   |    2 +-
 bin/cylc-trigger                                   |  290 +--
 bin/cylc-unregister                                |  124 +-
 bin/cylc-upgrade-db                                |   72 +-
 bin/cylc-upgrade-run-dir                           |  105 +-
 bin/cylc-validate                                  |  125 +-
 bin/cylc-version                                   |   36 +-
 bin/cylc-view                                      |  250 +--
 bin/cylc-warranty                                  |   61 +-
 bin/gcapture                                       |   55 +-
 conf/cylc-mode.el                                  |    4 +-
 conf/cylc.lang                                     |    1 +
 conf/cylc.xml                                      |    1 +
 conf/gcylcrc/gcylc.rc.eg                           |    7 +-
 conf/gcylcrc/themes.rc                             |    6 +-
 dev/ToDo/ToDo.txt                                  |   10 -
 dev/bin/n-suites-start.sh                          |   37 +
 dev/bin/n-suites-stop.sh                           |   18 +
 dev/filewalk.py                                    |   34 -
 dev/suites/busy/suite.rc                           |    2 +-
 doc/changes-old.txt                                |    4 +-
 doc/changes.html                                   | 1905 ++++++++++++--------
 doc/cug.tex                                        |  816 ++++++---
 doc/development.tex                                |  233 ---
 doc/gcylcrc.tex                                    |   20 +-
 doc/gh-pages/index.html                            |   96 +-
 doc/gh-pages/screenshots/gcylc-text-view.png       |  Bin 78976 -> 63660 bytes
 doc/gh-pages/screenshots/gsummary.png              |  Bin 30301 -> 12577 bytes
 doc/graphics/png/orig/{gsummary.png => gscan.png}  |  Bin
 .../png/scaled/{gsummary.png => gscan.png}         |  Bin 30067 -> 29994 bytes
 doc/implementation.tex                             |  379 ----
 doc/siterc.tex                                     |  266 ++-
 doc/suiterc.tex                                    |  458 ++++-
 examples/admin/suite.rc                            |    2 +-
 examples/clock-expire/suite.rc                     |   20 +
 examples/demo/ecox/suite.rc                        |    2 +-
 examples/satellite/{ => ext-trigger}/suite.rc      |   76 +-
 examples/satellite/{ => task-polling}/suite.rc     |   31 +-
 examples/satellite/{ => task-retries}/suite.rc     |   61 +-
 lib/cylc/C3MRO.py                                  |   84 +-
 lib/cylc/CylcError.py                              |   14 +-
 lib/cylc/CylcOptionParsers.py                      |  316 ++--
 lib/cylc/LogDiagnosis.py                           |   68 +-
 lib/cylc/RunEventHandler.py                        |    9 +-
 lib/cylc/__init__.py                               |   39 +-
 lib/cylc/batch_sys_handlers/at.py                  |    3 +-
 lib/cylc/batch_sys_handlers/background.py          |   67 +-
 lib/cylc/batch_sys_handlers/loadleveler.py         |   21 +-
 lib/cylc/batch_sys_handlers/lsf.py                 |    6 +-
 lib/cylc/batch_sys_handlers/{pbs.py => moab.py}    |   25 +-
 lib/cylc/batch_sys_handlers/pbs.py                 |   10 +-
 lib/cylc/batch_sys_handlers/sge.py                 |    6 +-
 lib/cylc/batch_sys_handlers/slurm.py               |   11 +-
 lib/cylc/batch_sys_manager.py                      |  562 +++++-
 lib/cylc/batchproc.py                              |  112 --
 lib/cylc/broadcast_report.py                       |   53 +-
 lib/cylc/broker.py                                 |   19 +-
 lib/cylc/cfgspec/gcylc.py                          |  167 +-
 lib/cylc/cfgspec/globalcfg.py                      |  487 +++--
 lib/cylc/cfgspec/suite.py                          |  561 +++---
 lib/cylc/cfgspec/utils.py                          |   65 +
 lib/cylc/command_polling.py                        |   48 +-
 lib/cylc/command_prep.py                           |   74 -
 .../simplify.py => conditional_simplifier.py}      |   88 +-
 lib/cylc/config.py                                 | 1135 +++++++-----
 lib/cylc/cycling/__init__.py                       |   11 +
 lib/cylc/cycling/integer.py                        |   16 +-
 lib/cylc/cycling/iso8601.py                        |   37 +-
 lib/cylc/cylc_mode.py                              |   12 +-
 lib/cylc/cylc_pyro_client.py                       |   65 -
 lib/cylc/cylc_xdot.py                              |  162 +-
 lib/cylc/daemonize.py                              |   13 +-
 lib/cylc/dbstatecheck.py                           |   29 +-
 lib/cylc/dump.py                                   |   39 +-
 lib/cylc/envvar.py                                 |   27 +-
 lib/cylc/exceptions.py                             |   16 +-
 lib/cylc/execute.py                                |   21 +-
 lib/cylc/{owner.py => get_task_proxy.py}           |   20 +-
 lib/cylc/graphing.py                               |  111 +-
 lib/cylc/graphnode.py                              |   27 +-
 lib/cylc/gui/app_gcylc.py                          |  834 ++++-----
 lib/cylc/gui/color_rotator.py                      |   10 +-
 lib/cylc/gui/combo_logviewer.py                    |   53 +-
 lib/cylc/gui/cylc_logviewer.py                     |  112 +-
 lib/cylc/gui/dbchooser.py                          |  428 +++--
 lib/cylc/gui/dot_maker.py                          |    8 +-
 lib/cylc/gui/filtered_tailer.py                    |   54 -
 lib/cylc/gui/gcapture.py                           |  173 +-
 lib/cylc/gui/gpanel.py                             |  146 +-
 lib/cylc/gui/graph.py                              |   60 +-
 lib/cylc/gui/{gsummary.py => gscan.py}             |  695 +++----
 lib/cylc/gui/logviewer.py                          |  134 +-
 lib/cylc/gui/option_group.py                       |  111 +-
 lib/cylc/gui/tailer.py                             |  244 ++-
 lib/cylc/gui/updater.py                            |  488 +++--
 lib/cylc/gui/updater_dot.py                        |   83 +-
 lib/cylc/gui/updater_graph.py                      |   40 +-
 lib/cylc/gui/updater_tree.py                       |  245 +--
 lib/cylc/gui/util.py                               |   33 +-
 lib/cylc/gui/view_dot.py                           |  153 +-
 lib/cylc/gui/view_graph.py                         |   13 +-
 lib/cylc/gui/view_tree.py                          |  212 ++-
 lib/cylc/gui/warning_dialog.py                     |   34 +-
 lib/cylc/job_file.py                               |   79 +-
 lib/cylc/job_host.py                               |   62 +-
 lib/cylc/job_logs.py                               |  132 --
 lib/cylc/mkdir_p.py                                |   14 +-
 lib/cylc/mp_pool.py                                |  198 +-
 lib/cylc/multisubprocess.py                        |   25 +-
 lib/cylc/network/__init__.py                       |   86 +
 lib/cylc/network/client_reporter.py                |  131 ++
 lib/cylc/network/connection_validator.py           |  130 ++
 lib/cylc/network/ext_trigger.py                    |  160 ++
 lib/cylc/network/port_file.py                      |  136 ++
 lib/cylc/network/port_scan.py                      |  170 ++
 lib/cylc/network/pyro_base.py                      |  137 ++
 .../pyro_daemon.py}                                |   65 +-
 .../{broadcast.py => network/suite_broadcast.py}   |  271 ++-
 lib/cylc/network/suite_command.py                  |   89 +
 lib/cylc/network/suite_identifier.py               |   70 +
 lib/cylc/network/suite_info.py                     |   76 +
 .../suite_log.py}                                  |   34 +-
 .../{state_summary.py => network/suite_state.py}   |  188 +-
 lib/cylc/network/task_msgqueue.py                  |   53 +
 lib/cylc/output.py                                 |    6 +-
 lib/cylc/outputs.py                                |   60 +-
 lib/cylc/owner.py                                  |    9 +-
 lib/cylc/passphrase.py                             |  181 +-
 lib/cylc/port_file.py                              |  145 --
 lib/cylc/port_scan.py                              |  269 ---
 lib/cylc/prerequisite.py                           |  175 ++
 lib/cylc/prerequisites/__init__.py                 |    0
 lib/cylc/prerequisites/conditionals.py             |  178 --
 lib/cylc/prerequisites/plain_prerequisites.py      |  120 --
 lib/cylc/prerequisites/prerequisites.py            |   98 -
 lib/cylc/print_tree.py                             |   20 +-
 lib/cylc/prompt.py                                 |   36 +-
 lib/cylc/registration.py                           |  215 ++-
 lib/cylc/regpath.py                                |   36 +-
 lib/cylc/regprompt.py                              |    6 +-
 lib/cylc/remote.py                                 |   21 +-
 lib/cylc/rolling_archive.py                        |   33 +-
 lib/cylc/run.py                                    |   52 +-
 lib/cylc/rundb.py                                  |  865 +++++----
 lib/cylc/scheduler.py                              | 1162 ++++++------
 lib/cylc/strftime.py                               |   37 +-
 lib/cylc/suite_cmd_interface.py                    |   40 -
 lib/cylc/suite_host.py                             |   44 +-
 lib/cylc/suite_id.py                               |   36 -
 lib/cylc/suite_info_interface.py                   |   32 -
 lib/cylc/suite_logging.py                          |   53 +-
 lib/cylc/suite_output.py                           |   51 +-
 lib/cylc/suite_state_dumping.py                    |   33 +-
 lib/cylc/tail.py                                   |   25 +-
 lib/cylc/task_id.py                                |    2 +-
 lib/cylc/task_message.py                           |  395 ++--
 lib/cylc/task_output_logs.py                       |   47 +-
 lib/cylc/task_pool.py                              |  928 +++++++---
 lib/cylc/task_proxy.py                             | 1588 +++++++++-------
 lib/cylc/task_receiver.py                          |   37 -
 lib/cylc/task_state.py                             |  181 +-
 lib/cylc/taskdef.py                                |   34 +-
 lib/cylc/time_parser.py                            |   47 +-
 lib/cylc/trigger.py                                |   34 +-
 lib/cylc/which.py                                  |    1 +
 lib/parsec/Jinja2Support.py                        |   19 +-
 lib/parsec/OrderedDict.py                          |   86 +
 lib/parsec/__init__.py                             |    6 +
 lib/parsec/config.py                               |   83 +-
 lib/parsec/fileparse.py                            |   77 +-
 lib/parsec/include.py                              |   22 +-
 lib/parsec/tests/nullcfg/bin/missing.py            |   22 -
 lib/parsec/tests/synonyms/bin/synonyms.py          |    2 +-
 lib/parsec/upgrade.py                              |   10 +-
 lib/parsec/util.py                                 |   94 +-
 lib/parsec/validate.py                             |   32 +-
 lib/xdot.py                                        |   10 +-
 tests/authentication/00-identity.t                 |   70 +
 tests/authentication/01-description.t              |   78 +
 tests/authentication/02-state-totals.t             |   80 +
 tests/authentication/03-full-read.t                |   82 +
 tests/authentication/04-shutdown.t                 |   77 +
 tests/authentication/05-full-control.t             |   77 +
 tests/authentication/06-suite-override.t           |   79 +
 tests/authentication/07-back-compat.t              |  106 ++
 tests/authentication/basic/suite.rc                |   14 +
 tests/authentication/override/suite.rc             |   17 +
 tests/{purge => authentication}/test_header        |    0
 tests/broadcast/00-simple.t                        |   62 +-
 tests/broadcast/01-dependencies/reference.log      |  105 --
 tests/broadcast/01-dependencies/suite.rc           |   14 -
 tests/{events/00-suite.t => broadcast/08-space.t}  |   12 +-
 tests/broadcast/08-space/reference.log             |    6 +
 tests/broadcast/08-space/suite.rc                  |   25 +
 .../{events/00-suite.t => clock-expire/00-basic.t} |   13 +-
 tests/clock-expire/00-basic/suite.rc               |   33 +
 tests/{pyc => clock-expire}/test_header            |    0
 tests/cyclers/00-daily.t                           |    0
 tests/cyclers/23-multidaily_local.t                |    0
 tests/cyclers/24-360_calendar.t                    |    0
 tests/cyclers/25-no_initial_cycle_point.t          |    0
 tests/cyclers/26-no_final_cycle_point.t            |    0
 .../cyclers/27-no_initial_but_final_cycle_point.t  |    0
 tests/cyclers/29-0000_rollunder.t                  |    0
 tests/cyclers/30-9999_rollover.t                   |    2 +-
 tests/cyclers/34-implicit-back-compat.t            |    0
 tests/cyclers/35-implicit-disallowed.t             |    0
 tests/cyclers/{22-integer1.t => 40-integer1.t}     |    0
 ...initial_immortal.t => 41-r1_initial_immortal.t} |    0
 ...two_step.t => 42-back_comp_start_up_two_step.t} |    0
 ..._up_simple.t => 43-back_comp_start_up_simple.t} |    0
 tests/cyclers/9999_rollover/reference.log          |    8 -
 tests/cyclers/9999_rollover/suite.rc               |   11 +-
 tests/cyclers/integer1/suite.rc                    |    2 +-
 tests/cyclers/r1_at_icp_or/reference.log           |    2 +-
 tests/cyclers/r1_at_icp_or/suite.rc                |    2 +
 tests/cyclers/r1_initial_immortal/suite.rc         |    2 +
 tests/cylc-5to6/00-simple-start-up.t               |    2 +-
 tests/cylc-5to6/01-single-cycler.t                 |    2 +-
 tests/cylc-cat-log/00-local.t                      |    8 +-
 tests/cylc-cat-log/01-remote.t                     |    8 +-
 tests/cylc-cat-log/01-remote/suite.rc              |    2 +-
 .../02-remote-custom-runtime-viewer-pbs.t          |   61 +
 .../reference.log                                  |    6 +
 .../02-remote-custom-runtime-viewer-pbs/suite.rc   |   33 +
 .../00-suite.t => cylc-cat-log/03-bad-suite.t}     |   25 +-
 .../00-basic.t => cylc-cat-log/04-local-tail.t}    |   40 +-
 tests/cylc-cat-log/04-local-tail/bin/my-tailer.sh  |    5 +
 tests/cylc-cat-log/04-local-tail/suite.rc          |   14 +
 .../01-remote.t => cylc-cat-log/05-remote-tail.t}  |   42 +-
 tests/cylc-cat-log/05-remote-tail/bin/my-tailer.sh |    5 +
 tests/cylc-cat-log/05-remote-tail/suite.rc         |   17 +
 tests/cylc-get-config/00-simple/section1.stdout    |   15 +-
 tests/cylc-get-config/00-simple/section2.stdout    |  260 ++-
 tests/cylc-graph-diff/00-simple.t                  |    4 +-
 tests/cylc-insert/01-insert-bad-cycle-point.t      |    2 +-
 tests/cylc-insert/02-insert-bad-stop-cycle-point.t |    2 +-
 .../04-insert-family.t}                            |    6 +-
 tests/cylc-insert/insert-family/reference.log      |   63 +
 tests/cylc-insert/insert-family/suite.rc           |   33 +
 tests/cylc-job-poll/02-loadleveler.t               |    5 +-
 tests/cylc-job-poll/03-slurm.t                     |    5 +-
 tests/cylc-job-poll/04-pbs.t                       |    5 +-
 tests/cylc-job-poll/05-lsf.t                       |    7 +-
 tests/cylc-kill/00-kill-multi-hosts.t              |   56 +
 tests/cylc-kill/00-kill-multi-hosts/reference.log  |   10 +
 tests/cylc-kill/00-kill-multi-hosts/suite.rc       |   26 +
 tests/{purge => cylc-kill}/test_header             |    0
 tests/cylc-message/00-ssh.t                        |   54 +
 tests/cylc-message/00-ssh/reference.log            |    5 +
 tests/cylc-message/00-ssh/suite.rc                 |   15 +
 tests/{purge => cylc-message}/test_header          |    0
 tests/cylc-poll/00-basic.t                         |    0
 tests/cylc-poll/01-task-failed.t                   |    0
 tests/cylc-poll/02-task-submit-failed.t            |    0
 tests/cylc-poll/03-poll-all.t                      |    0
 tests/cylc-poll/04-poll-multi-hosts.t              |   56 +
 tests/cylc-poll/04-poll-multi-hosts/reference.log  |   11 +
 tests/cylc-poll/04-poll-multi-hosts/suite.rc       |   52 +
 .../cylc-poll/05-poll-multi-messages.t             |   20 +-
 .../cylc-poll/05-poll-multi-messages/reference.log |    8 +
 tests/cylc-poll/05-poll-multi-messages/suite.rc    |   45 +
 .../06-loadleveler.t}                              |   20 +-
 tests/cylc-poll/06-loadleveler/reference.log       |   40 +
 .../06-loadleveler}/suite.rc                       |   26 +-
 tests/cylc-poll/07-pbs.t                           |    1 +
 tests/cylc-poll/07-pbs/reference.log               |    5 +
 tests/cylc-poll/07-pbs/suite.rc                    |   25 +
 tests/cylc-poll/08-slurm.t                         |    1 +
 tests/cylc-poll/08-slurm/reference.log             |    5 +
 tests/cylc-poll/08-slurm/suite.rc                  |   25 +
 tests/cylc-poll/09-lsf.t                           |    1 +
 tests/cylc-poll/09-lsf/reference.log               |    5 +
 tests/cylc-poll/09-lsf/suite.rc                    |   25 +
 tests/cylc-scan/00-simple.t                        |    1 +
 tests/cylc-scan/01-hosts.t                         |    4 +-
 tests/cylc-submit/00-bg.t                          |   13 +-
 tests/cylc-submit/00-bg/suite.rc                   |    4 +-
 tests/cylc-trigger/03-edit-run.t                   |    2 +-
 tests/cylc-trigger/03-edit-run/suite.rc            |    6 +-
 tests/cylc-trigger/basic/reference.log             |    2 +-
 tests/database/00-simple.t                         |   67 +-
 tests/database/00-simple/schema.out                |    6 +
 .../db-events => 00-simple/select-task-events.out} |    0
 tests/database/00-simple/select-task-job-logs.out  |   15 +
 tests/database/00-simple/select-task-jobs.out      |    3 +
 .../db-states => 00-simple/select-task-states.out} |    0
 tests/database/{simple => 00-simple}/suite.rc      |    2 +-
 .../05-activity-log.t => database/01-broadcast.t}  |   47 +-
 tests/database/01-broadcast/reference.log          |    7 +
 tests/database/01-broadcast/suite.rc               |   20 +
 tests/{events/00-suite.t => database/02-retry.t}   |   26 +-
 tests/database/02-retry/reference.log              |    8 +
 tests/database/02-retry/suite.rc                   |   15 +
 tests/database/03-remote.t                         |   56 +
 tests/database/03-remote/reference.log             |    6 +
 tests/database/03-remote/suite.rc                  |   20 +
 .../00-suite.t => database/04-lock-recover.t}      |   33 +-
 tests/database/04-lock-recover/bin/cylc-db-lock    |   23 +
 tests/database/04-lock-recover/reference.log       |   16 +
 tests/database/04-lock-recover/suite.rc            |   25 +
 .../00-suite.t => database/05-lock-recover-100.t}  |   32 +-
 .../database/05-lock-recover-100/bin/cylc-db-lock  |   25 +
 tests/database/05-lock-recover-100/reference.log   |   15 +
 tests/database/05-lock-recover-100/suite.rc        |   19 +
 tests/database/simple/db-schema                    |    3 -
 tests/deprecations/00-all/suite.rc                 |    2 +
 tests/directives/00-loadleveler.t                  |   13 +-
 tests/documentation/00-make.t                      |    2 +-
 tests/events/00-suite.t                            |    0
 tests/events/01-task.t                             |    1 +
 tests/events/02-multi.t                            |    0
 tests/events/02-multi/reference.log                |    2 +-
 tests/events/03-timeout.t                          |    0
 tests/events/04-timeout-ref-live.t                 |    0
 tests/events/05-timeout-ref-dummy.t                |    0
 tests/events/06-timeout-ref-simulation.t           |    0
 tests/events/07-task-iso.t                         |    1 +
 tests/events/08-task-event-handler-retry.t         |   54 +
 .../bin/hello-event-handler                        |   20 +
 .../08-task-event-handler-retry/reference.log      |    6 +
 tests/events/08-task-event-handler-retry/suite.rc  |   31 +
 tests/events/09-task-event-mail.t                  |   51 +
 tests/events/09-task-event-mail/reference.log      |    6 +
 tests/events/09-task-event-mail/suite.rc           |   21 +
 tests/events/10-task-event-job-logs-retrieve.t     |  104 ++
 .../10-task-event-job-logs-retrieve/reference.log  |    7 +
 .../10-task-event-job-logs-retrieve/suite.rc       |   21 +
 .../events/11-cycle-task-event-job-logs-retrieve.t |   82 +
 .../reference.log                                  |   10 +
 .../11-cycle-task-event-job-logs-retrieve/suite.rc |   29 +
 tests/events/12-task-event-handler-retry-globalcfg |    1 +
 .../events/12-task-event-handler-retry-globalcfg.t |    1 +
 tests/events/13-task-event-mail-globalcfg          |    1 +
 tests/events/13-task-event-mail-globalcfg.t        |    1 +
 .../14-task-event-job-logs-retrieve-globalcfg      |    1 +
 .../14-task-event-job-logs-retrieve-globalcfg.t    |    1 +
 .../15-host-task-event-handler-retry-globalcfg     |    1 +
 .../15-host-task-event-handler-retry-globalcfg.t   |   74 +
 .../16-task-event-job-logs-register-globalcfg.t    |   63 +
 .../reference.log                                  |    5 +
 .../suite.rc                                       |   23 +
 .../events/17-task-event-job-logs-retrieve-command |    1 +
 .../17-task-event-job-logs-retrieve-command.t      |   75 +
 tests/events/task-iso/bin/log-check.sh             |    4 +-
 tests/events/task-iso/events.log                   |    2 +-
 tests/events/task-iso/suite.rc                     |   10 +-
 tests/events/task/bin/log-check.sh                 |    4 +-
 tests/events/task/suite.rc                         |    8 +-
 .../00-suite.t => ext-trigger/00-satellite.t}      |   10 +-
 tests/ext-trigger/00-satellite/reference.log       |  204 +++
 .../ext-trigger/00-satellite}/suite.rc             |   84 +-
 .../00-suite.t => ext-trigger/01-no-nudge.t}       |   17 +-
 tests/ext-trigger/01-no-nudge/suite.rc             |   29 +
 tests/{purge => ext-trigger}/test_header           |    0
 tests/graph-equivalence/00-oneline.t               |    1 +
 tests/graph-equivalence/01-twolines.t              |    1 +
 tests/graph-equivalence/02-splitline.t             |    1 +
 tests/graph-equivalence/03-multiline_and1.t        |    1 +
 tests/graph-equivalence/04-multiline_and2.t        |    3 +-
 tests/graph-equivalence/multiline_and_refs/c-ref-2 |    5 +
 .../05-suicide-family.t}                           |   14 +-
 tests/graphing/05-suicide-family/graph.plain.ref   |    2 +
 .../05-suicide-family/graph.plain.suicide.ref      |    5 +
 tests/graphing/05-suicide-family/suite.rc          |    8 +
 tests/graphing/06-family-or.t                      |   64 +
 .../07-stop-at-final-point.t}                      |   16 +-
 .../07-stop-at-final-point/graph.plain.ref         |   23 +
 tests/graphing/07-stop-at-final-point/suite.rc     |   18 +
 tests/hold-release/12-hold-then-retry/suite.rc     |    2 +-
 .../17-hold-after-point.t}                         |    9 +-
 tests/hold-release/hold-after-point/reference.log  |   36 +
 tests/hold-release/hold-after-point/suite.rc       |   29 +
 tests/inheritance/00-namespace-list.t              |   10 +-
 tests/inheritance/01-circular.t                    |    2 +-
 tests/integer-cycling/00-satellite/suite.rc        |    2 +-
 .../{cylc-scan/00-simple.t => jinja2/07-filters.t} |    8 +-
 tests/jinja2/filters/Jinja2Filters/hello.py        |    2 +
 tests/jinja2/filters/Jinja2Filters/truly.py        |    2 +
 tests/jinja2/filters/suite.rc                      |   10 +
 tests/jinja2/filters/suite.rc-expanded             |    9 +
 tests/jinja2/include/suite.rc-expanded             |    2 +-
 tests/jinja2/simple/suite.rc-expanded              |    2 +-
 tests/job-kill/01-remote.t                         |    5 +-
 tests/job-kill/01-remote/suite.rc                  |    2 -
 tests/job-kill/02-loadleveler.t                    |   11 +-
 tests/job-kill/02-loadleveler/suite.rc             |    2 +-
 tests/job-kill/03-slurm/suite.rc                   |    2 +-
 tests/job-kill/04-pbs/suite.rc                     |    2 +-
 tests/job-poll/00-late/suite.rc                    |    2 +-
 tests/job-submission/02-job-nn-remote-host.t       |    4 +-
 .../03-job-nn-remote-host-with-shared-fs.t         |    5 +-
 tests/job-submission/05-activity-log.t             |   13 +-
 tests/job-submission/06-garbage/suite.rc           |    2 +-
 tests/job-submission/07-multi.t                    |   64 +
 tests/job-submission/07-multi/reference.log        |   46 +
 tests/job-submission/07-multi/suite.rc             |   33 +
 tests/jobscript/00-torture.t                       |    1 -
 tests/jobscript/00-torture/foo.ref-jobfile         |   27 +-
 tests/lib/bash/test_header                         |   48 +-
 .../00-client.t}                                   |   39 +-
 tests/logging/00-client/suite.rc                   |   17 +
 tests/{purge => logging}/test_header               |    0
 tests/message-triggers/01-new/reference.log        |   49 -
 tests/message-triggers/01-new/suite.rc             |   23 +-
 .../02-alternate.t}                                |    2 +-
 tests/message-triggers/02-alternate/reference.log  |   23 +
 tests/message-triggers/02-alternate/suite.rc       |   20 +
 .../03-placeholder.t}                              |    9 +-
 tests/message-triggers/03-placeholder/suite.rc     |   12 +
 tests/{events/00-suite.t => pep8/00-bin-lib.t}     |   21 +-
 tests/{purge => pep8}/test_header                  |    0
 tests/purge/00-purge.t                             |   31 -
 tests/purge/purge/bin/A.sh                         |   27 -
 tests/purge/purge/bin/B.sh                         |   26 -
 tests/purge/purge/bin/C.sh                         |   26 -
 tests/purge/purge/bin/ColdA.sh                     |    8 -
 tests/purge/purge/bin/ColdB.sh                     |    8 -
 tests/purge/purge/bin/ColdC.sh                     |    8 -
 tests/purge/purge/bin/D.sh                         |   20 -
 tests/purge/purge/bin/E.sh                         |   17 -
 tests/purge/purge/bin/F.sh                         |   16 -
 tests/purge/purge/bin/X.sh                         |    8 -
 tests/purge/purge/bin/clean-workspace.sh           |   27 -
 tests/purge/purge/reference.log                    |  179 --
 tests/purge/purge/suite.rc                         |  135 --
 tests/registration/00-simple.t                     |    0
 .../02-corrupted.t}                                |   50 +-
 tests/reload/11-garbage.t                          |    2 +-
 tests/remote/00-basic.t                            |    4 +-
 tests/restart/01-broadcast.t                       |  192 +-
 tests/restart/02-failed.t                          |  136 +-
 tests/restart/03-retrying.t                        |  155 +-
 tests/restart/04-running.t                         |  150 +-
 tests/restart/05-submit-failed.t                   |  142 +-
 tests/restart/06-succeeded.t                       |  137 +-
 tests/restart/07-waiting.t                         |  135 +-
 .../{11-bad-state-dump.t => 08-bad-state-dump.t}   |    0
 tests/restart/08-retrying-loadleveler.t            |   40 -
 tests/restart/{12-reload.t => 09-reload.t}         |    0
 tests/restart/09-running-loadleveler.t             |   42 -
 .../{13-pre-initial-2.t => 10-pre-initial-2.t}     |    0
 tests/restart/10-submit-failed-loadleveler.t       |   40 -
 ...-back-comp-restart.t => 11-back-comp-restart.t} |    0
 .../{21-deleted-logs.t => 12-deleted-logs.t}       |    0
 .../{22-bad-job-host.t => 13-bad-job-host.t}       |    7 +-
 tests/restart/14-multicycle.t                      |  121 ++
 tests/restart/15-retrying-slurm.t                  |    1 -
 tests/restart/16-retrying-pbs.t                    |    1 -
 tests/restart/17-running-slurm.t                   |    1 -
 tests/restart/18-running-pbs.t                     |    1 -
 tests/restart/19-submit-failed-slurm.t             |    1 -
 tests/restart/20-submit-failed-pbs.t               |    1 -
 tests/restart/back-comp-restart/state              |    2 +-
 .../{22-bad-job-host => bad-job-host}/suite.rc     |    8 +-
 tests/restart/broadcast/suite.rc                   |  105 +-
 tests/restart/failed/suite.rc                      |   94 +-
 tests/restart/lib/suite-runtime-restart.rc         |   32 +
 .../multicycle/bin/shutdown_this_suite_hook        |    2 +
 tests/restart/multicycle/suite.rc                  |   53 +
 tests/restart/reload/suite.rc                      |    2 +-
 tests/restart/retrying/suite.rc                    |  116 +-
 tests/restart/running/suite.rc                     |  101 +-
 tests/restart/submit-failed/suite.rc               |  107 +-
 tests/restart/succeeded/suite.rc                   |   94 +-
 tests/restart/waiting/suite.rc                     |   92 +-
 tests/runahead/no_final/suite.rc                   |    6 +-
 tests/special/07-clock-triggered-360.t             |    2 +-
 .../{pyc/00-simple.t => suite-state/03-options.t}  |   11 +-
 .../00-basic.t => suite-state/04-template.t}       |   36 +-
 tests/suite-state/options/reference.log            |   49 +
 tests/suite-state/options/suite.rc                 |   22 +
 tests/suite-state/template/reference.log           |   19 +
 tests/suite-state/template/suite.rc                |   16 +
 tests/suite-state/template_ref/reference.log       |   19 +
 tests/suite-state/template_ref/suite.rc            |   16 +
 tests/vacation/00-sigusr1.t                        |  130 +-
 tests/vacation/01-loadleveler.t                    |    8 +-
 tests/validate/09-include-missing.t                |    5 +-
 tests/validate/10-bad-sequence-interval.t          |    1 +
 tests/validate/11-bad-sequence-2-digit-century.t   |    1 +
 tests/validate/11-fail-mixed-syntax-formats-1.t    |    1 +
 tests/validate/12-fail-mixed-syntax-formats-2.t    |    1 +
 tests/validate/13-fail-mixed-syntax-formats-3.t    |    1 +
 tests/validate/14-fail-mixed-syntax-formats-4.t    |    1 +
 tests/validate/15-fail-mixed-syntax-formats-5.t    |    1 +
 tests/validate/16-fail-mixed-syntax-formats-6.t    |    1 +
 tests/validate/17-fail-mixed-syntax-formats-7.t    |    1 +
 tests/validate/18-fail-mixed-syntax-formats-8.t    |    1 +
 tests/validate/19-fail-mixed-syntax-formats-9.t    |    1 +
 tests/validate/20-fail-no-scheduling.t             |    3 +-
 tests/validate/21-fail-no-dependencies.t           |    1 +
 tests/validate/21-fail-no-dependencies/suite.rc    |    1 +
 tests/validate/22-fail-no-graph-async.t            |    1 +
 tests/validate/23-fail-no-graph-sequence.t         |    1 +
 tests/validate/24-fail-year-bounds.t               |    1 +
 tests/validate/25-fail-mixed-syntax-formats-10.t   |    1 +
 tests/validate/26-fail-initial-greater-final.t     |    1 +
 .../validate/28-fail-graph-double-ampsand/suite.rc |   14 -
 .../28-fail-graph-double-conditionals.t}           |   65 +-
 .../30-fail-max-active-cycle-points-zero.t         |    1 +
 tests/validate/32-fail-not-integer.t               |    1 +
 tests/validate/33-fail-graph-bracket-missing.t     |    1 +
 ...aph-double-ampsand.t => 34-fail-graph-cycles.t} |    7 +-
 tests/validate/34-fail-graph-cycles/suite.rc       |    4 +
 ...graph-double-ampsand.t => 35-fail-self-edges.t} |    5 +-
 tests/validate/35-fail-self-edges/suite.rc         |    8 +
 .../36-pass-special-tasks-non-word-names.t}        |   33 +-
 ...cle-points-zero.t => 37-fail-double-runahead.t} |    9 +-
 tests/validate/37-fail-double-runahead/suite.rc    |    7 +
 .../38-clock-trigger-task-not-defined.t}           |   29 +-
 ...-year-bounds.t => 39-degenerate-point-format.t} |    3 +-
 tests/validate/39-degenerate-point-format/suite.rc |    7 +
 .../00-suite.t => validate/40-fail-suicide-left.t} |   21 +-
 .../41-mixed-syntax-global-suite.t}                |   45 +-
 ...ry.t => 42-jinja2-template-syntax-error-main.t} |   14 +-
 .../42-jinja2-template-syntax-error-main/suite.rc  |   10 +
 ...43-jinja2-template-syntax-error-cylc-include.t} |   14 +-
 .../suite-includeme.rc                             |    4 +
 .../suite.rc                                       |    8 +
 ...4-jinja2-template-syntax-error-jinja-include.t} |   11 +-
 .../suite-includeme.rc                             |    3 +
 .../suite.rc                                       |    7 +
 ...t-century.t => 45-jinja2-template-error-main.t} |   12 +-
 .../45-jinja2-template-error-main/suite.rc         |    8 +
 ...it-century.t => 46-jinja2-template-not-found.t} |   11 +-
 .../validate/46-jinja2-template-not-found/suite.rc |    6 +
 ...ce-2-digit-century.t => 47-jinja2-type-error.t} |    9 +-
 tests/validate/47-jinja2-type-error/suite.rc       |    6 +
 ...l-year-bounds.t => 48-fail-bad-vis-nod-attrs.t} |    8 +-
 tests/validate/48-fail-bad-vis-nod-attrs/suite.rc  |    8 +
 .../49-fail-no-graph.t}                            |   33 +-
 603 files changed, 23658 insertions(+), 17355 deletions(-)

diff --git a/admin/get-repo-version b/admin/get-repo-version
index 13f0523..b430b3c 100755
--- a/admin/get-repo-version
+++ b/admin/get-repo-version
@@ -31,13 +31,11 @@ VN=$(git describe --abbrev=4 --tags HEAD 2>/dev/null)
 case "$VN" in
     *$LF*) (exit 1) ;;
     [0-9]*)
-        # If uncommited changes exist append "-dirty".
+        # If uncommitted changes exist append "-dirty".
 
-        # The git update-index and diff-index expect a working tree;
-        # notably they don't work in a detached checkout from a bare
-        # repo - so send errors to /dev/null and ignore.
-        git update-index -q --refresh 2> /dev/null 
-        test -z "$(git diff-index --name-only HEAD -- 2> /dev/null)" || VN="$VN-dirty" ;;
+        if [[ -n "$(git status --untracked-files=no --porcelain)" ]]; then
+            VN="$VN-dirty"
+        fi
 esac
 # echo to stdout
 echo "$VN"
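
The get-repo-version hunk above detects a dirty working tree by treating any output from "git status --untracked-files=no --porcelain" as evidence of uncommitted changes to tracked files, and appends "-dirty" to the described version. A minimal Python sketch of the same check (a hypothetical standalone helper, not part of the cylc source tree) could look like this:

    import subprocess

    def describe_with_dirty_flag():
        """Return "git describe" output, appending "-dirty" when tracked
        files carry uncommitted changes (mirrors admin/get-repo-version)."""
        version = subprocess.check_output(
            ['git', 'describe', '--abbrev=4', '--tags', 'HEAD']
        ).decode().strip()
        status = subprocess.check_output(
            ['git', 'status', '--untracked-files=no', '--porcelain']
        ).decode()
        if status.strip():
            version += '-dirty'
        return version

    if __name__ == '__main__':
        print(describe_with_dirty_flag())
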
diff --git a/bin/cylc b/bin/cylc
index f69f5f7..614db63 100755
--- a/bin/cylc
+++ b/bin/cylc
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
 #
 # This program is free software: you can redistribute it and/or modify
@@ -16,48 +15,63 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, re, sys
-import subprocess
+import os
+import re
+import sys
+
+
+def prelude():
+    """Ensure cylc library is at the front of "sys.path"."""
+    lib = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)), '..', 'lib')
+    if lib in sys.path:
+        sys.path.remove(lib)
+    sys.path.insert(0, lib)
+
+
+prelude()
 
-sys.path.append(os.path.dirname(os.path.realpath(os.path.abspath(__file__))) + '/../lib')
 
 try:
     os.getcwd()
-except OSError,x:
+except OSError as exc:
     # The current working directory has been deleted (or filesystem
     # problems of some kind...). This results in Pyro not being found,
     # immediately below. We cannot just chdir to $HOME as gcylc does
     # because that would break relative directory path command arguments
     # (cylc reg SUITE PATH).
-    print >> sys.stderr, x
-    raise SystemExit('ERROR: your current working directory does not exist!')
+    sys.exit(exc)
 
-from cylc.version import CYLC_VERSION
+# Import cylc to initialise CYLC_DIR and the path for python (__init__.py).
+import cylc
 from parsec.OrderedDict import OrderedDict
 
+
 class CommandError(Exception):
+
     def __init__(self, msg):
         self.msg = msg
+
     def __str__(self):
         return repr(self.msg)
 
+
 class CommandNotFoundError(CommandError):
     pass
 
+
 class CommandNotUniqueError(CommandError):
     pass
 
+
 def is_help(str):
-    if str == '-h' or \
-        str == '--help' or \
-        str == '--hlep' or \
-        str == 'help' or \
-        str == 'hlep' or \
-        str == '?':
+    if (str == '-h' or str == '--help' or str == '--hlep' or str == 'help' or
+            str == 'hlep' or str == '?'):
         return True
     else:
         return False
 
+
 def match_dict(abbrev, categories, title):
     # allow any unique abbreviation to cylc categories
     matches = []
@@ -67,31 +81,32 @@ def match_dict(abbrev, categories, title):
                 if cat not in matches:
                     matches.append(cat)
     if len(matches) == 0:
-        raise CommandNotFoundError, title + ' not found: ' + abbrev
+        raise CommandNotFoundError(title + ' not found: ' + abbrev)
     elif len(matches) > 1:
         # multiple matches
         res = ''
         for cat in matches:
             res += ' ' + '|'.join(categories[cat])
-        raise CommandNotUniqueError, title + ' "' + abbrev + '" not unique:' + res
+        raise CommandNotUniqueError(
+            title + ' "' + abbrev + '" not unique:' + res)
     else:
         return matches[0]
 
+
 def match_command(abbrev):
     # allow any unique abbreviation to commands when no category is specified
     matches = []
     finished_matching = False
-    for dct in [
-        admin_commands,
-        license_commands,
-        database_commands,
-        preparation_commands,
-        information_commands,
-        discovery_commands,
-        control_commands,
-        utility_commands,
-        hook_commands,
-        task_commands]:
+    for dct in [admin_commands,
+                license_commands,
+                database_commands,
+                preparation_commands,
+                information_commands,
+                discovery_commands,
+                control_commands,
+                utility_commands,
+                hook_commands,
+                task_commands]:
         for com in dct.keys():
             if com == abbrev:
                 matches = [com]
@@ -104,16 +119,18 @@ def match_command(abbrev):
             if finished_matching:
                 break
     if len(matches) == 0:
-        raise CommandNotFoundError, 'COMMAND not found: ' + abbrev
+        raise CommandNotFoundError('COMMAND not found: ' + abbrev)
     elif len(matches) > 1:
         # multiple matches
         res = ''
         for com in matches:
             res += ' ' + '|'.join(all_commands[com])
-        raise CommandNotUniqueError, 'COMMAND "' + abbrev + '" not unique:' + res
+        raise CommandNotUniqueError(
+            'COMMAND "' + abbrev + '" not unique:' + res)
     else:
         return matches[0]
 
+
 def pretty_print(incom, choose_dict, indent=True, numbered=False, sort=False):
     # pretty print commands or topics from a dict:
     # (com[item] = description)
@@ -152,7 +169,10 @@ def pretty_print(incom, choose_dict, indent=True, numbered=False, sort=False):
             else:
                 digit = str(count)
             print digit + '/',
-        print label[item], '.'*(longest-len(label[item])) + '...', incom[item]
+        print (
+            label[item],
+            '.' * (longest - len(label[item])) + '...',
+            incom[item])
 
 # BEGIN MAIN
 
@@ -172,7 +192,7 @@ categories['license'] = ['license', 'GPL']
 
 information_commands = OrderedDict()
 
-information_commands['gsummary'] = ['gsummary']
+information_commands['gscan'] = ['gscan', 'gsummary']
 information_commands['gpanel'] = ['gpanel']
 information_commands['gui'] = ['gui', 'gcylc']
 information_commands['list'] = ['list', 'ls']
@@ -180,13 +200,15 @@ information_commands['dump'] = ['dump']
 information_commands['cat-state'] = ['cat-state']
 information_commands['show'] = ['show']
 information_commands['cat-log'] = ['cat-log', 'log']
-information_commands['get-suite-version'] = ['get-suite-version', 'get-cylc-version']
+information_commands['get-suite-version'] = [
+    'get-suite-version', 'get-cylc-version']
 information_commands['version'] = ['version']
 
 information_commands['documentation'] = ['documentation', 'browse']
 information_commands['monitor'] = ['monitor']
 information_commands['get-suite-config'] = ['get-suite-config', 'get-config']
-information_commands['get-site-config'] = ['get-site-config', 'get-global-config']
+information_commands['get-site-config'] = [
+    'get-site-config', 'get-global-config']
 information_commands['get-gui-config'] = ['get-gui-config']
 
 control_commands = OrderedDict()
@@ -202,19 +224,19 @@ control_commands['insert'] = ['insert']
 control_commands['remove'] = ['remove']
 control_commands['poll'] = ['poll']
 control_commands['kill'] = ['kill']
-control_commands['purge'] = ['purge']
 control_commands['hold'] = ['hold']
 control_commands['release'] = ['release', 'unhold']
 control_commands['reset'] = ['reset']
 control_commands['nudge'] = ['nudge']
 control_commands['reload'] = ['reload']
-control_commands['depend'] = ['depend']
 control_commands['set-runahead'] = ['set-runahead']
 control_commands['set-verbosity'] = ['set-verbosity']
 control_commands['broadcast'] = ['broadcast', 'bcast']
+control_commands['ext-trigger'] = ['ext-trigger', 'external-trigger']
 
 utility_commands = OrderedDict()
-utility_commands['cycle-point'] = ['cycle-point', 'cyclepoint', 'datetime','cycletime']
+utility_commands['cycle-point'] = [
+    'cycle-point', 'cyclepoint', 'datetime', 'cycletime']
 utility_commands['random'] = ['random', 'rnd']
 utility_commands['scp-transfer'] = ['scp-transfer']
 utility_commands['suite-state'] = ['suite-state']
@@ -222,6 +244,7 @@ utility_commands['suite-state'] = ['suite-state']
 hook_commands = OrderedDict()
 hook_commands['email-suite'] = ['email-suite']
 hook_commands['email-task'] = ['email-task']
+hook_commands['job-logs-retrieve'] = ['job-logs-retrieve']
 hook_commands['check-triggering'] = ['check-triggering']
 
 admin_commands = OrderedDict()
@@ -238,7 +261,7 @@ license_commands['conditions'] = ['conditions']
 
 database_commands = OrderedDict()
 database_commands['register'] = ['register']
-database_commands['reregister'] = ['reregister','rename']
+database_commands['reregister'] = ['reregister', 'rename']
 database_commands['unregister'] = ['unregister']
 database_commands['copy'] = ['copy', 'cp']
 database_commands['print'] = ['print']
@@ -264,12 +287,12 @@ discovery_commands['check-versions'] = ['check-versions']
 
 task_commands = OrderedDict()
 task_commands['submit'] = ['submit', 'single']
-task_commands['started'] = ['started', 'task-started']
 task_commands['message'] = ['message', 'task-message']
-task_commands['succeeded'] = ['succeeded', 'task-succeeded']
-task_commands['failed'] = ['failed', 'task-failed']
-task_commands['job-poll'] = ['job-poll']
+task_commands['jobs-kill'] = ['jobs-kill']
+task_commands['jobs-poll'] = ['jobs-poll']
+task_commands['jobs-submit'] = ['jobs-submit']
 task_commands['job-kill'] = ['job-kill']
+task_commands['job-poll'] = ['job-poll']
 task_commands['job-submit'] = ['job-submit']
 
 all_commands = OrderedDict()
@@ -293,7 +316,7 @@ cycling weather and climate forecasting suites and related processing
 (but it can also be used for one-off workflows of non-cycling tasks).
 For detailed documentation see the Cylc User Guide (cylc doc --help).
 
-Version """ + CYLC_VERSION + """
+Version __CYLC_VERSION__
 
 The graphical user interface for cylc is "gcylc" (a.k.a. "cylc gui").
 
@@ -313,17 +336,17 @@ USAGE:
 
 # topic summaries
 catsum = OrderedDict()
-catsum['all']        = "The complete command set."
-catsum['admin']   = "Cylc installation, testing, and example suites."
-catsum['license']   = "Software licensing information (GPL v3.0)."
-catsum['database']   = "Suite name registration, copying, deletion, etc."
-catsum['information']   = "Interrogate suite definitions and running suites."
-catsum['preparation']   = "Suite editing, validation, visualization, etc."
-catsum['discovery']   = "Detect running suites."
-catsum['control']   = "Suite start up, monitoring, and control."
-catsum['task']   = "The task messaging interface."
-catsum['hook']   = "Suite and task event hook scripts."
-catsum['utility']   = "Cycle arithmetic and templating, etc."
+catsum['all'] = "The complete command set."
+catsum['admin'] = "Cylc installation, testing, and example suites."
+catsum['license'] = "Software licensing information (GPL v3.0)."
+catsum['database'] = "Suite name registration, copying, deletion, etc."
+catsum['information'] = "Interrogate suite definitions and running suites."
+catsum['preparation'] = "Suite editing, validation, visualization, etc."
+catsum['discovery'] = "Detect running suites."
+catsum['control'] = "Suite start up, monitoring, and control."
+catsum['task'] = "The task messaging interface."
+catsum['hook'] = "Suite and task event hook scripts."
+catsum['utility'] = "Cycle arithmetic and templating, etc."
 
 usage = general_usage + """
 
@@ -356,8 +379,8 @@ HOW TO DRILL DOWN TO COMMAND USAGE HELP:
 
 Command CATEGORIES:"""
 
-#Some commands and categories are aliased (db|database, cp|copy) and
-#some common typographical errors are corrected (e.g. cycl => cylc).
+# Some commands and categories are aliased (db|database, cp|copy) and
+# some common typographical errors are corrected (e.g. cycl => cylc).
 
 # command summaries
 comsum = OrderedDict()
@@ -399,9 +422,9 @@ comsum['monitor'] = 'An in-terminal suite monitor (see also gcylc)'
 comsum['get-suite-config'] = 'Print suite configuration items'
 comsum['get-site-config'] = 'Print site/user configuration items'
 comsum['get-gui-config'] = 'Print gcylc configuration items'
-comsum['get-suite-version'] = 'Print the cylc version of a running suite daemon'
+comsum['get-suite-version'] = 'Print the cylc version of a suite daemon'
 comsum['version'] = 'Print the cylc release version'
-comsum['gsummary'] = 'Summary GUI for monitoring multiple suites'
+comsum['gscan'] = 'Scan GUI for monitoring multiple suites'
 comsum['gpanel'] = 'Internal interface for GNOME 2 panel applet'
 # control
 comsum['gui'] = '(a.k.a. gcylc) cylc GUI for suite control etc.'
@@ -413,28 +436,27 @@ comsum['insert'] = 'Insert tasks into a running suite'
 comsum['remove'] = 'Remove tasks from a running suite'
 comsum['poll'] = 'Poll submitted or running tasks'
 comsum['kill'] = 'Kill submitted or running tasks'
-comsum['purge'] = 'Remove task trees from a running suite'
 comsum['hold'] = 'Hold (pause) suites or individual tasks'
 comsum['release'] = 'Release (unpause) suites or individual tasks'
 comsum['reset'] = 'Force one or more tasks to change state.'
 comsum['nudge'] = 'Cause the cylc task processing loop to be invoked'
 comsum['reload'] = 'Reload the suite definition at run time'
-comsum['depend'] = 'Add prerequisites to tasks in a running suite'
 comsum['set-runahead'] = 'Change the runahead limit in a running suite.'
 comsum['set-verbosity'] = 'Change a running suite\'s logging verbosity'
+comsum['ext-trigger'] = 'Report an external trigger event to a suite'
 # discovery
 comsum['ping'] = 'Check that a suite is running'
 comsum['scan'] = 'Scan a host for running suites'
 comsum['check-versions'] = 'Compare cylc versions on task host accounts'
 # task
 comsum['submit'] = 'Run a single task just as its parent suite would'
-comsum['started'] = '(task messaging) Report task started'
 comsum['message'] = '(task messaging) Report task messages'
-comsum['succeeded'] = '(task messaging) Report task succeeded'
-comsum['failed'] = '(task messaging) Report task failed'
 comsum['broadcast'] = 'Change suite [runtime] settings on the fly'
-comsum['job-poll'] = '(Internal) Retrieve job status for a task'
-comsum['job-kill'] = '(Internal) Kill a job for a task'
+comsum['jobs-kill'] = '(Internal) Kill task jobs'
+comsum['jobs-poll'] = '(Internal) Retrieve status for task jobs'
+comsum['jobs-submit'] = '(Internal) Submit task jobs'
+comsum['job-kill'] = '(Internal) Kill a task job'
+comsum['job-poll'] = '(Internal) Retrieve status for a task job'
 comsum['job-submit'] = '(Internal) Submit a job'
 
 # utility
@@ -447,14 +469,18 @@ comsum['suite-state'] = 'Query the task states in a suite'
 # hook
 comsum['email-task'] = 'A task event hook script that sends email alerts'
 comsum['email-suite'] = 'A suite event hook script that sends email alerts'
+comsum['job-logs-retrieve'] = (
+    '(Internal) Retrieve logs from a remote host for a task job')
 comsum['check-triggering'] = 'A suite shutdown event hook for cylc testing'
 
+
 def typo(str):
     corrected = str
     if str == 'gcycl':
         corrected = 'gcylc'
     return corrected
 
+
 def category_help(category):
     coms = eval(category + '_commands')
     alts = '|'.join(categories[category])
@@ -469,6 +495,7 @@ def category_help(category):
     print 'COMMANDS:'
     pretty_print(comsum, coms, sort=True)
 
+
 def set_environment_vars(args):
     """
     Set --env=key=val arguments as environment variables & remove
@@ -477,13 +504,15 @@ def set_environment_vars(args):
     regex = re.compile('\A--env=(\S+)=(\S+)\Z')
     for arg in args:
         match = regex.match(arg)
-        if match == None: continue
+        if match is None:
+            continue
         os.environ[match.group(1)] = match.group(2)
-    return filter(lambda i: not regex.search(i),args)
+    return filter(lambda i: not regex.search(i), args)
 
 # no arguments: print help and exit
 if len(sys.argv) == 1:
-    print usage
+    from cylc.version import CYLC_VERSION
+    print usage.replace("__CYLC_VERSION__", CYLC_VERSION)
     pretty_print(catsum, categories)
     sys.exit(1)
 
@@ -492,12 +521,6 @@ args = sys.argv[1:]
 # Set environment variables from arguments like --env=key=val
 args = set_environment_vars(args)
 
-if '--notify-completion' in args:
-    notify = True
-    args.remove('--notify-completion')
-else:
-    notify = False
-
 if len(args) == 1:
     if args[0] == 'categories':
         # secret argument for document processing
@@ -522,10 +545,12 @@ if len(args) == 1:
         sys.exit(0)
     if is_help(args[0]):
         # cylc help
-        print usage
+        from cylc.version import CYLC_VERSION
+        print usage.replace("__CYLC_VERSION__", CYLC_VERSION)
         pretty_print(catsum, categories)
         sys.exit(0)
     if (args[0] == '-v' or args[0] == '--version'):
+        from cylc.version import CYLC_VERSION
         print CYLC_VERSION
         sys.exit(0)
 
@@ -651,19 +676,12 @@ for item in command_args:
         args_new.append(item)
 args = args_new
 
-cmd = 'cylc-' + command
-
-####BACKGROUNDED MESSAGING CALLS: this results in a currently
-####undiagnosed error in Pyro core when the 'message' command is
-####invoked - but not for the 'started' command (try it and see the task
-####stderr log).
-##if command in ['started', 'message']:
-##    # Run non job-ending task message commands in the background.
-##    p = subprocess.Popen([cmd] + args)
-##    sys.exit(0)
-##else:
+cmd = sys.argv[0] + '-' + command
+
 # Replace the current process with that of the sub-command.
 try:
     os.execvp(cmd, [cmd] + args)
-except OSError, x:
-    raise SystemExit(x)
+except OSError, exc:
+    if exc.filename is None:
+        exc.filename = cmd
+    raise SystemExit(exc)
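
Two patterns in the bin/cylc hunks above are worth noting: sub-command abbreviations are resolved against the known command tables, and the selected helper is then run with os.execvp, its path now built from sys.argv[0] so the dispatcher and the cylc-* scripts come from the same installation. A simplified, self-contained Python sketch of that git-style dispatch follows; the COMMANDS list here is hypothetical, whereas the real script assembles its tables from several OrderedDicts of categories and aliases:

    import os
    import sys

    COMMANDS = ['validate', 'version', 'view']  # hypothetical command table

    def match_command(abbrev):
        """Resolve an abbreviation to a unique command, or exit with an error."""
        if abbrev in COMMANDS:
            return abbrev  # an exact match wins outright
        matches = [com for com in COMMANDS if com.startswith(abbrev)]
        if not matches:
            sys.exit('COMMAND not found: ' + abbrev)
        if len(matches) > 1:
            sys.exit(
                'COMMAND "%s" not unique: %s' % (abbrev, ' '.join(matches)))
        return matches[0]

    if __name__ == '__main__':
        if len(sys.argv) < 2:
            sys.exit('usage: cylc COMMAND [ARGS...]')
        command = match_command(sys.argv[1])
        cmd = sys.argv[0] + '-' + command  # e.g. /path/to/cylc-validate
        # Replace the current process with the sub-command, as bin/cylc does.
        os.execvp(cmd, [cmd] + sys.argv[2:])
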
diff --git a/bin/cylc-5to6 b/bin/cylc-5to6
index 79d0cce..5241abe 100755
--- a/bin/cylc-5to6
+++ b/bin/cylc-5to6
@@ -20,7 +20,7 @@
 
 usage() {
     cat <<eof
-USAGE: cylc [prep] 5to6 FILE
+Usage: cylc [prep] 5to6 FILE
 
 Suggest changes to a cylc 5 suite file to make it more cylc 6 compatible.
 This may be a suite.rc file, an include file, or a suite.rc.processed file.
diff --git a/bin/cylc-broadcast b/bin/cylc-broadcast
index 5e27019..d6042a7 100755
--- a/bin/cylc-broadcast
+++ b/bin/cylc-broadcast
@@ -16,54 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
-    from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
-        sys.exit(0)
-
-import os, re
-from cylc.broadcast_report import (
-    get_broadcast_change_report, get_broadcast_bad_options_report)
-from cylc.CylcOptionParsers import cop
-from cylc.task_message import message
-from cylc import cylc_pyro_client
-from cylc.command_prep import prep_pyro
-from cylc.print_tree import print_tree
-from cylc.task_id import TaskID
-from parsec.validate import validate, ValidationError
-from cylc.cfgspec.suite import SPEC, upg
-import cylc.flags
-
-def get_padding( settings, level=0, padding=0 ):
-    level += 1
-    for key,val in settings.items():
-        tmp = level*2 + len(key)
-        if tmp > padding:
-            padding = tmp
-        if isinstance( val, dict ):
-            padding = get_padding( val, level, padding )
-    return padding
-
-def get_rdict( left, right=None ):
-    # left is [section]item, or just item
-    rdict = {}
-    m = re.match( '^\[(.*)\](.*)$', left )
-    if m:
-        # [sect]item = right
-        sect, var = m.groups()
-        if not var:
-            rdict = { sect : right }
-        else:
-            rdict = { sect : { var : right }}
-    else:
-        # item = right
-        rdict = { left : right }
-    return rdict
-
-
-usage = """cylc [control] broadcast|bcast [OPTIONS] REG
+"""cylc [control] broadcast|bcast [OPTIONS] REG
 
 Override [runtime] config in targeted namespaces in a running suite.
 
@@ -104,147 +57,184 @@ options can be used on the same command line. Broadcast cannot change
 
 See also 'cylc reload' - reload a modified suite definition at run time."""
 
-parser = cop( usage, pyro=True )
+import sys
+if '--use-ssh' in sys.argv[1:]:
+    sys.argv.remove('--use-ssh')
+    from cylc.remote import remrun
+    if remrun().execute(force_required=True):
+        sys.exit(0)
+
+import os
+import re
 
-parser.add_option( "-t", "--tag", metavar="CYCLE_POINT",
+import cylc.flags
+from cylc.broadcast_report import (
+    get_broadcast_change_report, get_broadcast_bad_options_report)
+from cylc.CylcOptionParsers import cop
+from cylc.network.suite_broadcast import BroadcastClient
+from cylc.print_tree import print_tree
+from cylc.task_id import TaskID
+from cylc.cfgspec.suite import SPEC, upg
+from parsec.validate import validate
+
+
+def get_padding(settings, level=0, padding=0):
+    level += 1
+    for key, val in settings.items():
+        tmp = level * 2 + len(key)
+        if tmp > padding:
+            padding = tmp
+        if isinstance(val, dict):
+            padding = get_padding(val, level, padding)
+    return padding
+
+
+def get_rdict(left, right=None):
+    # left is [section]item, or just item
+    rdict = {}
+    m = re.match('^\[(.*)\](.*)$', left)
+    if m:
+        # [sect]item = right
+        sect, var = m.groups()
+        if not var:
+            rdict = {sect.strip(): right}
+        else:
+            rdict = {sect.strip(): {var.strip(): right}}
+    else:
+        # item = right
+        rdict = {left: right}
+    return rdict
+
+
+def main():
+    parser = cop(__doc__, pyro=True)
+
+    parser.add_option(
+        "-t", "--tag", metavar="CYCLE_POINT",
         help="(Deprecated). "
-        "Target cycle point. More than one can be added. "
-        "Defaults to '*' for all cycle points with --set and --cancel, "
-        "and nothing with --clear.",
-        action="append", dest="point_strings", default=[] )
+             "Target cycle point. More than one can be added. "
+             "Defaults to '*' for all cycle points with --set and --cancel, "
+             "and nothing with --clear.",
+        action="append", dest="point_strings", default=[])
 
-parser.add_option( "-p", "--point", metavar="CYCLE_POINT",
+    parser.add_option(
+        "-p", "--point", metavar="CYCLE_POINT",
         help="Target cycle point. More than one can be added. "
-        "Defaults to '*' with --set and --cancel, "
-        "and nothing with --clear.",
-        action="append", dest="point_strings", default=[] )
+             "Defaults to '*' with --set and --cancel, "
+             "and nothing with --clear.",
+        action="append", dest="point_strings", default=[])
 
-parser.add_option( "-n", "--namespace", metavar="NAME",
+    parser.add_option(
+        "-n", "--namespace", metavar="NAME",
         help="Target namespace. Defaults to 'root' with "
-        "--set and --cancel, and nothing with --clear.",
-        action="append", dest="namespaces", default=[] )
+             "--set and --cancel, and nothing with --clear.",
+        action="append", dest="namespaces", default=[])
 
-parser.add_option( "-s", "--set", metavar="[SEC]ITEM=VALUE",
+    parser.add_option(
+        "-s", "--set", metavar="[SEC]ITEM=VALUE",
         help="A [runtime] config item and value to broadcast.",
-        action="append", dest="set", default=[] )
+        action="append", dest="set", default=[])
 
-parser.add_option( "-c", "--cancel", metavar="[SEC]ITEM",
+    parser.add_option(
+        "-c", "--cancel", metavar="[SEC]ITEM",
         help="An item-specific broadcast to cancel.",
-        action="append", dest="cancel", default=[] )
+        action="append", dest="cancel", default=[])
 
-parser.add_option( "-C", "--clear",
+    parser.add_option(
+        "-C", "--clear",
         help="Cancel all broadcasts, or with -p/--point, "
-        "-n/--namespace, cancel all broadcasts to targeted "
-        "namespaces and/or cycle points. Use \"-C -p '*'\" "
-        "to cancel all all-cycle broadcasts without canceling "
-        "all specific-cycle broadcasts.",
-        action="store_true", dest="clear", default=False )
-
-parser.add_option( "-e", "--expire", metavar="CYCLE_POINT",
+             "-n/--namespace, cancel all broadcasts to targeted "
+             "namespaces and/or cycle points. Use \"-C -p '*'\" "
+             "to cancel all all-cycle broadcasts without canceling "
+             "all specific-cycle broadcasts.",
+        action="store_true", dest="clear", default=False)
+
+    parser.add_option(
+        "-e", "--expire", metavar="CYCLE_POINT",
         help="Cancel any broadcasts that target cycle "
-        "points earlier than, but not inclusive of, CYCLE_POINT.",
-        action="store", default=None, dest="expire" )
+             "points earlier than, but not inclusive of, CYCLE_POINT.",
+        action="store", default=None, dest="expire")
 
-parser.add_option( "-d", "--display",
+    parser.add_option(
+        "-d", "--display",
         help="Display active broadcasts.",
-        action="store_true", default=False, dest="show" )
+        action="store_true", default=False, dest="show")
 
-parser.add_option( "-k", "--display-task", metavar="TASKID",
+    parser.add_option(
+        "-k", "--display-task", metavar="TASKID",
         help="Print active broadcasts for a given task "
-        "(" + TaskID.SYNTAX + ").",
-        action="store", default=None, dest="showtask" )
+             "(" + TaskID.SYNTAX + ").",
+        action="store", default=None, dest="showtask")
 
-parser.add_option( "-b", "--box",
+    parser.add_option(
+        "-b", "--box",
         help="Use unicode box characters with -d, -k.",
-        action="store_true", default=False, dest="unicode" )
+        action="store_true", default=False, dest="unicode")
 
-parser.add_option( "-r", "--raw",
+    parser.add_option(
+        "-r", "--raw",
         help="With -d/--display or -k/--display-task, write out "
-        "the broadcast config structure in raw Python form.",
-        action="store_true", default=False, dest="raw" )
-
-( options, args ) = parser.parse_args()
+             "the broadcast config structure in raw Python form.",
+        action="store_true", default=False, dest="raw")
 
-suite = args[0]
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-suite, pphrase = prep_pyro( suite, options ).execute()
-
-debug = False
-if cylc.flags.debug:
-    debug = True
-else:
-    try:
-        # from task execution environment
-        if os.environ['CYLC_DEBUG'] == 'True':
-            debug = True
-    except KeyError:
-        pass
-
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'broadcast_receiver' )
-except Exception, x:
-    if debug:
-        raise
-    raise SystemExit(x)
-
-if options.show or options.showtask:
-    if options.showtask:
-        try:
-            name, point_string = TaskID.split(options.showtask)
-        except ValueError:
-            parser.error("TASKID must be " + TaskID.SYNTAX)
-    try:
-        settings = proxy.get( options.showtask )
-    except Exception,x:
-        if debug:
-            raise
-        sys.exit(x)
-    padding = get_padding(settings) * ' '
-    if options.raw:
-        print str( settings )
+    debug = False
+    if cylc.flags.debug:
+        debug = True
     else:
-        print_tree( settings, padding, options.unicode )
-    sys.exit(0)
+        try:
+            # from task execution environment
+            if os.environ['CYLC_DEBUG'] == 'True':
+                debug = True
+        except KeyError:
+            pass
+
+    pclient = BroadcastClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+
+    if options.show or options.showtask:
+        if options.showtask:
+            try:
+                name, point_string = TaskID.split(options.showtask)
+            except ValueError:
+                parser.error("TASKID must be " + TaskID.SYNTAX)
+        settings = pclient.broadcast('get', options.showtask)
+        padding = get_padding(settings) * ' '
+        if options.raw:
+            print str(settings)
+        else:
+            print_tree(settings, padding, options.unicode)
+        sys.exit(0)
 
-if options.clear:
-    try:
-        modified_settings, bad_options = proxy.clear(
-            options.point_strings, options.namespaces)
-    except Exception,x:
-        if debug:
-            raise
-        sys.exit(x)
-    else:
+    if options.clear:
+        modified_settings, bad_options = pclient.broadcast(
+            'clear', options.point_strings, options.namespaces)
         if modified_settings:
             print get_broadcast_change_report(
                 modified_settings, is_cancel=True)
         sys.exit(get_broadcast_bad_options_report(bad_options))
 
-if options.expire:
-    try:
-        modified_settings, bad_options = proxy.expire(options.expire)
-    except Exception, exc:
-        if debug:
-            raise
-        sys.exit(exc)
-    else:
+    if options.expire:
+        modified_settings, bad_options = pclient.broadcast(
+            'expire', options.expire)
         if modified_settings:
             print get_broadcast_change_report(
                 modified_settings, is_cancel=True)
         sys.exit(get_broadcast_bad_options_report(bad_options))
 
-# implement namespace and cycle point defaults here
-namespaces = options.namespaces
-if not namespaces:
-    namespaces = ["root"]
-point_strings = options.point_strings
-if not point_strings:
-    point_strings = ["*"]
+    # implement namespace and cycle point defaults here
+    namespaces = options.namespaces
+    if not namespaces:
+        namespaces = ["root"]
+    point_strings = options.point_strings
+    if not point_strings:
+        point_strings = ["*"]
 
-if options.cancel:
-    try:
+    if options.cancel:
         settings = []
         for option_item in options.cancel:
             if "=" in option_item:
@@ -258,20 +248,14 @@ if options.cancel:
             upg({'runtime': {'__MANY__': setting}}, 'test')
             validate(setting, SPEC['runtime']['__MANY__'])
             settings.append(setting)
-        modified_settings, bad_options = proxy.clear(
-            point_strings, namespaces, settings)
-    except Exception, exc:
-        if debug:
-            raise
-        sys.exit(exc)
-    else:
+        modified_settings, bad_options = pclient.broadcast(
+            'clear', point_strings, namespaces, settings)
         if modified_settings:
             print get_broadcast_change_report(
                 modified_settings, is_cancel=True)
         sys.exit(get_broadcast_bad_options_report(bad_options))
 
-if options.set:
-    try:
+    if options.set:
         settings = []
         for option_item in options.set:
             if "=" not in option_item:
@@ -285,11 +269,16 @@ if options.set:
             upg({'runtime': {'__MANY__': setting}}, 'test')
             validate(setting, SPEC['runtime']['__MANY__'])
             settings.append(setting)
-        modified_settings, bad_options = proxy.put(point_strings, namespaces, settings)
+        modified_settings, bad_options = pclient.broadcast(
+            'put', point_strings, namespaces, settings)
+        print get_broadcast_change_report(modified_settings)
+        sys.exit(get_broadcast_bad_options_report(bad_options, is_set=True))
+
+
+if __name__ == "__main__":
+    try:
+        main()
     except Exception as exc:
-        if debug:
+        if cylc.flags.debug:
             raise
         sys.exit(exc)
-    else:
-        print get_broadcast_change_report(modified_settings)
-        sys.exit(get_broadcast_bad_options_report(bad_options, is_set=True))
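
get_rdict() above converts a "[section]item" key into the nested dict shape expected by the runtime config validator. A small self-contained illustration of the shapes it produces, with hypothetical setting names:

    import re

    def get_rdict(left, right=None):
        # Same parsing as in cylc-broadcast above: "[section]item" or "item".
        match = re.match(r'^\[(.*)\](.*)$', left)
        if match:
            sect, var = match.groups()
            if not var:
                return {sect.strip(): right}
            return {sect.strip(): {var.strip(): right}}
        return {left: right}

    # Hypothetical broadcast settings, to show the resulting structures:
    print(get_rdict('[environment]DIAG', 'on'))  # {'environment': {'DIAG': 'on'}}
    print(get_rdict('script', 'true'))           # {'script': 'true'}

Each such dict is then upgraded with upg() and validated against SPEC['runtime']['__MANY__'] before being sent via the BroadcastClient, as in the main() body above.
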
diff --git a/bin/cylc-cat-log b/bin/cylc-cat-log
index 988c3a2..287fde5 100755
--- a/bin/cylc-cat-log
+++ b/bin/cylc-cat-log
@@ -16,216 +16,308 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [info] cat-log|log [OPTIONS] ARGS
+
+Print the location or content of any suite or task log file, or a listing of a
+task log directory on the suite or task host.  By default the suite event log
+or task job script is printed. For task logs you must use the same cycle
+point format as the suite (list the log directory to see what it is)."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
 import os
-import cylc.flags
+from pipes import quote
+import shlex
 from subprocess import Popen, PIPE
+import traceback
+
 from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_file
 from cylc.owner import is_remote_user
-from cylc.rundb import CylcRuntimeDAO
+from cylc.rundb import CylcSuiteDAO
 from cylc.suite_host import is_remote_host
-from cylc.suite_logging import suite_log
-from cylc.suite_output import suite_output
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.task_id import TaskID
 
-parser = cop(
-    """cylc [info] cat-log|log [OPTIONS] ARGS
-Print the location or content of any suite or task log file, or a listing of a
-task log directory on the suite or task host.  By default the suite event log
-or task job script is printed.""",
-    argdoc=[("REG", "Suite name"), ("[TASK-ID]", """Task ID""")])
-
-parser.add_option(
-    "-l", "--location",
-    help="Print the location of the file, and set exit status according"
-    "to whether or not the file exists (0 means it exists).",
-    action="store_true", default=False, dest="location")
-
-parser.add_option(
-    "-o", "--stdout",
-    help="Suite or task stdout log.",
-    action="store_true", default=False, dest="stdout")
-
-parser.add_option(
-    "-e", "--stderr",
-    help="Suite or task stderr log.",
-    action="store_true", default=False, dest="stderr")
-
-parser.add_option(
-    "-r", "--rotation",
-    help="Suite logs log rotation number (default 0)",
-    metavar="INT", action="store", default="0", dest="rotation")
-
-parser.add_option(
-    "-a", "--activity",
-    help="Task job activity log.",
-    action="store_true", default=False, dest="job_activity")
-
-parser.add_option(
-    "-d", "--diff",
-    help="Task job script diff (present after an edit-run).",
-    action="store_true", default=False, dest="job_diff")
-
-parser.add_option(
-    "-u", "--status",
-    help="Task job status file.",
-    action="store_true", default=False, dest="job_status")
-
-parser.add_option(
-    "-c", "--custom",
-    help="Custom task log name, for tasks that write non-standard files to "
-    "their cylc log directory (e.g. 'job.stats').",
-    metavar="FILENAME", action="store", default=None, dest="job_custom")
-
-parser.add_option(
-    "-s", "--submit-number",
-    help="Task logs only: job submit number (default latest).",
-    metavar="INT", action="store", default="NN", dest="subnum")
-
-parser.add_option(
-    "-t", "--try-number",
-    help="(deprecated - use -s/--submit-number).",
-    metavar="INT", action="store", default="NN", dest="subnum")
-
-parser.add_option(
-    "-x", "--list-local",
-    help="List a task log directory on the suite host.",
-    action="store_true", default=False, dest="list_local")
-
-parser.add_option(
-    "-y", "--list-remote",
-    help="List a task log directory on the task host.",
-    action="store_true", default=False, dest="list_remote")
-
-(options, args) = parser.parse_args()
-suite, suiterc = prep_file(args[0], options).execute()
-owner = options.db_owner
-
-# If --host is used, the command re-invokes itself on the remote suite host.
-# For task logs we have to find retrieve task host from the suite run db
-# located on the suite host.
-
-user_at_host = None
-suite_run_dir = GLOBAL_CFG.get_derived_host_item(
-    suite, "suite run directory")
-
-if len(args) == 1:
-    # Suite logs.
-    if any([options.job_activity, options.job_diff, options.job_status,
-            options.list_local or options.list_remote]) or (
-            options.job_custom is not None or
-            options.subnum != "NN"):
-        parser.error("Task log option(s) are not legal for suite logs.")
-
-    log_dir = os.path.join(suite_run_dir, "log", "suite")
-    if options.stdout:
-        fpath = os.path.join(log_dir, "out")
-    elif options.stderr:
-        fpath = os.path.join(log_dir, "err")
+
+NAME_DEFAULT = "log"
+NAME_ERR = "err"
+NAME_OUT = "out"
+NAME_JOB_ACTIVITY_LOG = "job-activity.log"
+NAME_JOB_EDIT_DIFF = "job-edit.diff"
+NAME_JOB_STATUS = "job.status"
+JOB_LOG_DEST_MAP = {
+    NAME_DEFAULT: "job", NAME_ERR: "job.err", NAME_OUT: "job.out"}
+JOB_LOG_LOCAL_ALWAYS = (NAME_JOB_EDIT_DIFF, NAME_JOB_ACTIVITY_LOG)
+LIST_MODE_LOCAL = "local"
+LIST_MODE_REMOTE = "remote"
+
+
+def get_option_parser():
+    """Set up the CLI option parser."""
+    parser = cop(
+        __doc__, argdoc=[("REG", "Suite name"), ("[TASK-ID]", """Task ID""")])
+
+    parser.add_option(
+        "-l", "--location",
+        help=("Print location of the log file, exit 0 if it exists," +
+              " exit 1 otherwise"),
+        action="store_true", default=False, dest="location_mode")
+
+    parser.add_option(
+        "-o", "--stdout",
+        help="Suite log: out, task job log: job.out",
+        action="store_const", const=NAME_OUT, dest="filename")
+
+    parser.add_option(
+        "-e", "--stderr",
+        help="Suite log: err, task job log: job.err",
+        action="store_const", const=NAME_ERR, dest="filename")
+
+    parser.add_option(
+        "-r", "--rotation",
+        help="Suite logs log rotation number", metavar="INT",
+        action="store", dest="rotation_num")
+
+    parser.add_option(
+        "-a", "--activity",
+        help="Task job log only: Short for --filename=job-activity.log",
+        action="store_const", const=NAME_JOB_ACTIVITY_LOG, dest="filename")
+
+    parser.add_option(
+        "-d", "--diff",
+        help=("Task job log only: Short for --filename=job-edit.diff" +
+              " (file present after an edit-run)."),
+        action="store_const", const=NAME_JOB_EDIT_DIFF, dest="filename")
+
+    parser.add_option(
+        "-u", "--status",
+        help="Task job log only: Short for --filename=job.status",
+        action="store_const", const=NAME_JOB_STATUS, dest="filename")
+
+    parser.add_option(
+        "-f", "--filename", "-c", "--custom",
+        help="Name of log file (e.g. 'job.stats').", metavar="FILENAME",
+        action="store", dest="filename")
+
+    parser.add_option(
+        "--tail",
+        help="Tail the job log, if the task is running.", metavar="INT",
+        action="store_true", default=False, dest="tail")
+
+    parser.add_option(
+        "-s", "--submit-number", "-t", "--try-number",
+        help="Task job log only: submit number (default=NN).", metavar="INT",
+        action="store", dest="submit_num")
+
+    parser.add_option(
+        "-x", "--list-local",
+        help="List a log directory on the suite host",
+        action="store_const", const=LIST_MODE_LOCAL, dest="list_mode")
+
+    parser.add_option(
+        "-y", "--list-remote",
+        help="Task job log only: List log directory on the job host",
+        action="store_const", const=LIST_MODE_REMOTE, dest="list_mode")
+
+    return parser
+
+
+def get_suite_log_path(options, suite):
+    """Return file name of a suite log, given the options."""
+    if options.list_mode:
+        basename = "."
     else:
-        fpath = os.path.join(log_dir, "log")
-        if options.rotation != "0":
-            fpath += "." + options.rotation
-else:
-    # Task logs.
-    if [options.job_activity, options.job_diff, options.job_status,
-            options.stdout, options.stderr, options.list_local,
-            options.list_remote,
-            options.job_custom is not None].count(True) > 1:
-        parser.error("Choose only one log file type.")
-    taskid = args[1]
+        if options.filename:
+            basename = options.filename
+        else:
+            basename = NAME_DEFAULT
+        if options.rotation_num:
+            basename += "." + options.rotation_num
+    return os.path.normpath(os.path.join(
+        GLOBAL_CFG.get_derived_host_item(suite, "suite log directory"),
+        basename))
 
-    try:
-        task, point = TaskID.split(taskid)
-    except:
-        sys.exit("ERROR, illegal task ID: %s" % taskid)
 
-    if options.subnum == "NN":
-        # The latest submit.
-        log_sub_dir = os.path.join(point, task, "NN")
+def get_task_job_log_path(
+        options, suite, point, task, submit_num, user_at_host):
+    """Return file name of a task job log, given the options."""
+    if user_at_host and "@" in user_at_host:
+        owner, host = user_at_host.split("@", 1)
+    elif user_at_host:
+        owner, host = (None, user_at_host)
     else:
-        # A specific submit.
+        owner, host = (None, None)
+    if options.list_mode:
+        basename = "."
+    elif options.filename:
         try:
-            log_sub_dir = os.path.join(
-                point, task, "%02d" % int(options.subnum))
+            basename = JOB_LOG_DEST_MAP[options.filename]
+        except KeyError:
+            basename = options.filename
+    else:
+        basename = JOB_LOG_DEST_MAP[NAME_DEFAULT]
+    if submit_num != "NN":
+        submit_num = "%02d" % submit_num
+    return os.path.normpath(os.path.join(
+        GLOBAL_CFG.get_derived_host_item(
+            suite, "suite job log directory", host, owner),
+        point, task, submit_num, basename))
+
+
+def get_task_job_attrs(options, suite, point, task, submit_num):
+    """Return (user at host, command0) of a task job log.
+
+    user at host is set if task job is run remotely and for relevant log files.
+    command0 is set if task job is running on a batch system that requires a
+    special command to view stdout/stderr files.
+
+    """
+    if (options.filename in JOB_LOG_LOCAL_ALWAYS or
+            options.list_mode == LIST_MODE_LOCAL):
+        return (None, None)
+    suite_dao = CylcSuiteDAO(
+        os.path.join(
+            GLOBAL_CFG.get_derived_host_item(suite, "suite run directory"),
+            CylcSuiteDAO.DB_FILE_BASE_NAME),
+        is_public=True)
+    task_job_data = suite_dao.select_task_job(None, point, task, submit_num)
+    suite_dao.close()
+    if task_job_data is None:
+        return (None, None)
+    if "@" in task_job_data["user_at_host"]:
+        owner, host = str(task_job_data["user_at_host"]).split("@", 1)
+    else:
+        owner, host = (None, str(task_job_data["user_at_host"]))
+    user_at_host = None
+    if is_remote_host(host) or is_remote_user(owner):
+        if host and owner:
+            user_at_host = owner + "@" + host
+        elif host:
+            user_at_host = host
+        elif owner:
+            user_at_host = owner + "@localhost"
+    if (options.list_mode or
+            options.location_mode or
+            options.filename not in [
+                NAME_ERR, NAME_OUT,
+                JOB_LOG_DEST_MAP[NAME_ERR], JOB_LOG_DEST_MAP[NAME_OUT]] or
+            not task_job_data["batch_sys_name"] or
+            not task_job_data["batch_sys_job_id"] or
+            not task_job_data["time_run"] or
+            task_job_data["time_run_exit"]):
+        return (user_at_host, None)
+    try:
+        if user_at_host and "@" in user_at_host:
+            owner, host = user_at_host.split("@", 1)
+        else:
+            owner, host = (None, user_at_host)
+        if options.filename in (NAME_OUT, JOB_LOG_DEST_MAP[NAME_OUT]):
+            key = "out viewer"
+        else:
+            key = "err viewer"
+        conf = GLOBAL_CFG.get_host_item("batch systems", host, owner)
+        command0_tmpl = conf[str(task_job_data["batch_sys_name"])][key]
+    except (KeyError, TypeError):
+        return (user_at_host, None)
+    else:
+        if command0_tmpl:
+            return (user_at_host, shlex.split(command0_tmpl % {
+                "job_id": str(task_job_data["batch_sys_job_id"])}))
+        else:
+            return (user_at_host, None)
+
+
+def main():
+    """Implement cylc cat-log CLI."""
+    parser = get_option_parser()
+    options, args = parser.parse_args()
+    suite = args[0]
+    if options.filename and options.list_mode:
+        parser.error("Choose either test/print log file or list log directory")
+    elif len(args) > 1:
+        # Task job log
+        try:
+            task, point = TaskID.split(args[1])
         except ValueError:
-            parser.error("Illegal submit number: %s" % options.subnum)
-
-    # Get the task host from the suite run db.
-    if not (options.list_local or options.job_diff or options.job_activity):
-        # Job diff and activity logs file are always located on the suite host;
-        # other logs are generated by the task at run time, on the task host.
-        suite_run_dao = CylcRuntimeDAO(suite_run_dir)
-        host = suite_run_dao.get_task_host(task, point)
-        suite_run_dao.close()
-        owner = None
-        if host and "@" in host:
-            owner, host = host.split("@", 1)
-        if is_remote_host(host) or is_remote_user(owner):
-            if host and owner:
-                user_at_host = owner + "@" + host
-            elif host:
-                user_at_host = host
-            elif owner:
-                user_at_host = owner + "@localhost"
-        job_log_dir = GLOBAL_CFG.get_derived_host_item(
-            suite, "suite job log directory", host=host, owner=owner)
+            parser.error("Illegal task ID: %s" % args[1])
+        if options.submit_num in [None, "NN"]:
+            submit_num = "NN"
+        else:
+            try:
+                submit_num = int(options.submit_num)
+            except ValueError:
+                parser.error("Illegal submit number: %s" % options.submit_num)
+        user_at_host, command0 = get_task_job_attrs(
+            options, suite, point, task, submit_num)
+        filename = get_task_job_log_path(
+            options, args[0], point, task, submit_num, user_at_host)
     else:
-        user_at_host = None
-        job_log_dir = GLOBAL_CFG.get_derived_host_item(
-            suite, "suite job log directory")
-
-    log_dir = fpath = os.path.join(job_log_dir, log_sub_dir)
-    if options.stdout:
-        fpath = os.path.join(log_dir, "job.out")
-    elif options.stderr:
-        fpath = os.path.join(log_dir, "job.err")
-    elif options.job_diff:
-        fpath = os.path.join(log_dir, "job-edit.diff")
-    elif options.job_status:
-        fpath = os.path.join(log_dir, "job.status")
-    elif options.job_activity:
-        fpath = os.path.join(log_dir, "job-activity.log")
-    elif options.job_custom:
-        fpath = os.path.join(log_dir, options.job_custom)
-    elif options.list_remote or options.list_local:
-        fpath = log_dir
+        # Suite log
+        if options.submit_num or options.list_mode == LIST_MODE_REMOTE:
+            parser.error("Task log option(s) are not legal for suite logs.")
+        filename = get_suite_log_path(options, args[0])
+        user_at_host, command0 = (None, None)
+
+    if user_at_host:
+        if "@" in user_at_host:
+            owner, host = user_at_host.split("@", 1)
+        else:
+            owner, host = (None, user_at_host)
+
+    # Construct the shell command
+    commands = []
+    if options.location_mode:
+        if user_at_host is not None:
+            sys.stdout.write("%s:" % user_at_host)
+        sys.stdout.write("%s\n" % filename)
+        commands.append(["test", "-e", filename])
+    elif options.list_mode:
+        commands.append(["ls", filename])
+    elif command0 and user_at_host:
+        commands.append(command0 + ["||", "cat", filename])
+    elif command0:
+        commands.append(command0)
+        commands.append(["cat", filename])
+    elif options.tail:
+        if user_at_host:
+            # Replace 'cat' with the remote tail command.
+            cmd_tmpl = str(GLOBAL_CFG.get_host_item(
+                "remote tail command template", host, owner))
+            commands.append(shlex.split(cmd_tmpl % {"filename": filename}))
+        else:
+            # Replace 'cat' with the local tail command.
+            cmd_tmpl = str(GLOBAL_CFG.get_host_item(
+                "local tail command template"))
+            commands.append(shlex.split(cmd_tmpl % {"filename": filename}))
     else:
-        fpath = os.path.join(log_dir, "job")
-
-# Having got the file path, construct a command to print its path or content.
-if options.location:
-    if user_at_host is not None:
-        sys.stdout.write("%s:" % user_at_host)
-    sys.stdout.write("%s\n" % fpath)
-    cmd = "test -e %s" % fpath
-elif options.list_remote or options.list_local:
-    cmd = "ls %s" % fpath
-else:
-    cmd = "cat %s" % fpath
-
-if user_at_host is not None:
-    command = ["ssh", "-oBatchMode=yes", user_at_host, cmd]
-else:
-    command = cmd.split()
-
-# Execute the command.
-try:
-    p = Popen(command, stdout=PIPE, stderr=PIPE)
-except OSError as exc:
-    if cylc.flags.debug:
-        raise
-    sys.exit(exc)
-out, err, = p.communicate()
-res = p.wait()
-if out:
-    sys.stdout.write(out)
-if err:
-    sys.stderr.write(err)
-sys.exit(res)
+        commands.append(["cat", filename])
+
+    # Deal with remote [user@]host
+    if user_at_host:
+        ssh = str(GLOBAL_CFG.get_host_item(
+            "remote shell template", host, owner)).replace(" %s", "")
+        for i, command in enumerate(commands):
+            commands[i] = shlex.split(ssh) + ["-n", user_at_host] + command
+
+    err = None
+    for command in commands:
+        stderr = PIPE
+        if options.debug:
+            sys.stderr.write(
+                " ".join([quote(item) for item in command]) + "\n")
+            stderr = None
+        proc = Popen(command, stderr=stderr)
+        err = proc.communicate()[1]
+        ret_code = proc.wait()
+        if ret_code == 0:
+            break
+    if ret_code and err:
+        sys.stderr.write(err)
+    sys.exit(ret_code)
+
+
+if __name__ == "__main__":
+    main()
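
The rewritten cat-log builds a list of candidate command lines (test/ls/cat, a batch-system viewer, or a tail template) and, when the file lives on a remote job host, prefixes each with the configured remote shell before running them in turn until one succeeds. A minimal sketch of that pattern, using plain "ssh" and "tail -f" as stand-ins for the configurable templates:

    from subprocess import PIPE, Popen

    def build_commands(filename, user_at_host=None, list_mode=False, tail=False):
        """Mirror the command construction in cylc-cat-log above."""
        if list_mode:
            commands = [["ls", filename]]
        elif tail:
            commands = [["tail", "-f", filename]]   # stand-in for the template
        else:
            commands = [["cat", filename]]
        if user_at_host:
            # Stand-in for the configured "remote shell template".
            commands = [["ssh", "-n", user_at_host] + cmd for cmd in commands]
        return commands

    def run_until_success(commands):
        """Run each command in turn; stop at the first zero exit status."""
        ret_code, err = 1, ""
        for command in commands:
            proc = Popen(command, stderr=PIPE)
            err = proc.communicate()[1]
            ret_code = proc.wait()
            if ret_code == 0:
                break
        return ret_code, err
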
diff --git a/bin/cylc-cat-state b/bin/cylc-cat-state
index 6b665fd..6dbe69b 100755
--- a/bin/cylc-cat-state
+++ b/bin/cylc-cat-state
@@ -16,46 +16,52 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [info] cat-state [OPTIONS] REG
+
+Print the suite state dump file directly to stdout."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
+import cylc.flags
 from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_file
 from cylc.dump import dump_to_stdout, get_stop_state_summary
 from cylc.suite_state_dumping import SuiteStateDumper
-import cylc.flags
 
-parser = cop( usage = """cylc [info] cat-state [OPTIONS] REG
 
-Print the suite state dump file directly to stdout.""" )
+def main():
+    parser = cop(__doc__)
 
-parser.add_option( "-d", "--dump", help="Use the same display "
-        "format as the 'cylc dump' command.",
-        action="store_true", default=False, dest="dumpform" )
+    parser.add_option(
+        "-d", "--dump",
+        help="Use the same display format as the 'cylc dump' command.",
+        action="store_true", default=False, dest="dumpform")
 
-( options, args ) = parser.parse_args()
-owner = options.db_owner
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-suite, suiterc = prep_file( args[0], options ).execute()
-
-try:
+    owner = options.db_owner
     f = open(SuiteStateDumper(suite).file_name, 'rb')
-except (IOError, OSError), x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
-else:
     lines = f.readlines()
     f.close()
 
-lines = map( str.rstrip, lines )
+    lines = map(str.rstrip, lines)
+
+    if not options.dumpform:
+        for line in lines:
+            print line
+    else:
+        [glbl, states, fam_states] = get_stop_state_summary(
+            suite, options.owner, options.host, lines)
+        dump_to_stdout(states)
+
 
-if not options.dumpform:
-    for line in lines:
-        print line
-else:
-    [glbl, states, fam_states] = get_stop_state_summary( suite,
-            options.owner, options.host, lines )
-    dump_to_stdout( states )
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
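
cat-state, like most commands in this release, is now wrapped in a main() function guarded by a handler that re-raises only in debug mode. A minimal sketch of that recurring pattern, with a stand-in body in place of the real state-dump printing:

    import sys
    import cylc.flags

    def main():
        # Stand-in body; the real main() opens and prints the state dump file.
        raise RuntimeError("something went wrong")

    if __name__ == "__main__":
        try:
            main()
        except Exception as exc:
            if cylc.flags.debug:
                raise           # full traceback under --debug
            sys.exit(exc)       # otherwise just the message, non-zero status
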
diff --git a/bin/cylc-check-software b/bin/cylc-check-software
index 5aa9e9f..5a550f2 100755
--- a/bin/cylc-check-software
+++ b/bin/cylc-check-software
@@ -20,7 +20,7 @@
 
 usage() {
     cat <<eof
-USAGE: cylc [admin] check-software
+Usage: cylc [admin] check-software
 
 Check that the external software required by cylc is installed.
 
diff --git a/bin/cylc-check-triggering b/bin/cylc-check-triggering
index 63158ad..cdbaae9 100755
--- a/bin/cylc-check-triggering
+++ b/bin/cylc-check-triggering
@@ -1,10 +1,22 @@
 #!/usr/bin/env python
 
-import sys, os
-from cylc.LogDiagnosis import LogAnalyser
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-if len(sys.argv) == 2 and sys.argv[1] == '--help':
-    print """USAGE: cylc [hook] check-triggering ARGS
+"""cylc [hook] check-triggering ARGS
 
 This is a cylc shutdown event handler that compares the newly generated
 suite log with a previously generated reference log "reference.log"
@@ -32,30 +44,37 @@ Reference tests can use any run mode:
  equivalent to the reference run, the test will abort with non-zero exit
  status - so reference tests can be used as automated tests to check
  that changes to cylc have not broken your suites."""
+
+import os
+import sys
+
+from cylc.LogDiagnosis import LogAnalyser
+
+if len(sys.argv) == 2 and sys.argv[1] == '--help':
+    print __doc__
     sys.exit(0)
 
-print
-print "This is the cylc check-triggering shutdown event handler"
+print "\nThis is the cylc check-triggering shutdown event handler"
 
 event, suite = sys.argv[1], sys.argv[2]
 
 if event != 'shutdown':
-    raise SystemExit( "ERROR: run this as a shutdown event handler")
+    raise SystemExit("ERROR: run this as a shutdown event handler")
 
 try:
-    log_dir = os.path.expandvars( os.environ['CYLC_SUITE_LOG_DIR'] )
-    suite_dir = os.path.expandvars( os.environ['CYLC_SUITE_DEF_PATH'] )
+    log_dir = os.path.expandvars(os.environ['CYLC_SUITE_LOG_DIR'])
+    suite_dir = os.path.expandvars(os.environ['CYLC_SUITE_DEF_PATH'])
 except KeyError, x:
     raise SystemExit(x)
 
-new_log = os.path.join( log_dir, 'log' )
-ref_log = os.path.join( suite_dir, 'reference.log' )
+new_log = os.path.join(log_dir, 'log')
+ref_log = os.path.join(suite_dir, 'reference.log')
 
 try:
-    lanal = LogAnalyser( ref_log, new_log )
+    lanal = LogAnalyser(new_log, ref_log)
     lanal.verify_triggering()
 except Exception, x:
     print >> sys.stderr, x
-    raise SystemExit( "ERROR: Triggering check FAILED" )
+    raise SystemExit("ERROR: Triggering check FAILED")
 else:
     print "Triggering check passed"
diff --git a/bin/cylc-check-versions b/bin/cylc-check-versions
index e046201..977eb0f 100755
--- a/bin/cylc-check-versions
+++ b/bin/cylc-check-versions
@@ -16,22 +16,10 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-import subprocess
-
-import cylc.flags
-from cylc.remote import remrun
-from cylc.CylcOptionParsers import cop
-from cylc.version import CYLC_VERSION
-from cylc.config import config, SuiteConfigError
-from cylc.host_select import get_task_host
-
-
-parser = cop(
-    """cylc [discovery] check-versions [OPTIONS] ARGS
+"""cylc [discovery] check-versions [OPTIONS] ARGS
 
 Check the version of cylc invoked on each of SUITE's task host accounts when
-CYLC_VERSION is set to """ + CYLC_VERSION + """ (i.e. *this* version).
+CYLC_VERSION is set to *the version running this command line tool*.
 Different versions are reported but are not considered an error unless the
 -e|--error option is specified, because different cylc versions from 6.0.0
 onward should at least be backward compatible.
@@ -44,30 +32,37 @@ if $CYLC_VERSION is defined.
 
 Use -v/--verbose to see the command invoked to determine the remote version
 (all remote cylc command invocations will be of the same form, which may be
-site dependent -- see cylc global config documentation.""",
-    prep=True, jset=True)
+site dependent -- see cylc global config documentation)."""
+
+import sys
+import subprocess
+
+import cylc.flags
+from cylc.remote import remrun
+from cylc.CylcOptionParsers import cop
+from cylc.version import CYLC_VERSION
+from cylc.config import SuiteConfig, SuiteConfigError
+from cylc.host_select import get_task_host
+
+
+def main():
+    parser = cop(__doc__, prep=True, jset=True)
 
-parser.add_option(
-    "-e", "--error", help="Exit with error status "
-    "if " + CYLC_VERSION + " is not available on all remote accounts.",
-    action="store_true", default=False, dest="error")
+    parser.add_option(
+        "-e", "--error", help="Exit with error status "
+        "if " + CYLC_VERSION + " is not available on all remote accounts.",
+        action="store_true", default=False, dest="error")
 
-(options, args) = parser.parse_args(remove_opts=['--host', '--user'])
+    (options, args) = parser.parse_args(remove_opts=['--host', '--user'])
 
-# suite name or file path
-suite, suiterc, junk = parser.get_suite()
+    # suite name or file path
+    suite, suiterc, junk = parser.get_suite()
 
-# extract task host accounts from the suite
-try:
-    config = config(
+    # extract task host accounts from the suite
+    config = SuiteConfig.get_inst(
         suite, suiterc,
         template_vars=options.templatevars,
         template_vars_file=options.templatevars_file)
-except Exception as exc:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(exc)
-else:
     result = config.get_namespace_list('all tasks')
     namespaces = result.keys()
     accounts = set()
@@ -78,55 +73,64 @@ else:
         accounts.add((owner, host))
     accounts = list(accounts)
 
-# Interrogate the each remote account with CYLC_VERSION set to our version.
-# Note that post backward compatibility concerns to do this we can just run:
-#   cylc version --host=HOST --user=USER
-# but this command only exists for version > 6.3.0.
-# So for the moment generate an actual remote invocation command string for
-# "cylc -v".
-
-# (save verbose flag as gets reset in remrun)
-verbose = cylc.flags.verbose
-
-warn = {}
-contacted = 0
-for user, host in accounts:
-    argv = ["cylc", "--version"]
-    if user and host:
-        argv += ["--user=%s" % user, "--host=%s" % host]
-        user_at_host = "%s@%s" % (user, host)
-    elif user:
-        argv += ["--user=%s" % user]
-        user_at_host = "%s at localhost" % user
-    elif host:
-        argv += ["--host=%s" % host]
-        user_at_host = host
-    cmd = remrun(argv).execute(dry_run=True)
-    if verbose:
-        print "%s: %s" % (user_at_host, ' '.join(cmd))
-    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    out, err = p.communicate()
-    res = p.wait()
-    if res == 0:
+    # Interrogate each remote account with CYLC_VERSION set to our version.
+    # Once backward compatibility is no longer a concern we can just run:
+    #   cylc version --host=HOST --user=USER
+    # but this command only exists for version > 6.3.0.
+    # So for the moment generate an actual remote invocation command string
+    # for "cylc -v".
+
+    # (save verbose flag as gets reset in remrun)
+    verbose = cylc.flags.verbose
+
+    warn = {}
+    contacted = 0
+    for user, host in accounts:
+        argv = ["cylc", "version"]
+        if user and host:
+            argv += ["--user=%s" % user, "--host=%s" % host]
+            user_at_host = "%s@%s" % (user, host)
+        elif user:
+            argv += ["--user=%s" % user]
+            user_at_host = "%s at localhost" % user
+        elif host:
+            argv += ["--host=%s" % host]
+            user_at_host = host
         if verbose:
-            print "   %s" % out
-        contacted += 1
-        out = out.strip()
-        if out != CYLC_VERSION:
-            warn[user_at_host] = out
+            print "%s: %s" % (user_at_host, ' '.join(argv))
+        p = subprocess.Popen(
+            argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        out, err = p.communicate()
+        res = p.wait()
+        if res == 0:
+            if verbose:
+                print "   %s" % out
+            contacted += 1
+            out = out.strip()
+            if out != CYLC_VERSION:
+                warn[user_at_host] = out
+        else:
+            print >> sys.stderr, 'ERROR ' + user_at_host + ':'
+            print >> sys.stderr, err
+
+    # report results
+    if not warn:
+        if contacted:
+            print "All", contacted, "accounts have cylc-" + CYLC_VERSION
     else:
-        print >> sys.stderr, 'ERROR ' + user_at_host + ':'
-        print >> sys.stderr, err
-
-# report results
-if not warn:
-    if contacted:
-        print "All", contacted, "accounts have cylc-" + CYLC_VERSION
-else:
-    print "WARNING: failed to invoke cylc-%s on %d accounts:" % (
-        CYLC_VERSION, len(warn.keys()))
-    m = max([len(ac) for ac in warn.keys()])
-    for ac, warning in warn.items():
-        print ' ', ac.ljust(m), warning
-    if options.error:
-        sys.exit(1)
+        print "WARNING: failed to invoke cylc-%s on %d accounts:" % (
+            CYLC_VERSION, len(warn.keys()))
+        m = max([len(ac) for ac in warn.keys()])
+        for ac, warning in warn.items():
+            print ' ', ac.ljust(m), warning
+        if options.error:
+            sys.exit(1)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
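
The loop above now invokes "cylc version --host=... --user=..." per task host account and compares each reported version with the local CYLC_VERSION. A minimal sketch of that check for a single, hypothetical account:

    import subprocess
    from cylc.version import CYLC_VERSION

    def remote_cylc_version(user, host):
        """Return the cylc version reported for user/host, or None on failure."""
        argv = ["cylc", "version"]
        if user:
            argv.append("--user=%s" % user)
        if host:
            argv.append("--host=%s" % host)
        proc = subprocess.Popen(
            argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out = proc.communicate()[0]
        if proc.wait() != 0:
            return None
        return out.strip()

    # Hypothetical account, for illustration only:
    version = remote_cylc_version("fred", "hpc1")
    if version is not None and version != CYLC_VERSION:
        print("fred on hpc1 has cylc-%s, not cylc-%s" % (version, CYLC_VERSION))
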
diff --git a/bin/cylc-conditions b/bin/cylc-conditions
index e559021..ba9d15d 100755
--- a/bin/cylc-conditions
+++ b/bin/cylc-conditions
@@ -17,7 +17,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 usage() {
-    echo "USAGE: cylc [license] warranty [--help]"
+    echo "Usage: cylc [license] warranty [--help]"
     echo "Cylc is release under the GNU General Public License v3.0"
     echo "This command prints the GPL v3.0 license in full."
     echo ""
diff --git a/bin/cylc-copy b/bin/cylc-copy
index 9c98c9a..cdd34b1 100755
--- a/bin/cylc-copy
+++ b/bin/cylc-copy
@@ -16,20 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-from cylc.remote import remrun
-if remrun().execute():
-    sys.exit(0)
-
-import os, re
-import shutil
-from cylc.CylcOptionParsers import cop
-from cylc.mkdir_p import mkdir_p
-from cylc.registration import localdb, RegistrationError
-from cylc.regpath import RegPath
-import cylc.flags
-
-parser = cop( usage = """cylc [db] copy|cp [OPTIONS] REG REG2 TOPDIR
+"""cylc [db] copy|cp [OPTIONS] REG REG2 TOPDIR
 
 Copy suite or group REG to TOPDIR, and register the copy as REG2.
 
@@ -76,72 +63,69 @@ EXAMPLES (using the three suites above):
   Copying suite definition for orange.baz
 % cylc db pr "^orange"
   orange.qux | "Test Suite Two" | /home/bob/orange/qux
-  orange.baz | "Test Suite One" | /home/bob/orange/baz""",
-  argdoc=[("REG", "Source suite name"),
-          ("REG2", "Target suite name"),
-          ("TOPDIR", "Top level target directory.")] )
+  orange.baz | "Test Suite One" | /home/bob/orange/baz"""
 
-parser.add_option( "--db-from",
-        help="Copy suites from another DB (defaults to --db).",
-        metavar='PATH', action="store", default=None, dest="dbfrom" )
+import sys
+from cylc.remote import remrun
+if remrun().execute():
+    sys.exit(0)
+
+import os
+import re
+import shutil
 
-( options, args ) = parser.parse_args()
+import cylc.flags
+from cylc.CylcOptionParsers import cop
+from cylc.mkdir_p import mkdir_p
+from cylc.registration import localdb, RegistrationError
+from cylc.regpath import RegPath
 
-arg_from = args[0]
-arg_to = args[1]
-arg_dir = args[2]
 
-if options.dbfrom:
-    dbfrom = localdb( file=options.dbfrom )
-else:
-    dbfrom = localdb( file=options.db )
-db = localdb( file=options.db )
+def main():
+    parser = cop(__doc__,
+                 argdoc=[("REG", "Source suite name"),
+                         ("REG2", "Target suite name"),
+                         ("TOPDIR", "Top level target directory.")])
 
-try:
-    flist = dbfrom.get_list( '^' + arg_from + r'\b' )
+    parser.add_option(
+        "--db-from",
+        help="Copy suites from another DB (defaults to --db).",
+        metavar='PATH', action="store", default=None, dest="dbfrom")
+
+    (options, args) = parser.parse_args()
+    arg_from = args[0]
+    arg_to = args[1]
+    arg_dir = args[2]
+
+    if options.dbfrom:
+        dbfrom = localdb(file=options.dbfrom)
+    else:
+        dbfrom = localdb(file=options.db)
+    db = localdb(file=options.db)
+
+    flist = dbfrom.get_list('^' + arg_from + r'\b')
     if len(flist) == 0:
-        sys.exit( 'ERROR, no suites matched: ' + arg_from )
+        sys.exit('ERROR, no suites matched: ' + arg_from)
 
     for item in flist:
         freg, fdir, ftitle = item
-        treg = re.sub( r'\b' + arg_from + r'\b', arg_to, freg )
+        treg = re.sub(r'\b' + arg_from + r'\b', arg_to, freg)
 
         tdir = RegPath(treg).get_fpath()
-        tdir = os.path.join( arg_dir, tdir )
+        tdir = os.path.join(arg_dir, tdir)
 
-        if os.path.exists( tdir ):
-            print >> sys.stderr, 'SKIPPING, directory already exists: ' + tdir
-            continue
-
-        tdir = os.path.abspath( tdir )
-        #print 'Making directory ', tdir
-        try:
-            mkdir_p( os.path.dirname(tdir))
-        except Exception,x:
-            print >> sys.stderr, 'ERROR, illegal target directory?', tdir
-            print >> sys.stderr, x
-            continue
+        tdir = os.path.abspath(tdir)
+        mkdir_p(os.path.dirname(tdir))
 
         print 'COPY', fdir, '\n  TO', tdir
-        try:
-            shutil.copytree( fdir, tdir )
-        except OSError, x:
-            print >> sys.stderr,x
-            continue
-        except shutil.Error, x:
-            # if one or more files could not be read
-            print 'WARNINGS:'
-            for e in x.args[0]:
-                src, trg, err = e
-                print >> sys.stderr, "  ", src, '-->', trg
-                print >> sys.stderr, "     ", err
-        try:
-            db.register( treg, tdir )
-        except RegistrationError, x:
-            print >> sys.stderr, 'ERROR, ' + str(x)
-            continue
-
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+        shutil.copytree(fdir, tdir)
+        db.register(treg, tdir)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
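
The simplified loop above renames each matching registration by substituting the source prefix, copies the suite definition with shutil.copytree, and re-registers the copy. A minimal sketch of the rename step, reusing the "orange" example names from the docstring above, with a hypothetical target name "lime" and a plain dot-to-path translation standing in for RegPath:

    import os
    import re

    arg_from, arg_to, arg_dir = "orange", "lime", "/home/bob/lime"

    for freg, fdir in [("orange.qux", "/home/bob/orange/qux")]:
        # Rewrite only the whole-word source prefix in the registration name.
        treg = re.sub(r'\b' + arg_from + r'\b', arg_to, freg)   # "lime.qux"
        # Stand-in for RegPath(treg).get_fpath(): dots become path separators.
        tdir = os.path.abspath(os.path.join(arg_dir, treg.replace('.', os.sep)))
        print('COPY %s\n  TO %s' % (fdir, tdir))
        # then: shutil.copytree(fdir, tdir); db.register(treg, tdir)  # as above
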
diff --git a/bin/cylc-cycle-point b/bin/cylc-cycle-point
index 7009743..5936896 100755
--- a/bin/cylc-cycle-point
+++ b/bin/cylc-cycle-point
@@ -17,14 +17,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, sys, re
-import cylc.cycling.iso8601
-import isodatetime.data
-import isodatetime.dumpers
-import isodatetime.parsers
-from optparse import OptionParser
-
-parser = OptionParser( usage = """cylc [util] cycle-point [OPTIONS] [POINT]
+"""cylc [util] cycle-point [OPTIONS] [POINT]
 
 Cycle point date-time offset computation, and filename templating.
 
@@ -67,180 +60,215 @@ Other examples:
   foo-2012-08.nc
 
 Arguments:
-   [CYCLE]  ISO 8601 date-time, e.g. 20140201T0000Z, default
-      $CYLC_TASK_CYCLE_POINT""" )
+   [POINT]  ISO 8601 date-time, e.g. 20140201T0000Z, default
+      $CYLC_TASK_CYCLE_POINT"""
+
+import os
+import sys
+import re
+from optparse import OptionParser
 
-parser.add_option( "--offset-hours", metavar="HOURS",
+import cylc.flags
+import cylc.cycling.iso8601
+import isodatetime.data
+import isodatetime.dumpers
+import isodatetime.parsers
+
+
+def main():
+    parser = OptionParser(__doc__)
+
+    parser.add_option(
+        "--offset-hours", metavar="HOURS",
         help="Add N hours to CYCLE (may be negative)",
-        action="store", dest="offsethours" )
+        action="store", dest="offsethours")
 
-parser.add_option( "--offset-days", metavar="DAYS",
+    parser.add_option(
+        "--offset-days", metavar="DAYS",
         help="Add N days to CYCLE (N may be negative)",
-        action="store", dest="offsetdays" )
+        action="store", dest="offsetdays")
 
-parser.add_option( "--offset-months", metavar="MONTHS",
+    parser.add_option(
+        "--offset-months", metavar="MONTHS",
         help="Add N months to CYCLE (N may be negative)",
-        action="store", dest="offsetmonths" )
+        action="store", dest="offsetmonths")
 
-parser.add_option( "--offset-years", metavar="YEARS",
+    parser.add_option(
+        "--offset-years", metavar="YEARS",
         help="Add N years to CYCLE (N may be negative)",
-        action="store", dest="offsetyears" )
+        action="store", dest="offsetyears")
 
-parser.add_option( "--offset", metavar="ISO_OFFSET",
+    parser.add_option(
+        "--offset", metavar="ISO_OFFSET",
         help="Add an ISO 8601-based interval representation to CYCLE",
-        action="store", dest="offset" )
+        action="store", dest="offset")
 
-parser.add_option( "--template", metavar="TEMPLATE",
+    parser.add_option(
+        "--template", metavar="TEMPLATE",
         help="Filename template string or variable",
-        action="store", dest="template" )
+        action="store", dest="template")
 
-parser.add_option( "--time-zone", metavar="TEMPLATE",
-        help=("Control the formatting of the result's timezone e.g. " +
-              "(Z, +13:00, -hh"),
-        action="store", default=None, dest="time_zone" )
+    parser.add_option(
+        "--time-zone", metavar="TEMPLATE",
+        help="Control the formatting of the result's timezone e.g. "
+             "(Z, +13:00, -hh",
+        action="store", default=None, dest="time_zone")
 
-parser.add_option( "--num-expanded-year-digits", metavar="NUMBER",
-        help=("Specify a number of expanded year digits to print in the " +
-              "result"),
-        action="store", default=0, dest="num_expanded_year_digits" )
+    parser.add_option(
+        "--num-expanded-year-digits", metavar="NUMBER",
+        help="Specify a number of expanded year digits to print in the result",
+        action="store", default=0, dest="num_expanded_year_digits")
 
-parser.add_option( "--print-year", help="Print only CCYY of result",
-        action="store_true", default=False, dest="print_year" )
+    parser.add_option(
+        "--print-year", help="Print only CCYY of result",
+        action="store_true", default=False, dest="print_year")
 
-parser.add_option( "--print-month", help="Print only MM of result",
-        action="store_true", default=False, dest="print_month" )
+    parser.add_option(
+        "--print-month", help="Print only MM of result",
+        action="store_true", default=False, dest="print_month")
 
-parser.add_option( "--print-day", help="Print only DD of result",
-        action="store_true", default=False, dest="print_day" )
+    parser.add_option(
+        "--print-day", help="Print only DD of result",
+        action="store_true", default=False, dest="print_day")
 
-parser.add_option( "--print-hour", help="Print only HH of result",
-        action="store_true", default=False, dest="print_hour" )
+    parser.add_option(
+        "--print-hour", help="Print only HH of result",
+        action="store_true", default=False, dest="print_hour")
 
+    (options, args) = parser.parse_args()
 
-(options, args) = parser.parse_args()
+    if len(args) == 0:
+        # input cycle point must be defined in the environment.
+        if 'CYLC_TASK_CYCLE_POINT' not in os.environ:
+            parser.error("Provide CYCLE arg, or define $CYLC_TASK_CYCLE_POINT")
+        cycle_point_string = os.environ['CYLC_TASK_CYCLE_POINT']
 
-if len( args ) == 0:
-    # input cycle point must be defined in the environment.
-    if 'CYLC_TASK_CYCLE_POINT' not in os.environ:
-        parser.error( "Provide CYCLE arg, or define $CYLC_TASK_CYCLE_POINT" )
-    cycle_point_string = os.environ['CYLC_TASK_CYCLE_POINT'] 
+    elif len(args) == 1:
+        # must be cycle point
+        cycle_point_string = args[0]
 
-elif len( args ) == 1:
-    # must be cycle point
-    cycle_point_string = args[0]
-
-else:
-    parser.error( "Wrong number of arguments!" )
-
-# template string
-template = None
-if options.template:
-    if options.print_month or options.print_year or \
-            options.print_day or options.print_hour:
-                parser.error( '"print only" options are incompatible with templating' )
-    tmp = options.template
-    if tmp in os.environ:
-        # name of a variable that contains a template
-        template = os.environ[ tmp ]
     else:
-        # or a raw template string
-        template = tmp
-else:
-    n_chosen = 0
-
-    if options.print_year:
-        n_chosen += 1
-        if options.num_expanded_year_digits:
-            template = u"±XCCYY"
+        parser.error("Wrong number of arguments!")
+
+    # template string
+    template = None
+    if options.template:
+        if (options.print_month or options.print_year or options.print_day or
+                options.print_hour):
+            parser.error(
+                '"print only" options are incompatible with templating')
+        tmp = options.template
+        if tmp in os.environ:
+            # name of a variable that contains a template
+            template = os.environ[tmp]
         else:
-            template = "CCYY"
-
-    if options.print_month:
-        n_chosen += 1
-        template = "MM"
-
-    if options.print_day:
-        n_chosen += 1
-        template = "DD"
-
-    if options.print_hour:
-        n_chosen += 1
-        template = "%H"
-
-    if n_chosen != 0 and n_chosen != 1:
-        parser.error( "Choose NONE or ONE of print_(year|month|day|hour)" )
-
-if re.match("\d{10}$", cycle_point_string):
-    # Auto-detect prev Cylc format.
-    if template is None:
-        template="%Y%m%d%H"
+            # or a raw template string
+            template = tmp
     else:
-        template = template.replace("YYYY", "%Y")
-        template = template.replace("MM", "%m")
-        template = template.replace("DD", "%d")
-        template = template.replace("HH", "%H")
-
-cylc.cycling.iso8601.init(
-    num_expanded_year_digits=options.num_expanded_year_digits,
-    time_zone=options.time_zone
-)
-iso_point_parser = isodatetime.parsers.TimePointParser(
-    num_expanded_year_digits=options.num_expanded_year_digits
-)
-iso_point_dumper = isodatetime.dumpers.TimePointDumper(
-    num_expanded_year_digits=options.num_expanded_year_digits
-)
-try:
-    cycle_point = iso_point_parser.parse(
-        cycle_point_string, dump_as_parsed=(template is None))
-except ValueError:
-    # May be in prev Cylc format.
-    try:
-        cycle_point = cylc.cycling.iso8601.point_parse(cycle_point_string)
-    except ValueError as exc:
-        parser.error( 'ERROR: invalid cycle: %s' % exc )
-
-offset_props = {}
-
-if options.offsethours:
-    try:
-        offset_props["hours"] = int( options.offsethours )
-    except ValueError:
-        parser.error( 'ERROR: offset must be integer' )
-
-if options.offsetdays:
-    try:
-        offset_props["days"] = int( options.offsetdays )
-    except ValueError:
-        parser.error( 'ERROR: offset must be integer' )
-
-if options.offsetmonths:
-    try:
-        offset_props["months"] = int( options.offsetmonths )
-    except ValueError:
-        parser.error( 'ERROR: offset must be integer' )
-
-if options.offsetyears:
+        n_chosen = 0
+
+        if options.print_year:
+            n_chosen += 1
+            if options.num_expanded_year_digits:
+                template = u"±XCCYY"
+            else:
+                template = "CCYY"
+
+        if options.print_month:
+            n_chosen += 1
+            template = "MM"
+
+        if options.print_day:
+            n_chosen += 1
+            template = "DD"
+
+        if options.print_hour:
+            n_chosen += 1
+            template = "%H"
+
+        if n_chosen != 0 and n_chosen != 1:
+            parser.error("Choose NONE or ONE of print_(year|month|day|hour)")
+
+    if re.match("\d{10}$", cycle_point_string):
+        # Auto-detect prev Cylc format.
+        if template is None:
+            template = "%Y%m%d%H"
+        else:
+            template = template.replace("YYYY", "%Y")
+            template = template.replace("MM", "%m")
+            template = template.replace("DD", "%d")
+            template = template.replace("HH", "%H")
+
+    cylc.cycling.iso8601.init(
+        num_expanded_year_digits=options.num_expanded_year_digits,
+        time_zone=options.time_zone
+    )
+    iso_point_parser = isodatetime.parsers.TimePointParser(
+        num_expanded_year_digits=options.num_expanded_year_digits
+    )
+    iso_point_dumper = isodatetime.dumpers.TimePointDumper(
+        num_expanded_year_digits=options.num_expanded_year_digits
+    )
     try:
-        offset_props["years"] = int( options.offsetyears )
+        cycle_point = iso_point_parser.parse(
+            cycle_point_string, dump_as_parsed=(template is None))
     except ValueError:
-        parser.error( 'ERROR: offset must be integer' )
+        # May be in prev Cylc format.
+        try:
+            cycle_point = cylc.cycling.iso8601.point_parse(cycle_point_string)
+        except ValueError as exc:
+            parser.error('ERROR: invalid cycle: %s' % exc)
+
+    offset_props = {}
+
+    if options.offsethours:
+        try:
+            offset_props["hours"] = int(options.offsethours)
+        except ValueError:
+            parser.error('ERROR: offset must be integer')
+
+    if options.offsetdays:
+        try:
+            offset_props["days"] = int(options.offsetdays)
+        except ValueError:
+            parser.error('ERROR: offset must be integer')
+
+    if options.offsetmonths:
+        try:
+            offset_props["months"] = int(options.offsetmonths)
+        except ValueError:
+            parser.error('ERROR: offset must be integer')
+
+    if options.offsetyears:
+        try:
+            offset_props["years"] = int(options.offsetyears)
+        except ValueError:
+            parser.error('ERROR: offset must be integer')
+
+    offset = isodatetime.data.Duration(**offset_props)
+
+    if options.offset:
+        opt_offset = options.offset
+        sign_factor = 1
+        if options.offset.startswith("-"):
+            opt_offset = options.offset[1:]
+            sign_factor = -1
+        try:
+            offset += isodatetime.parsers.DurationParser().parse(
+                opt_offset) * sign_factor
+        except ValueError as exc:
+            parser.error('ERROR: offset not valid: %s' % exc)
+    cycle_point += offset
+    if template is None:
+        print cycle_point
+    else:
+        print iso_point_dumper.dump(cycle_point, template)
 
-offset = isodatetime.data.Duration(**offset_props)
 
-if options.offset:
-    opt_offset = options.offset
-    sign_factor = 1
-    if options.offset.startswith("-"):
-        opt_offset = options.offset[1:]
-        sign_factor = -1
+if __name__ == "__main__":
     try:
-        offset += isodatetime.parsers.DurationParser().parse(
-            opt_offset) * sign_factor
-    except ValueError as exc:
-        parser.error( 'ERROR: offset not valid: %s' % exc )
-cycle_point += offset
-if template is None:
-    print cycle_point
-else:
-    print iso_point_dumper.dump(cycle_point, template)
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
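
For reference, a minimal standalone sketch of the offset and template handling that the rewritten main() above performs. The cycle point, offsets and template here are made-up illustrations; only isodatetime calls already used by the script are assumed.

    import isodatetime.data
    import isodatetime.dumpers
    import isodatetime.parsers

    point = isodatetime.parsers.TimePointParser().parse("20100824T0600Z")

    # options.offsethours=6 would populate offset_props like this;
    # options.offset='-P1D' is parsed unsigned and then negated via
    # sign_factor, as in main() above.
    offset = isodatetime.data.Duration(hours=6)
    offset += isodatetime.parsers.DurationParser().parse("P1D") * -1

    # Dump with an explicit strftime-style template, as the script does
    # when a template has been set.
    dumper = isodatetime.dumpers.TimePointDumper()
    print dumper.dump(point + offset, "%Y%m%d%H")  # should print 2010082312
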
diff --git a/bin/cylc-depend b/bin/cylc-depend
deleted file mode 100755
index da2ba8e..0000000
--- a/bin/cylc-depend
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
-    from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
-        sys.exit(0)
-
-import os
-from cylc.prompt import prompt
-from cylc.task_id import TaskID
-from cylc import cylc_pyro_client
-from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_pyro
-import cylc.flags
-
-parser = cop( """cylc [control] depend [OPTIONS] ARGS
-
-Add new dependencies on the fly to tasks in running suite REG. If DEP
-is a task ID the target TASK will depend on that task finishing,
-otherwise DEP can be an explicit quoted message such as
-  "Data files uploaded for 2011080806"
-(presumably there will be another task in the suite, or you will insert
-one, that reports that message as an output).
-
-Prerequisites added on the fly are not propagated to the successors
-of TASK, and they will not persist in TASK across a suite restart.""",
-    pyro=True, argdoc = [ ( 'REG', 'Suite name' ),
-                          ( 'TASK', 'Target task' ),
-                          ( 'DEP', 'New dependency' )] )
-
-(options, args) = parser.parse_args()
-
-suite, pphrase = prep_pyro( args[0], options ).execute()
-
-task_id = args[1]
-dep = args[2]
-
-if TaskID.is_valid_id(dep):
-    msg = dep + ' succeeded'
-else:
-    msg = dep
-
-if not TaskID.is_valid_id(task_id):
-    sys.exit("Invalid task ID: " + task_id)
-
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
-    prompt( 'Add new prerequisite to ' + task_id + ' in ' + suite, options.force )
-    result = proxy.put( 'add prerequisite', task_id, msg )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
diff --git a/bin/cylc-diff b/bin/cylc-diff
index dee35f2..f6db613 100755
--- a/bin/cylc-diff
+++ b/bin/cylc-diff
@@ -16,20 +16,37 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [prep] diff|compare [OPTIONS] SUITE1 SUITE2
+
+Compare two suite definitions and display any differences.
+
+Differencing is done after parsing the suite.rc files so it takes
+account of default values that are not explicitly defined, it disregards
+the order of configuration items, and it sees any include-file content
+after inlining has occurred.
+
+Note that seemingly identical suites normally differ due to inherited
+default configuration values (e.g. the default job submission log
+directory).
+
+Files in the suite bin directory and other sub-directories of the
+suite definition directory are not currently differenced."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
-from cylc.CylcOptionParsers import cop
-from cylc.config import config
 import cylc.flags
+from cylc.CylcOptionParsers import cop
+from cylc.config import SuiteConfig
 
 n_oone = 0
 n_otwo = 0
 n_diff = 0
 
-def diffdict( one, two, oone, otwo, diff ):
+
+def diffdict(one, two, oone, otwo, diff):
     global n_oone, n_otwo, n_diff
     # Recursively difference two dictionaries in which any element
     # may be another dictionary, keeping items that appear only
@@ -39,13 +56,13 @@ def diffdict( one, two, oone, otwo, diff ):
             oone[key] = one[key]
             n_oone += 1
         elif one[key] != two[key]:
-            if isinstance( one[key], dict ):
+            if isinstance(one[key], dict):
                 for item in oone, otwo, diff:
                     if key not in item:
                         item[key] = {}
-                diffdict( one[key], two[key], oone[key], otwo[key], diff[key] )
+                diffdict(one[key], two[key], oone[key], otwo[key], diff[key])
             else:
-                diff[key] = ( one[key], two[key] )
+                diff[key] = (one[key], two[key])
                 n_diff += 1
 
     for key in two:
@@ -53,7 +70,8 @@ def diffdict( one, two, oone, otwo, diff ):
             otwo[key] = two[key]
             n_otwo += 1
 
-def prdict( dct, arrow='<', section='', level=0, diff=False, nested=False ):
+
+def prdict(dct, arrow='<', section='', level=0, diff=False, nested=False):
     # Recursively print, in pseudo 'diff' format, the contents of
     # one of the three dictionaries populated by the diffdict() function
     # above (any element may itself be a dictionary).
@@ -73,13 +91,15 @@ def prdict( dct, arrow='<', section='', level=0, diff=False, nested=False ):
     foo = False
 
     for key in dct:
-        if isinstance( dct[key], dict ):
+        if isinstance(dct[key], dict):
             lvl = level + 1
             if nested:
-                pre = prfx + '\n' + '   '*lvl
+                pre = prfx + '\n' + '   ' * lvl
             else:
                 pre = prfx
-            prdict( dct[key], arrow, pre + '['*lvl + str(key) + ']'*lvl, lvl, diff, nested )
+            prdict(dct[key], arrow,
+                   pre + '[' * lvl + str(key) + ']' * lvl, lvl,
+                   diff, nested)
         else:
             if not foo:
                 if nested:
@@ -94,76 +114,72 @@ def prdict( dct, arrow='<', section='', level=0, diff=False, nested=False ):
             else:
                 print ' ' + arrow + '  ', key, '=', dct[key]
 
-parser = cop( """cylc [prep] diff|compare [OPTIONS] SUITE1 SUITE2
-
-Compare two suite definitions and display any differences.
-
-Differencing is done after parsing the suite.rc files so it takes
-account of default values that are not explicitly defined, it disregards
-the order of configuration items, and it sees any include-file content
-after inlining has occurred.
-
-Note that seemingly identical suites normally differ due to inherited
-default configuration values (e.g. the default job submission log
-directory).
-
-Files in the suite bin directory and other sub-directories of the
-suite definition directory are not currently differenced.""",
-    jset=True, prep=True, twosuites=True,
-    argdoc=[('SUITE1', 'Suite name or path'), ('SUITE2', 'Suite name or path')] )
-
-parser.add_option( "-n", "--nested", help="print suite.rc "
-        "section headings in nested form.",
-        action="store_true", default=False, dest="nested" )
-
-(options, args) = parser.parse_args()
-
-suite1, suite1rc, junk = parser.get_suite()
-suite2, suite2rc, junk = parser.get_suite(index=1)
-
-try:
-    print "Parsing", suite1
-    config1 = config( suite1, suite1rc,
-            template_vars=options.templatevars,
-            template_vars_file=options.templatevars_file )
-    print "Parsing", suite2
-    config2 = config( suite2, suite2rc,
-            template_vars=options.templatevars,
-            template_vars_file=options.templatevars_file )
-except Exception,x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
-
-if config1 == config2: # does this work, or do we need to do key by key comparison?
-    print "Suite definitions " + suite1 + " and " + suite2 + " are identical."
-    sys.exit(0)
 
-print "Suite definitions " + suite1 + " and " + suite2 + " differ."
-
-suite1_only = {}
-suite2_only = {}
-diff_1_2 = {}
-
-diffdict( config1, config2, suite1_only, suite2_only, diff_1_2 )
-
-if n_oone > 0:
-    print
-    msg = str(n_oone) + ' items only in ' + suite1 + ' (<)'
-    print msg
-    #print '-' * len(msg)
-    prdict( suite1_only, '<', nested=options.nested )
-
-if n_otwo > 0:
-    print
-    msg = str(n_otwo) + ' items only in ' + suite2 + ' (>)'
-    print msg
-    #print '-' * len(msg)
-    prdict( suite2_only, '>', nested=options.nested )
-
-if n_diff > 0:
-    print
-    msg = str(n_diff) + ' common items differ ' + suite1 + '(<) ' + suite2 + '(>)'
-    print msg
-    #print '-' * len(msg)
-    prdict( diff_1_2, '', diff=True, nested=options.nested )
+def main():
+    parser = cop(
+        __doc__, jset=True, prep=True, twosuites=True,
+        argdoc=[('SUITE1', 'Suite name or path'),
+                ('SUITE2', 'Suite name or path')])
+
+    parser.add_option(
+        "-n", "--nested",
+        help="print suite.rc section headings in nested form.",
+        action="store_true", default=False, dest="nested")
+
+    (options, args) = parser.parse_args()
+
+    suite1, suite1rc, junk = parser.get_suite()
+    suite2, suite2rc, junk = parser.get_suite(index=1)
+    if suite1 == suite2:
+        parser.error("You can't diff a single suite.")
+    print "Parsing %s (%s)" % (suite1, suite1rc)
+    config1 = SuiteConfig.get_inst(
+        suite1, suite1rc,
+        template_vars=options.templatevars,
+        template_vars_file=options.templatevars_file).cfg
+    SuiteConfig._FORCE = True
+    print "Parsing %s (%s)" % (suite2, suite2rc)
+    config2 = SuiteConfig.get_inst(
+        suite2, suite2rc,
+        template_vars=options.templatevars,
+        template_vars_file=options.templatevars_file).cfg
+
+    if config1 == config2:
+        print "Suite definitions %s and %s are identical" % (suite1, suite2)
+        sys.exit(0)
+
+    print "Suite definitions %s and %s differ" % (suite1, suite2)
+
+    suite1_only = {}
+    suite2_only = {}
+    diff_1_2 = {}
+
+    diffdict(config1, config2, suite1_only, suite2_only, diff_1_2)
+
+    if n_oone > 0:
+        print
+        msg = str(n_oone) + ' items only in ' + suite1 + ' (<)'
+        print msg
+        prdict(suite1_only, '<', nested=options.nested)
+
+    if n_otwo > 0:
+        print
+        msg = str(n_otwo) + ' items only in ' + suite2 + ' (>)'
+        print msg
+        prdict(suite2_only, '>', nested=options.nested)
+
+    if n_diff > 0:
+        print
+        msg = (str(n_diff) + ' common items differ ' +
+               suite1 + '(<) ' + suite2 + '(>)')
+        print msg
+        prdict(diff_1_2, '', diff=True, nested=options.nested)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
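
To make the recursive comparison concrete, here is a small hypothetical illustration of diffdict() as defined above (assuming it runs inside this script, where the n_* counters exist; the dictionaries stand in for two parsed suite configurations):

    one = {"runtime": {"modelX": {"script": "run-a.sh", "retries": "3"}}}
    two = {"runtime": {"modelX": {"script": "run-b.sh"},
                       "modelY": {"script": "run-c.sh"}}}

    only_one, only_two, differ = {}, {}, {}
    diffdict(one, two, only_one, only_two, differ)

    # only_one -> {'runtime': {'modelX': {'retries': '3'}}}
    # only_two -> the modelY section (plus empty placeholder sub-dicts)
    # differ   -> {'runtime': {'modelX': {'script': ('run-a.sh', 'run-b.sh')}}}

prdict() then renders each of these three dictionaries in the pseudo-diff format that main() prints.
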
diff --git a/bin/cylc-documentation b/bin/cylc-documentation
index 550725c..cb69c35 100755
--- a/bin/cylc-documentation
+++ b/bin/cylc-documentation
@@ -16,6 +16,22 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [info] documentation|browse [OPTIONS] [SUITE]
+
+Open cylc or suite documentation in your browser or PDF viewer (as defined
+in cylc global config files).
+
+% cylc doc [OPTIONS]
+   Open local or internet [--www] cylc documentation (locations must be
+specified in cylc global config files).
+
+% cylc doc -u [-t TASK] SUITE
+    Open suite or task documentation if corresponding URL items are specified
+in the suite definition.
+
+Arguments:
+   [TARGET]    File, URL, or suite name"""
+
 import sys
 for arg in sys.argv[1:]:
     if arg.startswith('--host=') or arg.startswith('--user='):
@@ -27,127 +43,128 @@ import os
 import re
 import subprocess
 from optparse import OptionParser
+
+import cylc.flags
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.run_get_stdout import run_get_stdout
 from cylc.suite_host import get_hostname
 from cylc.owner import user
 
-parser = OptionParser(
-    """cylc [info] documentation|browse [OPTIONS] [SUITE]
 
-Open cylc or suite documentation in your browser or PDF viewer (as defined
-in cylc global config files).
-
-% cylc doc [OPTIONS]
-   Open local or internet [--www] cylc documentation (locations must be
-specified in cylc global config files).
-
-% cylc doc -u [-t TASK] SUITE
-    Open suite or task documentation if corresponding URL items are specified
-in the suite definition.
-                      
-Arguments:
-   [TARGET]    File, URL, or suite name""")
-
-parser.add_option(
-    "-p", "--pdf", help="Open the PDF User Guide directly.",
-    action="store_true", default=False, dest="pdf")
-
-parser.add_option(
-    "-w", "--www", help="Open the cylc internet homepage",
-    action="store_true", default=False, dest="www")
-
-parser.add_option(
-    "-t", "--task", help="Browse task documentation URLs.",
-    metavar="TASK_NAME", action="store", default=None, dest="task_name")
-
-parser.add_option(
-    "-s", "--stdout", help="Just print the URL to stdout.",
-    action="store_true", default=False, dest="stdout")
-
-parser.add_option(
-    "--user",
-    help="Other user account name. This results in "
-         "command reinvocation on the remote account.",
-    metavar="USER", default=user, action="store", dest="owner")
-
-parser.add_option(
-    "--host",
-    help="Other host name. This results in "
-         "command reinvocation on the remote account.",
-    metavar="HOST", action="store", default=get_hostname(), dest="host")
-
-(options, args) = parser.parse_args()
-
-intranet_url = GLOBAL_CFG.get(['documentation','urls','local index'])
-internet_url = GLOBAL_CFG.get(['documentation','urls','internet homepage'])
-html_file = GLOBAL_CFG.get(['documentation','files','html index'])
-html_viewer = GLOBAL_CFG.get(['document viewers','html'])
-pdf_file = GLOBAL_CFG.get(['documentation','files','pdf user guide'])
-pdf_viewer = GLOBAL_CFG.get(['document viewers','pdf'])
-if len(args) == 0:
-    # Cylc documentation.
-    if options.pdf:
-        # Force PDF.
-        viewer = pdf_viewer
-        target = pdf_file
-    else:
-        # HTML documentation index.
-        viewer = html_viewer
-        if options.www:
-            # Force internet.
-            if internet_url is not None:
-                target = internet_url
-            else:
-                sys.exit("ERROR: cylc internet URL not defined.")
-        elif intranet_url is not None:
-            # Intranet.
-            target = intranet_url
+def main():
+    parser = OptionParser(__doc__)
+
+    parser.add_option(
+        "-p", "--pdf", help="Open the PDF User Guide directly.",
+        action="store_true", default=False, dest="pdf")
+
+    parser.add_option(
+        "-w", "--www", help="Open the cylc internet homepage",
+        action="store_true", default=False, dest="www")
+
+    parser.add_option(
+        "-t", "--task", help="Browse task documentation URLs.",
+        metavar="TASK_NAME", action="store", default=None, dest="task_name")
+
+    parser.add_option(
+        "-s", "--stdout", help="Just print the URL to stdout.",
+        action="store_true", default=False, dest="stdout")
+
+    parser.add_option(
+        "--user",
+        help="Other user account name. This results in "
+             "command reinvocation on the remote account.",
+        metavar="USER", default=user, action="store", dest="owner")
+
+    parser.add_option(
+        "--host",
+        help="Other host name. This results in "
+             "command reinvocation on the remote account.",
+        metavar="HOST", action="store", default=get_hostname(), dest="host")
+
+    (options, args) = parser.parse_args()
+
+    intranet_url = GLOBAL_CFG.get(['documentation', 'urls', 'local index'])
+    internet_url = GLOBAL_CFG.get(['documentation', 'urls',
+                                   'internet homepage'])
+    html_file = GLOBAL_CFG.get(['documentation', 'files', 'html index'])
+    html_viewer = GLOBAL_CFG.get(['document viewers', 'html'])
+    pdf_file = GLOBAL_CFG.get(['documentation', 'files', 'pdf user guide'])
+    pdf_viewer = GLOBAL_CFG.get(['document viewers', 'pdf'])
+    if len(args) == 0:
+        # Cylc documentation.
+        if options.pdf:
+            # Force PDF.
+            viewer = pdf_viewer
+            target = pdf_file
         else:
-            # Open in file:// mode as a last resort.
+            # HTML documentation index.
+            viewer = html_viewer
+            if options.www:
+                # Force internet.
+                if internet_url is not None:
+                    target = internet_url
+                else:
+                    sys.exit("ERROR: cylc internet URL not defined.")
+            elif intranet_url is not None:
+                # Intranet.
+                target = intranet_url
+            else:
+                # Open in file:// mode as a last resort.
+                print >> sys.stderr, ("WARNING: cylc intranet URL not "
+                                      "defined, trying file mode.")
+                target = html_file
+
+    elif len(args) == 1:
+        # Suite or task documentation.
+        if options.pdf or options.www:
             print >> sys.stderr, (
-                "WARNING: cylc intranet URL not defined, trying file mode.")
-            target = html_file
-
-elif len(args) == 1:
-    # Suite or task documentation.
-    if options.pdf or options.www:
-        print >> sys.stderr, (
-            "(Note: --pdf and --www are ignored for suite documentation).")
-    suite = args[0]
-    if options.task_name:
-        # Task documentation.
-        res, stdout = run_get_stdout(
-            "cylc get-suite-config -i [runtime][%s]URL %s" % (
-                options.task_name, suite))
+                "(Note: --pdf and --www are ignored for suite documentation).")
+        suite = args[0]
+        if options.task_name:
+            # Task documentation.
+            res, stdout = run_get_stdout(
+                "cylc get-suite-config -i [runtime][%s]URL %s" % (
+                    options.task_name, suite))
+        else:
+            # Suite documentation.
+            res, stdout = run_get_stdout(
+                "cylc get-suite-config -i URL %s" % suite)
+        if not res:
+            # (Illegal config item)
+            sys.exit(stdout)
+        elif len(stdout) == 0:
+            if options.task_name is not None:
+                sys.exit("ERROR: No URL defined for %s in %s." % (
+                    options.task_name, suite))
+            else:
+                sys.exit("ERROR: No URL defined for %s." % suite)
+        target = stdout[0]
+        viewer = html_viewer
     else:
-        # Suite documentation.
-        res, stdout = run_get_stdout(
-            "cylc get-suite-config -i URL %s" % suite)
-    if not res:
-        # (Illegal config item)
-        sys.exit(stdout)
-    elif len(stdout) == 0:
-        sys.exit("ERROR: No URL defined for %s in %s." % (
-            options.task_name, suite))
-    target = stdout[0]
-    viewer = html_viewer
-
-else:
-    parser.error("Too many arguments.")
-
-if target in [pdf_file, html_file] and not os.path.isfile(target):
-    sys.exit("ERROR, file not found: %s (see your cylc admin)" % target)
-
-# viewer may have spaces (e.g. 'firefox --no-remote'):
-command = '%s %s' % (viewer, target)
-command_list = re.split(' ', command)
-
-if options.stdout:
-    print target
-    sys.exit(0)
-
-retcode = subprocess.call(command_list)
-if retcode != 0:
-    print >> sys.stderr, 'ERROR, command failed: %s' % command
-sys.exit(retcode)
+        parser.error("Too many arguments.")
+
+    if target in [pdf_file, html_file] and not os.path.isfile(target):
+        sys.exit("ERROR, file not found: %s (see your cylc admin)" % target)
+
+    # viewer may have spaces (e.g. 'firefox --no-remote'):
+    command = '%s %s' % (viewer, target)
+    command_list = re.split(' ', command)
+
+    if options.stdout:
+        print target
+        sys.exit(0)
+
+    retcode = subprocess.call(command_list)
+    if retcode != 0:
+        print >> sys.stderr, 'ERROR, command failed: %s' % command
+    sys.exit(retcode)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
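
A further, purely hypothetical, usage example for the options above: with a URL item set for a task in the suite definition,

    % cylc doc -s -t modelX my.suite

prints the [runtime][modelX]URL value to stdout instead of opening it, which is exactly the get-suite-config lookup performed in main().
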
diff --git a/bin/cylc-dump b/bin/cylc-dump
index a4adb46..797a928 100755
--- a/bin/cylc-dump
+++ b/bin/cylc-dump
@@ -16,20 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
-    from cylc.remote import remrun
-    if remrun().execute():
-        sys.exit(0)
-
-from cylc import cylc_pyro_client, dump
-from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_pyro
-from cylc.dump import dump_to_stdout
-import cylc.flags
-
-parser = cop( """cylc [info] dump [OPTIONS] ARGS
+"""cylc [info] dump [OPTIONS] ARGS
 
 Print state information (e.g. the state of each task) from a running
 suite. For small suites 'watch cylc [info] dump SUITE' is an effective
@@ -43,54 +30,76 @@ Examples:
  % cylc [info] dump --tasks --sort SUITE | grep running
 
  Display the state of all tasks in a particular cycle point:
- % cylc [info] dump -t SUITE | grep 2010082406""", pyro=True, noforce=True )
+ % cylc [info] dump -t SUITE | grep 2010082406"""
+
+import sys
+if '--use-ssh' in sys.argv[1:]:
+    sys.argv.remove('--use-ssh')
+    from cylc.remote import remrun
+    if remrun().execute():
+        sys.exit(0)
+
+import cylc.flags
+from cylc.network.suite_state import StateSummaryClient
+from cylc.CylcOptionParsers import cop
+from cylc.dump import dump_to_stdout
 
-parser.add_option( "-g", "--global", help="Global information only.",
-        action="store_true", default=False, dest="global_only" )
 
-parser.add_option( "-t", "--tasks", help="Task states only.",
-        action="store_true", default=False, dest="tasks_only" )
+def main():
+    parser = cop(__doc__, pyro=True, noforce=True)
 
-parser.add_option( "-s", "--sort", help="Task states only; "
-        "sort by cycle point instead of name.",
-        action="store_true", default=False, dest="sort_by_cycle" )
+    parser.add_option(
+        "-g", "--global", help="Global information only.",
+        action="store_true", default=False, dest="global_only")
 
-( options, args ) = parser.parse_args()
+    parser.add_option(
+        "-t", "--tasks", help="Task states only.",
+        action="store_true", default=False, dest="tasks_only")
 
-try:
-    suite, pphrase = prep_pyro( args[0], options ).execute()
-except:  # Trap SystemExit
+    parser.add_option(
+        "-s", "--sort",
+        help="Task states only; sort by cycle point instead of name.",
+        action="store_true", default=False, dest="sort_by_cycle")
+
+    (options, args) = parser.parse_args()
     suite = args[0]
-    pphrase = None
-
-# default: display all information
-display_tasks = True
-display_global = True
-# check for restricted output
-if options.global_only and options.tasks_only:
-    parser.error( '--tasks and --global are incompatible' )
-
-if options.global_only:
-    display_tasks = False
-if options.tasks_only:
-    display_global = False
-
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy('state_summary' )
-    # get state summary, task names, cycle points
-    [glbl, states, fam_states] = proxy.get_state_summary()
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-
-if display_global:
-    #print 'GLOBALS:'
-    for item in glbl:
-        print item, '=', glbl[item]
-
-if display_tasks:
-    #print 'TASKS:'
-    dump_to_stdout( states, options.sort_by_cycle )
+
+    # default: display all information
+    display_tasks = True
+    display_global = True
+    # check for restricted output
+    if options.global_only and options.tasks_only:
+        parser.error('--tasks and --global are incompatible')
+
+    if options.global_only:
+        display_tasks = False
+    if options.tasks_only:
+        display_global = False
+
+    try:
+        pclient = StateSummaryClient(
+            suite, options.owner, options.host, options.pyro_timeout,
+            options.port, options.db, my_uuid=options.set_uuid,
+            print_uuid=options.print_uuid)
+        # get state summary, task names, cycle points
+        glbl, states, fam_states = pclient.get_suite_state_summary()
+    except Exception, x:
+        if cylc.flags.debug:
+            raise
+        sys.exit(x)
+
+    if display_global:
+        for item in glbl:
+            print item, '=', glbl[item]
+
+    if display_tasks:
+        dump_to_stdout(states, options.sort_by_cycle)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-edit b/bin/cylc-edit
index 3cbcaf3..3789eb5 100755
--- a/bin/cylc-edit
+++ b/bin/cylc-edit
@@ -16,22 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-from cylc.remote import remrun
-if remrun().execute():
-    sys.exit(0)
-
-import os, re
-import datetime
-from shutil import copy
-import subprocess
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.CylcOptionParsers import cop
-from parsec.include import inline, \
-    split_file, backup, backups, newfiles, cleanup, modtimes
-from cylc.wallclock import get_current_time_string
-
-parser = cop( """cylc [prep] edit [OPTIONS] ARGS
+"""cylc [prep] edit [OPTIONS] ARGS
 
 Edit suite definitions without having to move to their directory
 locations, and with optional reversible inlining of include-files. Note
@@ -69,150 +54,162 @@ The edit process is spawned in the foreground as follows:
   % <editor> suite.rc
 Where <editor> is defined in the cylc site/user config files.
 
-See also 'cylc [prep] view'.""", prep=True )
+See also 'cylc [prep] view'."""
+
+import sys
+from cylc.remote import remrun
+if remrun().execute():
+    sys.exit(0)
+
+import os
+import re
+import datetime
+import subprocess
+from shutil import copy
+
+import cylc.flags
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.CylcOptionParsers import cop
+from parsec.include import inline, \
+    split_file, backup, backups, newfiles, cleanup, modtimes
+from cylc.wallclock import get_current_time_string
+
+
+def main():
+    parser = cop(__doc__, prep=True)
 
-parser.add_option( "--inline", "-i",
+    parser.add_option(
+        "--inline", "-i",
         help="Edit with include-files inlined as described above.",
-        action="store_true", default=False, dest="inline" )
+        action="store_true", default=False, dest="inline")
 
-parser.add_option( "--cleanup",
+    parser.add_option(
+        "--cleanup",
         help="Remove backup files left by previous inlined edit sessions.",
-        action="store_true", default=False, dest="cleanup" )
+        action="store_true", default=False, dest="cleanup")
 
-parser.add_option( "--gui", "-g",
-        help="Force use of the configured GUI editor.",
-        action="store_true", default=False, dest="geditor" )
+    parser.add_option(
+        "--gui", "-g", help="Force use of the configured GUI editor.",
+        action="store_true", default=False, dest="geditor")
 
-( options, args ) = parser.parse_args()
+    (options, args) = parser.parse_args()
 
-suite, suiterc, junk = parser.get_suite()
+    suite, suiterc, junk = parser.get_suite()
 
-if options.geditor:
-    editor = GLOBAL_CFG.get( ['editors','gui'] )
-else:
-    editor = GLOBAL_CFG.get( ['editors','terminal'] )
+    if options.geditor:
+        editor = GLOBAL_CFG.get(['editors', 'gui'])
+    else:
+        editor = GLOBAL_CFG.get(['editors', 'terminal'])
 
-suitedir = os.path.dirname(suiterc)
+    suitedir = os.path.dirname(suiterc)
 
-if options.cleanup:
-    # remove backup files left by inlined editing sessions
-    cleanup( suitedir )
-    sys.exit(0)
+    if options.cleanup:
+        # remove backup files left by inlined editing sessions
+        cleanup(suitedir)
+        sys.exit(0)
 
-if not options.inline:
-    # plain old editing.
-    # move to suite def dir
-    try:
-        os.chdir( suitedir )
-    except OSError, x:
-        print >> sys.stderr, x
-        sys.exit(1)
+    if not options.inline:
+        # plain old editing.
+        # move to suite def dir
+        os.chdir(suitedir)
 
-    # edit the suite.rc file
-    if not os.path.isfile( suiterc ):
-        print  >> sys.stderr, 'ERROR, file not found: ', suiterc
-        sys.exit(1)
+        # edit the suite.rc file
+        if not os.path.isfile(suiterc):
+            print >> sys.stderr, 'ERROR, file not found: ', suiterc
+            sys.exit(1)
 
-    # in case editor has options, e.g. 'emacs -nw':
-    command_list = re.split( ' ', editor )
-    command_list.append( suiterc )
-    command = ' '.join( command_list )
-    try:
+        # in case editor has options, e.g. 'emacs -nw':
+        command_list = re.split(' ', editor)
+        command_list.append(suiterc)
+        command = ' '.join(command_list)
         # THIS BLOCKS UNTIL THE COMMAND COMPLETES
-        retcode = subprocess.call( command_list )
+        retcode = subprocess.call(command_list)
         if retcode != 0:
             # the command returned non-zero exit status
             print >> sys.stderr, command, 'failed:', retcode
             sys.exit(1)
-    except OSError:
-        # the command was not invoked
-        print >> sys.stderr, 'ERROR: unable to execute:', command
-        sys.exit(1)
 
-    # !!!EDITING FINISHED!!!
-    sys.exit(0)
+        # !!!EDITING FINISHED!!!
+        sys.exit(0)
+
+    # read the suite.rc file
+    if os.path.isfile(suiterc):
+        # back up the original
+        backup(suiterc)
+        # record original modtime
+        modtimes[suiterc] = os.stat(suiterc).st_mtime
+        # read the file
+        h = open(suiterc, 'rb')
+        lines0 = h.readlines()
+        h.close()
+        if lines0[0].startswith('# !WARNING! CYLC EDIT INLINED'):
+            print 'WARNING: RECOVERING A PREVIOUSLY INLINED FILE'
+            recovery = True
+            lines = lines0
+        else:
+            recovery = False
+            lines = inline(lines0, suitedir, suiterc, for_edit=True)
+    else:
+        parser.error("File not found: " + suiterc)
 
-# read the suite.rc file
-if os.path.isfile( suiterc ):
-    # back up the original
-    try:
-        backup( suiterc )
-    except IOError, x:
-        # e.g. if attempting inlined edit of another user's suite.
-        print >> sys.stderr, x
-        sys.exit("Write access to suite definition directory is required.")
-    # record original modtime
-    modtimes[suiterc] = os.stat( suiterc ).st_mtime
-    # read the file
-    h = open( suiterc, 'rb' )
-    lines0 = h.readlines()
+    lines = [i.rstrip() for i in lines]
+
+    # overwrite the (now backed up) original with the inlined file:
+    h = open(suiterc, 'wb')
+    for line in lines:
+        h.write(line + '\n')
     h.close()
-    if lines0[0].startswith( '# !WARNING! CYLC EDIT INLINED' ):
-        print 'WARNING: RECOVERING A PREVIOUSLY INLINED FILE'
-        recovery = True
-        lines = lines0
-    else:
-        recovery = False
-        lines = inline( lines0, suitedir, suiterc, for_edit=True )
-else:
-    parser.error("File not found: " + suiterc )
-
-lines = [ i.rstrip() for i in lines ]
-
-# overwrite the (now backed up) original with the inlined file:
-h = open( suiterc, 'wb' )
-for line in lines:
-    h.write( line + '\n' )
-h.close()
-
-print 'PRE-EDIT BACKUPS:'
-for file in backups:
-    src = re.sub( suitedir + '/', '', file )
-    dst = re.sub( suitedir + '/', '', backups[file] )
-    print ' + ' + src + ' ---> ' + dst
-
-# in case editor has options, e.g. 'emacs -nw':
-command_list = re.split( ' ', editor )
-command_list.append( suiterc )
-command = ' '.join( command_list )
-try:
+
+    print 'PRE-EDIT BACKUPS:'
+    for file in backups:
+        src = re.sub(suitedir + '/', '', file)
+        dst = re.sub(suitedir + '/', '', backups[file])
+        print ' + ' + src + ' ---> ' + dst
+
+    # in case editor has options, e.g. 'emacs -nw':
+    command_list = re.split(' ', editor)
+    command_list.append(suiterc)
+    command = ' '.join(command_list)
     # THIS BLOCKS UNTIL THE COMMAND COMPLETES
-    retcode = subprocess.call( command_list )
+    retcode = subprocess.call(command_list)
     if retcode != 0:
         # the command returned non-zero exit status
         print >> sys.stderr, command, 'failed:', retcode
         sys.exit(1)
-except OSError:
-    # the command was not invoked
-    print >> sys.stderr, 'ERROR: unable to execute:', command
-    sys.exit(1)
-
-print 'EDITING DONE'
-
-# Now back up the inlined file in case of absolute disaster, such as the
-# user or their editor corrupting the inlined-include-file marker lines.
-inlined_suiterc_backup = (
-    suitedir + '/suite.rc.INLINED.EDIT.' +
-    get_current_time_string(override_use_utc=True, use_basic_format=True)
-)
-copy( suiterc, inlined_suiterc_backup )
-
-# read in the edited inlined file
-h = open( suiterc, 'rb' )
-lines = h.readlines()
-h.close()
-
-# split it back into separate files
-split_file( suitedir, lines, suiterc, recovery )
-
-print ' + edited:', suiterc
-print ' + backup:', inlined_suiterc_backup
-print 'INCLUDE-FILES WRITTEN:'
-for file in newfiles:
-    f = re.sub( suitedir + '/', '', file )
-    if re.search( '\.EDIT\.NEW\.', f ):
-        print ' + ' + f + ' (!!! WARNING: original changed on disk !!!)'
-    else:
-        print ' + ' + f
+    print 'EDITING DONE'
+
+    # Now back up the inlined file in case of absolute disaster, such as the
+    # user or their editor corrupting the inlined-include-file marker lines.
+    inlined_suiterc_backup = (
+        suitedir + '/suite.rc.INLINED.EDIT.' +
+        get_current_time_string(override_use_utc=True, use_basic_format=True)
+    )
+    copy(suiterc, inlined_suiterc_backup)
+
+    # read in the edited inlined file
+    h = open(suiterc, 'rb')
+    lines = h.readlines()
+    h.close()
+
+    # split it back into separate files
+    split_file(suitedir, lines, suiterc, recovery)
 
-# DONE
+    print ' + edited:', suiterc
+    print ' + backup:', inlined_suiterc_backup
+    print 'INCLUDE-FILES WRITTEN:'
+    for file in newfiles:
+        f = re.sub(suitedir + '/', '', file)
+        if re.search('\.EDIT\.NEW\.', f):
+            print ' + ' + f + ' (!!! WARNING: original changed on disk !!!)'
+        else:
+            print ' + ' + f
+    # DONE
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-email-suite b/bin/cylc-email-suite
index 3f45464..45cf056 100755
--- a/bin/cylc-email-suite
+++ b/bin/cylc-email-suite
@@ -17,7 +17,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 usage() {
-    echo "USAGE: cylc [hook] email-suite EVENT SUITE MESSAGE"
+    echo "Usage: cylc [hook] email-suite EVENT SUITE MESSAGE"
     echo ""
     echo "This is a simple suite event hook script that sends an email."
     echo "The command line arguments are supplied automatically by cylc."
diff --git a/bin/cylc-email-task b/bin/cylc-email-task
index 398df93..297186e 100755
--- a/bin/cylc-email-task
+++ b/bin/cylc-email-task
@@ -17,7 +17,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 usage() {
-    echo "USAGE: cylc [hook] email-task EVENT SUITE TASKID MESSAGE"
+    echo "Usage: cylc [hook] email-task EVENT SUITE TASKID MESSAGE"
     echo ""
     echo "This is a simple task event hook handler script that sends an email."
     echo "The command line arguments are supplied automatically by cylc."
diff --git a/bin/cylc-ext-trigger b/bin/cylc-ext-trigger
new file mode 100755
index 0000000..975e6a7
--- /dev/null
+++ b/bin/cylc-ext-trigger
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""cylc [control] ext-trigger [OPTIONS] ARGS
+
+Report an external event message to a suite daemon. It is expected that a
+task in the suite has registered the same message as an external trigger - a
+special prerequisite to be satisfied by an external system, via this command,
+rather than by triggering off other tasks.
+
+The ID argument should uniquely distinguish one external trigger event from the
+next. When a task's external trigger is satisfied by an incoming message, the
+message ID is broadcast to all downstream tasks in the cycle point as
+$CYLC_EXT_TRIGGER_ID so that they can use it - e.g. to identify a new data file
+that the external triggering system is responding to.
+
+Use the retry options in case the target suite is down or out of contact.
+
+The suite passphrase must be installed in $HOME/.cylc/<SUITE>/.
+
+Note: to manually trigger a task use 'cylc trigger', not this command."""
+
+import os
+import sys
+
+import cylc.flags
+from cylc.CylcOptionParsers import cop
+from cylc.network.ext_trigger import ExtTriggerClient
+
+
+def main():
+    parser = cop(
+        __doc__, pyro=True,
+        argdoc=[("REG", "Suite name"),
+                ("MSG", "External trigger message"),
+                ("ID", "Unique trigger ID")])
+
+    parser.add_option(
+        "--max-tries", help="Maximum number of send attempts "
+        "(default %s)." % ExtTriggerClient.MAX_N_TRIES, metavar="INT",
+        action="store", default=None, dest="max_n_tries")
+
+    parser.add_option(
+        "--retry-interval", help="Delay in seconds before retrying "
+        "(default %s)." % ExtTriggerClient.RETRY_INTVL_SECS, metavar="SEC",
+        action="store", default=None, dest="retry_intvl_secs")
+
+    (options, args) = parser.parse_args()
+    suite = args[0]
+
+    cylc.flags.verbose = options.verbose
+    event_msg = args[1]
+    event_id = args[2]
+
+    print 'Send to suite %s: "%s" (%s)' % (suite, event_msg, event_id)
+
+    pclient = ExtTriggerClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+
+    pclient.send_retry(
+        event_msg, event_id, options.max_n_tries, options.retry_intvl_secs)
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
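
As a purely hypothetical usage example of the new command, an external system that has just delivered a new data file might run:

    % cylc ext-trigger --max-tries=5 --retry-interval=30 my.suite \
          "new obs data available" obs-20150601T0000Z

The message must match an external trigger registered by a task in my.suite, and the ID is then made available to downstream tasks in that cycle point as $CYLC_EXT_TRIGGER_ID.
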
diff --git a/bin/cylc-failed b/bin/cylc-failed
deleted file mode 100755
index 04bdf01..0000000
--- a/bin/cylc-failed
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import os, sys
-from optparse import OptionParser
-from cylc.task_message import message
-import cylc.flags
-
-usage = """cylc [task] failed [OPTIONS] [REASON]
-
-This command is part of the cylc task messaging interface, used by
-running tasks to communicate progress to their parent suite.
-
-The failed command reports failure to complete task execution.
-It is automatically called by task job scripts, except in the case of
-"detaching tasks" which must do their own completion messaging.
-
-Suite and task identity are determined from the task execution
-environment supplied by the suite (or by the single task 'submit'
-command, in which case the message is just printed to stdout).
-
-See also:
-    cylc [task] message
-    cylc [task] started
-    cylc [task] succeeded
-
-Arguments:
-    REASON        - message explaining why the task failed."""
-
-parser = OptionParser( usage )
-
-parser.add_option( "-v", "--verbose",
-        help="Verbose output mode.",
-        action="store_true", default=False, dest="verbose" )
-
-( options, args ) = parser.parse_args()
-cylc.flags.verbose = options.verbose
-
-if len( args ) == 0:
-    reason = None
-else:
-    reason = ' '.join( args )
-
-debug = False
-try:
-    # from task execution environment
-    if os.environ['CYLC_DEBUG'] == 'True':
-        debug = True
-except KeyError:
-    pass
-
-try:
-    message( msg=reason ).send_failed()
-except Exception, x:
-    print >> sys.stderr, 'ERROR: task messaging failure.'
-    if debug:
-        raise
-    raise SystemExit(x)
diff --git a/bin/cylc-get-directory b/bin/cylc-get-directory
index f729ca7..2a3988a 100755
--- a/bin/cylc-get-directory
+++ b/bin/cylc-get-directory
@@ -16,6 +16,12 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [db] get-directory REG
+
+Retrieve and print the directory location of suite REG.
+Here's an easy way to move to a suite directory:
+  $ cd $(cylc get-dir REG)."""
+
 import sys
 from cylc.CylcOptionParsers import cop
 from cylc.registration import localdb
@@ -25,19 +31,21 @@ from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
-parser = cop( usage = """cylc [db] get-directory REG
 
-Retrieve and print the directory location of suite REG.
-Here's an easy way to move to a suite directory:
-  $ cd $(cylc get-dir REG).""" )
+def main():
+    parser = cop(__doc__)
 
-( options, args ) = parser.parse_args()
+    (options, args) = parser.parse_args()
 
-reg = args[0]
-db = localdb( file=options.db )
-try:
+    reg = args[0]
+    db = localdb(file=options.db)
     print db.get_suitedir(reg)
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-get-gui-config b/bin/cylc-get-gui-config
index 969a7c4..9a17da3 100755
--- a/bin/cylc-get-gui-config
+++ b/bin/cylc-get-gui-config
@@ -2,7 +2,7 @@
 
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -16,52 +16,61 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-from optparse import OptionParser
-import cylc.flags
-from parsec.util import itemstr
-
-parser = OptionParser( usage = """cylc [admin] get-gui-config [OPTIONS] 
+"""cylc [admin] get-gui-config [OPTIONS]
 
 Print gcylc configuration settings.
 
 By default all settings are printed. For specific sections or items
 use -i/--item and wrap parent sections in square brackets:
    cylc get-gui-config --item '[themes][default]succeeded'
-Multiple items can be specified at once.""" )
+Multiple items can be specified at once."""
+
+import sys
+from optparse import OptionParser
+import cylc.flags
+from parsec.util import itemstr
+
+
+def main():
+    parser = OptionParser(__doc__)
 
-parser.add_option( "-v", "--verbose", help="Print extra information.",
-        action="store_true", default=False, dest="verbose" )
+    parser.add_option(
+        "-v", "--verbose", help="Print extra information.",
+        action="store_true", default=False, dest="verbose")
 
-parser.add_option( "--debug",
-        help="Show exception tracebacks.",
-        action="store_true", default=False, dest="debug" )
+    parser.add_option(
+        "--debug", help="Show exception tracebacks.",
+        action="store_true", default=False, dest="debug")
 
-parser.add_option( "-i", "--item", metavar="[SEC...]ITEM", 
+    parser.add_option(
+        "-i", "--item", metavar="[SEC...]ITEM",
         help="Item or section to print (multiple use allowed).",
-        action="append", dest="item", default=[] )
+        action="append", dest="item", default=[])
 
-parser.add_option( "--sparse",
+    parser.add_option(
+        "--sparse",
         help="Only print items explicitly set in the config files.",
-        action="store_true", default=False, dest="sparse" )
+        action="store_true", default=False, dest="sparse")
 
-parser.add_option( "-p", "--python",
-        help="Print native Python format.",
-        action="store_true", default=False, dest="pnative" )
+    parser.add_option(
+        "-p", "--python", help="Print native Python format.",
+        action="store_true", default=False, dest="pnative")
 
-(options, args) = parser.parse_args()
-cylc.flags.verbose = options.verbose
-cylc.flags.debug = options.debug
+    (options, args) = parser.parse_args()
+    cylc.flags.verbose = options.verbose
+    cylc.flags.debug = options.debug
 
-if len(args) != 0:
-    parser.error( "ERROR: wrong number of arguments" )
+    if len(args) != 0:
+        parser.error("ERROR: wrong number of arguments")
 
-try:
-    # import gcfg here to avoid aborting before command help is printed
+    # Import gcfg here to avoid aborting before command help is printed.
     from cylc.cfgspec.gcylc import gcfg
-    gcfg.idump( options.item, sparse=options.sparse, pnative=options.pnative )
+    gcfg.idump(options.item, sparse=options.sparse, pnative=options.pnative)
 
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit( str(x) )
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-get-site-config b/bin/cylc-get-site-config
index 538d37e..d1dbc7b 100755
--- a/bin/cylc-get-site-config
+++ b/bin/cylc-get-site-config
@@ -2,7 +2,7 @@
 
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -16,59 +16,73 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-from optparse import OptionParser
-import cylc.flags
-from parsec.util import itemstr
-
-parser = OptionParser( usage = """cylc [admin] get-site-config [OPTIONS] 
+"""cylc [admin] get-site-config [OPTIONS]
 
 Print cylc site/user configuration settings.
 
 By default all settings are printed. For specific sections or items
 use -i/--item and wrap parent sections in square brackets:
    cylc get-site-config --item '[editors]terminal'
-Multiple items can be specified at once.""" )
+Multiple items can be specified at once."""
+
+import sys
+from optparse import OptionParser
+import cylc.flags
+from parsec.util import itemstr
 
-parser.add_option( "-i", "--item", metavar="[SEC...]ITEM", 
+
+def main():
+    parser = OptionParser(__doc__)
+
+    parser.add_option(
+        "-i", "--item", metavar="[SEC...]ITEM",
         help="Item or section to print (multiple use allowed).",
-        action="append", dest="item", default=[] )
+        action="append", dest="item", default=[])
 
-parser.add_option( "--sparse",
+    parser.add_option(
+        "--sparse",
         help="Only print items explicitly set in the config files.",
-        action="store_true", default=False, dest="sparse" )
+        action="store_true", default=False, dest="sparse")
 
-parser.add_option( "-p", "--python",
+    parser.add_option(
+        "-p", "--python",
         help="Print native Python format.",
-        action="store_true", default=False, dest="pnative" )
+        action="store_true", default=False, dest="pnative")
 
-parser.add_option( "--print-run-dir",
+    parser.add_option(
+        "--print-run-dir",
         help="Print the configured cylc run directory.",
-        action="store_true", default=False, dest="run_dir" )
+        action="store_true", default=False, dest="run_dir")
 
-parser.add_option( "-v", "--verbose", help="Print extra information.",
-        action="store_true", default=False, dest="verbose" )
+    parser.add_option(
+        "-v", "--verbose", help="Print extra information.",
+        action="store_true", default=False, dest="verbose")
 
-parser.add_option( "--debug",
+    parser.add_option(
+        "--debug",
         help="Show exception tracebacks.",
-        action="store_true", default=False, dest="debug" )
+        action="store_true", default=False, dest="debug")
 
-(options, args) = parser.parse_args()
-cylc.flags.verbose = options.verbose
-cylc.flags.debug = options.debug
+    (options, args) = parser.parse_args()
+    cylc.flags.verbose = options.verbose
+    cylc.flags.debug = options.debug
 
-if len(args) != 0:
-    parser.error( "ERROR: wrong number of arguments" )
+    if len(args) != 0:
+        parser.error("ERROR: wrong number of arguments")
 
-try:
     # import GLOBAL_CFG here to avoid aborting before command help is printed
     from cylc.cfgspec.globalcfg import GLOBAL_CFG
     if options.run_dir:
         print GLOBAL_CFG.get_host_item('run directory')
     else:
-        GLOBAL_CFG.idump( options.item, sparse=options.sparse, pnative=options.pnative )
+        GLOBAL_CFG.idump(
+            options.item, sparse=options.sparse, pnative=options.pnative)
+
 
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit( str(x) )
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-get-suite-config b/bin/cylc-get-suite-config
index 01231ce..4244153 100755
--- a/bin/cylc-get-suite-config
+++ b/bin/cylc-get-suite-config
@@ -2,7 +2,7 @@
 
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -16,17 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-from cylc.remote import remrun
-if remrun().execute():
-    sys.exit(0)
-
-import re
-from cylc.config import config, SuiteConfigError
-from cylc.CylcOptionParsers import cop
-import cylc.flags
-
-parser = cop( usage = """cylc [info] get-suite-config [OPTIONS] ARGS
+"""cylc [info] get-suite-config [OPTIONS] ARGS
 
 Print parsed suite configuration items, after runtime inheritance.
 
@@ -50,7 +40,7 @@ Example:
 $ cylc get-suite-config --item=[runtime][modelX][environment]FOO SUITE
 foo
 
-$ cylc get-suite-config --item=[runtime][modelX][environment] SUITE 
+$ cylc get-suite-config --item=[runtime][modelX][environment] SUITE
 FOO = foo
 BAR = bar
 
@@ -59,71 +49,98 @@ $ cylc get-suite-config --item=[runtime][modelX] SUITE
 [[[environment]]]
     FOO = foo
     BAR = bar
-...""", jset=True, prep=True )
+..."""
 
-parser.add_option( "-i", "--item", metavar="[SEC...]ITEM", 
+import sys
+from cylc.remote import remrun
+if remrun().execute():
+    sys.exit(0)
+
+import re
+from cylc.config import SuiteConfig, SuiteConfigError
+from cylc.CylcOptionParsers import cop
+import cylc.flags
+
+
+def main():
+    parser = cop(__doc__, jset=True, prep=True)
+
+    parser.add_option(
+        "-i", "--item", metavar="[SEC...]ITEM",
         help="Item or section to print (multiple use allowed).",
-        action="append", dest="item", default=[] )
+        action="append", dest="item", default=[])
 
-parser.add_option( "-r", "--sparse",
+    parser.add_option(
+        "-r", "--sparse",
         help="Only print items explicitly set in the config files.",
-        action="store_true", default=False, dest="sparse" )
+        action="store_true", default=False, dest="sparse")
 
-parser.add_option( "-p", "--python",
+    parser.add_option(
+        "-p", "--python",
         help="Print native Python format.",
-        action="store_true", default=False, dest="pnative" )
+        action="store_true", default=False, dest="pnative")
 
-parser.add_option( "-a", "--all-tasks",
+    parser.add_option(
+        "-a", "--all-tasks",
         help="For [runtime] items (e.g. --item='script') report "
         "values for all tasks prefixed by task name.",
-        action="store_true", default=False, dest="alltasks" )
+        action="store_true", default=False, dest="alltasks")
 
-parser.add_option( "-n", "--null-value",
+    parser.add_option(
+        "-n", "--null-value",
         help="The string to print for unset values (default nothing).",
-        metavar="STRING", action="store", default='', dest="none_str" )
+        metavar="STRING", action="store", default='', dest="none_str")
 
-parser.add_option( "-m", "--mark-up",
+    parser.add_option(
+        "-m", "--mark-up",
         help="Prefix each line with '!cylc!'.",
-        action="store_true", default=False, dest="markup" )
+        action="store_true", default=False, dest="markup")
 
-parser.add_option( "-o", "--one-line",
+    parser.add_option(
+        "-o", "--one-line",
         help="Print multiple single-value items at once.",
-        action="store_true", default=False, dest="oneline" )
+        action="store_true", default=False, dest="oneline")
 
-parser.add_option( "-t", "--tasks",
+    parser.add_option(
+        "-t", "--tasks",
         help="Print the suite task list "
-        "[DEPRECATED: use 'cylc list SUITE'].",
-        action="store_true", default=False, dest="tasks" )
-
-( options, args ) = parser.parse_args()
+             "[DEPRECATED: use 'cylc list SUITE'].",
+        action="store_true", default=False, dest="tasks")
 
-# TODO - check db owner still is (or should be?) passed through to taskdefs:
-owner = options.db_owner
+    (options, args) = parser.parse_args()
+    owner = options.db_owner
+    suite, suiterc, junk = parser.get_suite()
 
-suite, suiterc, junk = parser.get_suite()
-
-if options.markup:
-    prefix = '!cylc!'
-else:
-    prefix = ''
+    if options.markup:
+        prefix = '!cylc!'
+    else:
+        prefix = ''
 
-try:
-    config = config( suite, suiterc, 
-            template_vars=options.templatevars,
-            template_vars_file=options.templatevars_file,
-            owner=owner )
+    config = SuiteConfig(
+        suite, suiterc,
+        template_vars=options.templatevars,
+        template_vars_file=options.templatevars_file,
+        owner=owner)
     if options.tasks:
         for task in config.get_task_name_list():
             print prefix + task
     elif options.alltasks:
         for task in config.get_task_name_list():
-            items = [ '[runtime]['+task+']'+i for i in options.item ]
-            print prefix+task,
-            config.pcfg.idump( items, options.sparse, options.pnative, prefix, options.oneline, none_str=options.none_str )
+            items = ['[runtime][' + task + ']' + i for i in options.item]
+            print prefix + task,
+            config.pcfg.idump(
+                items, options.sparse, options.pnative, prefix,
+                options.oneline, none_str=options.none_str)
     else:
-        config.pcfg.idump( options.item, options.sparse, options.pnative, prefix, options.oneline, none_str=options.none_str )
-
-except Exception,x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+        config.pcfg.idump(
+            options.item, options.sparse, options.pnative, prefix,
+            options.oneline, none_str=options.none_str)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-get-suite-version b/bin/cylc-get-suite-version
index 9551222..3f1ef61 100755
--- a/bin/cylc-get-suite-version
+++ b/bin/cylc-get-suite-version
@@ -16,6 +16,12 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [info] get-suite-version [OPTIONS] ARGS
+
+Interrogate running suite REG to find what version of cylc is running it.
+
+To find the version you've invoked at the command line see "cylc version"."""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
     sys.argv.remove('--use-ssh')
@@ -23,33 +29,30 @@ if '--use-ssh' in sys.argv[1:]:
     if remrun().execute():
         sys.exit(0)
 
+import cylc.flags
 from cylc.CylcOptionParsers import cop
 from cylc.task_id import TaskID
-from cylc import cylc_pyro_client
-from cylc.command_prep import prep_pyro
+from cylc.network.suite_info import SuiteInfoClient
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
-import cylc.flags
 
-parser = cop("""cylc [info] get-suite-version [OPTIONS] ARGS
 
-Interrogate running suite REG to find what version of cylc is running it.
+def main():
+    parser = cop(__doc__, pyro=True, argdoc=[('REG', 'Suite name')])
+
+    (options, args) = parser.parse_args()
+    suite = args[0]
+
+    pclient = SuiteInfoClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+    print pclient.get_info('get_cylc_version')
+
 
-To find the version you've invoked at the command line see "cylc version".""",
-    pyro=True,
-    argdoc=[('REG', 'Suite name')])
-
-(options, args) = parser.parse_args()
-
-suite, pphrase = prep_pyro(args[0], options).execute()
-
-try:
-    proxy = cylc_pyro_client.client(
-            suite, pphrase, options.owner, options.host, options.pyro_timeout,
-            options.port).get_proxy('suite-info')
-    res = proxy.get('get cylc version')
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-else:
-    print res
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-gpanel b/bin/cylc-gpanel
index 33023b3..a5f0680 100755
--- a/bin/cylc-gpanel
+++ b/bin/cylc-gpanel
@@ -16,18 +16,9 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from optparse import OptionParser
-import os
-import sys
-
-sys.path.append(os.path.dirname(os.path.realpath(os.path.abspath(__file__))) + '/../lib/')
-cylc_dir = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
-if cylc_dir != os.getenv('CYLC_DIR', ''):
-    os.environ['CYLC_DIR'] = cylc_dir
-
-parser = OptionParser( """cylc gpanel [OPTIONS]
+"""cylc gpanel [OPTIONS]
 
-This is a cylc summary panel applet for monitoring running suites on a set of
+This is a cylc scan panel applet for monitoring running suites on a set of
 hosts in GNOME 2.
 
 To install this applet, run "cylc gpanel --install"
@@ -39,7 +30,19 @@ To customize themes, copy $CYLC_DIR/conf/gcylcrc/gcylc.rc.eg to
 $HOME/.cylc/gcylc.rc and follow the instructions in the file.
 
 To configure default suite hosts, edit the
-[suite host scanning]hosts entry in your global.rc file.""")
+[suite host scanning]hosts entry in your global.rc file."""
+
+from optparse import OptionParser
+import os
+import sys
+
+sys.path.append(
+    os.path.dirname(os.path.realpath(os.path.abspath(__file__))) + '/../lib/')
+cylc_dir = os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
+if cylc_dir != os.getenv('CYLC_DIR', ''):
+    os.environ['CYLC_DIR'] = cylc_dir
+
+parser = OptionParser(__doc__)
 
 parser.add_option("--compact",
                   help="Switch on compact mode at runtime.",
@@ -49,12 +52,14 @@ parser.add_option("--install",
                   help="Install the panel applet.",
                   default=False,
                   action="store_true", dest="install")
+
 parser.add_option("--test",
                   help="Run in a standalone window.",
                   default=False,
                   action="store_true", dest="test")
+
 arglist = [a for a in sys.argv[1:] if not a.startswith("--oaf")]
-( options, args ) = parser.parse_args(arglist)
+(options, args) = parser.parse_args(arglist)
 
 import gtk
 import warnings
@@ -62,7 +67,8 @@ warnings.filterwarnings('ignore', 'use the new', Warning)
 
 import cylc.gui
 
-cylc_dir = os.path.dirname(os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
+cylc_dir = os.path.dirname(
+    os.path.dirname(os.path.realpath(os.path.abspath(__file__))))
 if os.path.basename(cylc_dir).startswith("cylc-"):
     # If using the wrapper, reference 'cylc' rather than 'cylc-6.3.1'.
     cylc_alt_dir = os.path.join(os.path.dirname(cylc_dir), "cylc")
@@ -72,7 +78,7 @@ if os.path.basename(cylc_dir).startswith("cylc-"):
 if cylc_dir != os.getenv('CYLC_DIR', ''):
     os.environ['CYLC_DIR'] = cylc_dir
 
-from cylc.gui.gpanel import SummaryPanelApplet, run_in_window
+from cylc.gui.gpanel import ScanPanelApplet, run_in_window
 
 
 def install_panel_applet():
@@ -100,13 +106,14 @@ select "Add to Panel", then choose a Cylc Applet."""
 
 
 def panel_applet_factory_compact(applet, iid):
-    my_panel_app = SummaryPanelApplet(is_compact=True)
+    my_panel_app = ScanPanelApplet(is_compact=True)
     applet.add(my_panel_app.get_widget())
     applet.show_all()
     return True
 
+
 def panel_applet_factory(applet, iid):
-    my_panel_app = SummaryPanelApplet()
+    my_panel_app = ScanPanelApplet()
     applet.add(my_panel_app.get_widget())
     applet.show_all()
     return True
@@ -120,12 +127,12 @@ if __name__ == "__main__":
     elif options.compact:
         import gnomeapplet
         gnomeapplet.bonobo_factory(
-                    "OAFIID:GNOME_CylcCompactMonitorFactory",
-                    gnomeapplet.Applet.__gtype__,
-                    "cylc gpanel compact", "0", panel_applet_factory_compact)
+            "OAFIID:GNOME_CylcCompactMonitorFactory",
+            gnomeapplet.Applet.__gtype__,
+            "cylc gpanel compact", "0", panel_applet_factory_compact)
     else:
         import gnomeapplet
         gnomeapplet.bonobo_factory(
-                    "OAFIID:GNOME_CylcMonitorFactory",
-                    gnomeapplet.Applet.__gtype__,
-                    "cylc gpanel", "0", panel_applet_factory)
+            "OAFIID:GNOME_CylcMonitorFactory",
+            gnomeapplet.Applet.__gtype__,
+            "cylc gpanel", "0", panel_applet_factory)
diff --git a/bin/cylc-graph b/bin/cylc-graph
index 4919cb4..cb62876 100755
--- a/bin/cylc-graph
+++ b/bin/cylc-graph
@@ -16,74 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import StringIO
-import sys
-from cylc.remote import remrun
-from cylc.task_id import TaskID
-
-if remrun().execute():
-    sys.exit(0)
-
-from cylc.CylcOptionParsers import cop
-
-# DEVELOPER NOTE: family grouping controls via the viewer toolbar and
-# right-click menu have been rather hastily stuck on to the original
-# viewer, via changes to this file and to lib/cylc/cylc_xdot.py - all
-# of which could stand some refactoring to streamline the code a bit.
-
-# TODO - clarify what it means to choose visualization boundaries (by CLI
-# or in-suite) outside of the defined suite initial and final cycle times.
-
-def on_url_clicked( widget, url, event, window ):
-    if event.button != 3:
-        return False
-    # URL is node ID
-    right_click_menu( event, url, type='live task', window=window )
-
-def right_click_menu( event, task_id, type='live task', window=None ):
-    name, point_string = TaskID.split(task_id)
-
-    menu = gtk.Menu()
-    menu_root = gtk.MenuItem( task_id )
-    menu_root.set_submenu( menu )
-
-    group_item = gtk.MenuItem( 'Group' )
-    group_item.connect( 'activate', grouping, name, window, False, False )
-    ungroup_item = gtk.MenuItem( 'UnGroup' )
-    ungroup_item.connect( 'activate', grouping, name, window, True, False )
-    ungroup_rec_item = gtk.MenuItem( 'Recursive UnGroup' )
-    ungroup_rec_item.connect( 'activate', grouping, name, window, True, True )
-
-    title_item = gtk.MenuItem( task_id )
-    title_item.set_sensitive(False)
-    menu.append( title_item )
-
-    menu.append( gtk.SeparatorMenuItem() )
-
-    menu.append( group_item )
-    menu.append( ungroup_item )
-    menu.append( ungroup_rec_item )
-
-    menu.show_all()
-    menu.popup( None, None, None, event.button, event.time )
-
-    # TODO - popup menus are not automatically destroyed and can be
-    # reused if saved; however, we need to reconstruct or at least
-    # alter ours dynamically => should destroy after each use to
-    # prevent a memory leak? But I'm not sure how to do this as yet.)
-
-    return True
-
-def grouping( w, name, window, un, recursive ):
-    if not un:
-        window.get_graph( group_nodes=[name] )
-    else:
-        if recursive:
-            window.get_graph( ungroup_nodes=[name], ungroup_recursive=True )
-        else:
-            window.get_graph( ungroup_nodes=[name], ungroup_recursive=False )
-
-parser = cop( """1/ cylc [prep] graph [OPTIONS] SUITE [START[STOP]]
+"""1/ cylc [prep] graph [OPTIONS] SUITE [START[STOP]]
      Plot the suite.rc dependency graph for SUITE.
        2/ cylc [prep] graph [OPTIONS] -f,--file FILE
      Plot the specified dot-language graph file.
@@ -127,130 +60,223 @@ GRAPH VIEWER CONTROLS:
     Right-click menu:
     * "group" - close this node's parent family.
     * "ungroup" - open this family node.
-    * "recursive ungroup" - ungroup all families below this node.""",
-    jset=True, prep=True, argdoc = [('[SUITE]', 'Suite name or path'),
-        ('[START]', 'Initial cycle point to plot (default=2999010100)'),
-        ('[STOP]', 'Final cycle point to plot (default=2999010123)')])
-
-parser.add_option( "-n", "--namespaces",
-    help="Plot the suite namespace inheritance hierarchy "
-    "(task run time properties).",
-    action="store_true", default=False, dest="namespaces" )
-
-parser.add_option( "-f", "--file",
-    help="View a specific dot-language graphfile.",
-    metavar="FILE", action="store", default=None, dest="filename" )
-
-parser.add_option( "--filter",
-    help="Filter out one or many nodes.",
-    metavar="NODE_NAME_PATTERN", action="append", dest="filter_patterns" )
-
-parser.add_option( "-O", "--output-file",
-    help="Output to a specific file, with a format given by --output-format" +
-         " or extrapolated from the extension. " +
-         "'-' implies stdout in plain format.",
-    metavar="FILE", action="store", default=None, dest="output_filename" )
-
-parser.add_option( "--output-format",
-    help="Specify a format for writing out the graph to --output-file " +
-         "e.g. png, svg, jpg, eps, dot. 'ref' is a special sorted plain " +
-         "text format for comparison and reference purposes.",
-    metavar="FORMAT", action="store", default=None, dest="output_format" )
-
-parser.add_option( "-r", "--reference",
-    help="Output in a sorted plain text format for comparison purposes." +
-         " If not given, assume --output-file=-.",
-    action="store_true", default=False, dest="reference")
-
-(options, args) = parser.parse_args()
-
-# import modules that require gtk now, so that a display is not needed
-# just to get command help (e.g. when running make on a post-commit hook
-# on a remote repository).
-import gtk, gobject
-from xdot import DotWindow
+    * "recursive ungroup" - ungroup all families below this node."""
+
+import sys
+import StringIO
+
+import cylc.flags
+from cylc.remote import remrun
+from cylc.task_id import TaskID
+
 try:
+    import gtk
+    import gobject
+    from xdot import DotWindow
     from cylc.cylc_xdot import (
         MyDotWindow, MyDotWindow2, get_reference_from_plain_format)
-except ImportError, x:
-    # this imports pygraphviz via cylc.graphing
-    print >> sys.stderr, str(x)
-    raise SystemExit( "ERROR: please install pygraphviz." )
-
-if options.filename:
-    if len(args) != 0:
-        parser.error( 'file graphing arguments: \'-f FILE\' or \'--file=FILE\'' )
-        sys.exit(1)
-    file = options.filename
-    try:
-        from xdot import DotWindow
-    except:
-        raise SystemExit( "Failed to import the xdot viewer.")
-    if options.output_filename:
-        raise SystemExit( "ERROR: output-file not supported for " +
-                          "dot files. Use 'dot' command instead." )
-    window = DotWindow()
-    try:
-        window.update( file )
-    except OSError, x:
-        print >> sys.stderr, x
-        sys.exit(1)
-    window.connect( 'destroy', gtk.main_quit)
-    # checking periodically for file changed
-    gobject.timeout_add(1000, window.update, file)
-    gtk.main()
+except ImportError as exc:
+    # Allow command help generation without a graphical environment.
+    print >> sys.stderr, 'WARNING: no X environment? %s' % exc
+
+if remrun().execute():
     sys.exit(0)
 
-should_hide_gtk_window = (options.output_filename is not None)
-
-suite, suiterc, junk = parser.get_suite()
-
-start_point_string = stop_point_string = None
-if len(args) >= 2:
-    start_point_string = args[1]
-if len(args) == 3:
-    stop_point_string = args[2]
-
-if options.namespaces:
-    window = MyDotWindow2(suite, suiterc, options.templatevars,
-            options.templatevars_file, 
-            should_hide=should_hide_gtk_window)
-else:
-    window = MyDotWindow(suite, suiterc, start_point_string,
-            stop_point_string, options.templatevars,
-            options.templatevars_file,
-            should_hide=should_hide_gtk_window)
-
-window.widget.connect('clicked', on_url_clicked, window)
-if options.filter_patterns:
-    filter_patterns = set(options.filter_patterns)
-    window.set_filter_graph_patterns(options.filter_patterns)
-window.get_graph()
-
-if options.reference and options.output_filename is None:
-    options.output_filename = "-"
-
-if options.output_filename:
-    if (options.reference or options.output_filename.endswith(".ref") or
-            options.output_format == "ref"):
-        dest = StringIO.StringIO()
-        window.graph.draw(dest, format="plain", prog="dot")
-        output_text = get_reference_from_plain_format(dest.getvalue())
-        if options.output_filename == "-":
-            sys.stdout.write(output_text)
+from cylc.CylcOptionParsers import cop
+
+# DEVELOPER NOTE: family grouping controls via the viewer toolbar and
+# right-click menu have been rather hastily stuck on to the original
+# viewer, via changes to this file and to lib/cylc/cylc_xdot.py - all
+# of which could stand some refactoring to streamline the code a bit.
+
+# TODO - clarify what it means to choose visualization boundaries (by CLI
+# or in-suite) outside of the defined suite initial and final cycle times.
+
+
+def on_url_clicked(widget, url, event, window):
+    if event.button != 3:
+        return False
+    # URL is node ID
+    right_click_menu(event, url, type='live task', window=window)
+
+
+def right_click_menu(event, task_id, type='live task', window=None):
+    name, point_string = TaskID.split(task_id)
+
+    menu = gtk.Menu()
+    menu_root = gtk.MenuItem(task_id)
+    menu_root.set_submenu(menu)
+
+    group_item = gtk.MenuItem('Group')
+    group_item.connect('activate', grouping, name, window, False, False)
+    ungroup_item = gtk.MenuItem('UnGroup')
+    ungroup_item.connect('activate', grouping, name, window, True, False)
+    ungroup_rec_item = gtk.MenuItem('Recursive UnGroup')
+    ungroup_rec_item.connect('activate', grouping, name, window, True, True)
+
+    title_item = gtk.MenuItem(task_id)
+    title_item.set_sensitive(False)
+    menu.append(title_item)
+
+    menu.append(gtk.SeparatorMenuItem())
+
+    menu.append(group_item)
+    menu.append(ungroup_item)
+    menu.append(ungroup_rec_item)
+
+    menu.show_all()
+    menu.popup(None, None, None, event.button, event.time)
+
+    # TODO - popup menus are not automatically destroyed and can be
+    # reused if saved; however, we need to reconstruct or at least
+    # alter ours dynamically => should destroy after each use to
+    # prevent a memory leak? But I'm not sure how to do this as yet.)
+
+    return True
+
+
+def grouping(w, name, window, un, recursive):
+    if not un:
+        window.get_graph(group_nodes=[name])
+    else:
+        if recursive:
+            window.get_graph(ungroup_nodes=[name], ungroup_recursive=True)
         else:
-            open(options.output_filename).write(output_text)
+            window.get_graph(ungroup_nodes=[name], ungroup_recursive=False)
+
+
+def main():
+    parser = cop(
+        __doc__, jset=True, prep=True,
+        argdoc=[
+            ('[SUITE]', 'Suite name or path'),
+            ('[START]', 'Initial cycle point '
+             '(default: suite initial point)'),
+            ('[STOP]', 'Final cycle point '
+             '(default: initial + 3 points)')])
+
+    parser.add_option(
+        "-n", "--namespaces",
+        help="Plot the suite namespace inheritance hierarchy "
+             "(task run time properties).",
+        action="store_true", default=False, dest="namespaces")
+
+    parser.add_option(
+        "-f", "--file",
+        help="View a specific dot-language graphfile.",
+        metavar="FILE", action="store", default=None, dest="filename")
+
+    parser.add_option(
+        "--filter", help="Filter out one or many nodes.",
+        metavar="NODE_NAME_PATTERN", action="append", dest="filter_patterns")
+
+    parser.add_option(
+        "-O", "--output-file",
+        help="Output to a specific file, with a format given by "
+             "--output-format or extrapolated from the extension. "
+             "'-' implies stdout in plain format.",
+        metavar="FILE", action="store", default=None, dest="output_filename")
+
+    parser.add_option(
+        "--output-format",
+        help="Specify a format for writing out the graph to --output-file "
+             "e.g. png, svg, jpg, eps, dot. 'ref' is a special sorted plain "
+             "text format for comparison and reference purposes.",
+        metavar="FORMAT", action="store", default=None, dest="output_format")
+
+    parser.add_option(
+        "-r", "--reference",
+        help="Output in a sorted plain text format for comparison purposes. "
+             "If not given, assume --output-file=-.",
+        action="store_true", default=False, dest="reference")
+
+    parser.add_option(
+        "--show-suicide",
+        help="Show suicide triggers.  They are not shown by default, unless "
+             "toggled on with the tool bar button.",
+        action="store_true", default=False, dest="show_suicide")
+
+    (options, args) = parser.parse_args()
+
+    if options.filename:
+        if len(args) != 0:
+            parser.error(
+                "file graphing arguments: '-f FILE' or '--file=FILE'")
+            sys.exit(1)
+        file = options.filename
+        from xdot import DotWindow
+        if options.output_filename:
+            raise SystemExit("ERROR: output-file not supported for "
+                             "dot files. Use 'dot' command instead.")
+        window = DotWindow()
+        window.update(file)
+        window.connect('destroy', gtk.main_quit)
+        # checking periodically for file changed
+        gobject.timeout_add(1000, window.update, file)
+        gtk.main()
+        sys.exit(0)
+
+    should_hide_gtk_window = (options.output_filename is not None)
+
+    suite, suiterc, junk = parser.get_suite()
+
+    start_point_string = stop_point_string = None
+    if len(args) >= 2:
+        start_point_string = args[1]
+    if len(args) == 3:
+        stop_point_string = args[2]
+
+    if options.namespaces:
+        window = MyDotWindow2(suite, suiterc, options.templatevars,
+                              options.templatevars_file,
+                              should_hide=should_hide_gtk_window)
     else:
-        if options.output_filename == "-":
-            window.graph.draw(sys.stdout, format="plain", prog="dot")
-        elif options.output_format:
-            window.graph.draw(
-                options.output_filename, format=options.output_format,
-                prog="dot"
-            )
+        hide_suicide = not options.show_suicide
+        window = MyDotWindow(suite, suiterc, start_point_string,
+                             stop_point_string, options.templatevars,
+                             options.templatevars_file,
+                             should_hide=should_hide_gtk_window,
+                             ignore_suicide=hide_suicide)
+
+    window.widget.connect('clicked', on_url_clicked, window)
+    if options.filter_patterns:
+        filter_patterns = set(options.filter_patterns)
+        window.set_filter_graph_patterns(options.filter_patterns)
+    window.get_graph()
+
+    if options.reference and options.output_filename is None:
+        options.output_filename = "-"
+
+    if options.output_filename:
+        if (options.reference or options.output_filename.endswith(".ref") or
+                options.output_format == "ref"):
+            dest = StringIO.StringIO()
+            window.graph.draw(dest, format="plain", prog="dot")
+            output_text = get_reference_from_plain_format(dest.getvalue())
+            if options.output_filename == "-":
+                sys.stdout.write(output_text)
+            else:
+                open(options.output_filename, "w").write(output_text)
         else:
-            window.graph.draw(options.output_filename, prog="dot")
-    sys.exit(0)
+            if options.output_filename == "-":
+                window.graph.draw(sys.stdout, format="plain", prog="dot")
+            elif options.output_format:
+                window.graph.draw(
+                    options.output_filename, format=options.output_format,
+                    prog="dot"
+                )
+            else:
+                window.graph.draw(options.output_filename, prog="dot")
+        sys.exit(0)
+
+    window.connect('destroy', gtk.main_quit)
+    gtk.main()
+
 
-window.connect('destroy', gtk.main_quit)
-gtk.main()
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-graph-diff b/bin/cylc-graph-diff
index 914ee1a..75662e6 100755
--- a/bin/cylc-graph-diff
+++ b/bin/cylc-graph-diff
@@ -19,7 +19,7 @@ set -eu
 
 usage() {
   cat <<'__USAGE__'
-USAGE: cylc graph-diff [OPTIONS] SUITE1 SUITE2 -- [GRAPH_OPTIONS_ARGS]
+Usage: cylc graph-diff [OPTIONS] SUITE1 SUITE2 -- [GRAPH_OPTIONS_ARGS]
 
 Difference 'cylc graph --reference' output for SUITE1 and SUITE2.
 
diff --git a/bin/cylc-gsummary b/bin/cylc-gscan
similarity index 51%
copy from bin/cylc-gsummary
copy to bin/cylc-gscan
index 7b3ecd8..115ee38 100755
--- a/bin/cylc-gsummary
+++ b/bin/cylc-gscan
@@ -16,36 +16,34 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from optparse import OptionParser
+"""cylc gscan [OPTIONS]
 
-parser = OptionParser( """cylc gsummary [OPTIONS]
-cylc gsummary [OPTIONS]
-
-This is the cylc summary gui for monitoring running suites on a set of
+This is the cylc scan gui for monitoring running suites on a set of
 hosts.
 
 To customize themes copy $CYLC_DIR/conf/gcylcrc/gcylc.rc.eg to
-$HOME/.cylc/gcylc.rc and follow the instructions in the file.""")
-parser.add_option( "--user",
-                   help="User account name (defaults to $USER).",
-                   metavar="USER", default=None,
-                   action="store", dest="owner" )
-parser.add_option( "--host",
-                   help="Host names to monitor (override site default).",
-                   metavar="HOST", action="append",
-                   dest="hosts" )
-parser.add_option( "--poll-interval",
-                   help="Polling interval (time between updates) in seconds",
-                   type="int", metavar="SECONDS", dest="interval" )
-
-( options, args ) = parser.parse_args()
+$HOME/.cylc/gcylc.rc and follow the instructions in the file."""
+
+from cylc.CylcOptionParsers import cop
+
+parser = cop(__doc__, argdoc=[])
+
+parser.add_option("--host",
+                  help="Host names to monitor (override site default).",
+                  metavar="HOST", action="append",
+                  dest="hosts")
+parser.add_option("--poll-interval",
+                  help="Polling interval (time between updates) in seconds",
+                  type="int", metavar="SECONDS", dest="interval")
+
+options, args = parser.parse_args()
 
 import gtk
 import warnings
 warnings.filterwarnings('ignore', 'use the new', Warning)
 
-from cylc.gui.gsummary import SummaryApp
+from cylc.gui.gscan import ScanApp
 
-SummaryApp(hosts=options.hosts, owner=options.owner,
-           poll_interval=options.interval)
+ScanApp(hosts=options.hosts, owner=options.owner,
+        poll_interval=options.interval)
 gtk.main()
diff --git a/bin/cylc-gsummary b/bin/cylc-gsummary
index 7b3ecd8..2d5c032 100755
--- a/bin/cylc-gsummary
+++ b/bin/cylc-gsummary
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/bin/bash
 
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
@@ -16,36 +16,4 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from optparse import OptionParser
-
-parser = OptionParser( """cylc gsummary [OPTIONS]
-cylc gsummary [OPTIONS]
-
-This is the cylc summary gui for monitoring running suites on a set of
-hosts.
-
-To customize themes copy $CYLC_DIR/conf/gcylcrc/gcylc.rc.eg to
-$HOME/.cylc/gcylc.rc and follow the instructions in the file.""")
-parser.add_option( "--user",
-                   help="User account name (defaults to $USER).",
-                   metavar="USER", default=None,
-                   action="store", dest="owner" )
-parser.add_option( "--host",
-                   help="Host names to monitor (override site default).",
-                   metavar="HOST", action="append",
-                   dest="hosts" )
-parser.add_option( "--poll-interval",
-                   help="Polling interval (time between updates) in seconds",
-                   type="int", metavar="SECONDS", dest="interval" )
-
-( options, args ) = parser.parse_args()
-
-import gtk
-import warnings
-warnings.filterwarnings('ignore', 'use the new', Warning)
-
-from cylc.gui.gsummary import SummaryApp
-
-SummaryApp(hosts=options.hosts, owner=options.owner,
-           poll_interval=options.interval)
-gtk.main()
+exec $(dirname $0)/cylc scan "$@"
diff --git a/bin/cylc-gui b/bin/cylc-gui
index 4a0ad14..82a6502 100755
--- a/bin/cylc-gui
+++ b/bin/cylc-gui
@@ -16,24 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove('--use-ssh')
-    from cylc.remote import remrun
-    if remrun().execute():
-        sys.exit(0)
-
-import os
-from cylc.CylcOptionParsers import cop
-import cylc.flags
-
-sys.path.append(
-    os.path.dirname(os.path.realpath(os.path.abspath(__file__))) + '/../lib')
-sys.path.append(
-    os.path.dirname(os.path.realpath(os.path.abspath(__file__))) + '/../')
-
-parser = cop(
-    """cylc gui [OPTIONS] [REG]
+"""cylc gui [OPTIONS] [REG]
 gcylc [OPTIONS] [REG]
 
 This is the cylc Graphical User Interface.
@@ -50,54 +33,78 @@ To see current configuration settings use "cylc get-gui-config".
 
 In the graph view, View -> Options -> "Write Graph Frames" writes .dot graph
 files to the suite share directory (locally, for a remote suite). These can
-be processed into a movie by \$CYLC_DIR/dev/bin/live-graph-movie.sh=.""",
-    pyro=True, noforce=True, jset=True, argdoc=[('[REG]', 'Suite name')])
-
-parser.add_option(
-    "-r", "--restricted",
-    help="Restrict display to 'active' task states: submitted, "
-    "submit-failed, submit-retrying, running, failed, retrying; "
-    "and disable the graph view.  This may be needed for very large "
-    "suites. The state summary icons in the status bar still "
-    "represent all task proxies.",
-    action="store_true", default=False, dest="restricted")
-
-(options, args) = parser.parse_args()
-
-# import modules that require gtk now, so that a display is not needed
-# just to get command help (e.g. when running make on a post-commit hook
-# on a remote repository).
-
-import gtk
-import warnings
-warnings.filterwarnings('ignore', 'use the new', Warning)
-from cylc.gui.app_gcylc import ControlApp
-
-# Make current working directory be $HOME. Otherwise (1) if the user
-# attempts to start gcylc from a CWD that has been removed, Pyro will
-# not be importable below; and (2) if the CWD gets removed later while
-# gcylc is running, subprocesses spawned by gcylc will fail when they
-# attempt to determine their CWD.
-os.chdir(os.environ['HOME'])
-
-gtk.settings_get_default().set_long_property(
-    "gtk-toolbar-icon-size", gtk.ICON_SIZE_SMALL_TOOLBAR, "main")
-gtk.settings_get_default().set_long_property(
-    "gtk-button-images", True, "main")
-gtk.settings_get_default().set_long_property(
-    "gtk-menu-images", True, "main")
-
-if len(args) == 1:
-    suite = args[0]
-else:
-    suite = None
-
-try:
+be processed into a movie by \$CYLC_DIR/dev/bin/live-graph-movie.sh=."""
+
+import sys
+if '--use-ssh' in sys.argv[1:]:
+    sys.argv.remove('--use-ssh')
+    from cylc.remote import remrun
+    if remrun().execute():
+        sys.exit(0)
+
+import os
+from cylc.CylcOptionParsers import cop
+import cylc.flags
+
+
+def main():
+    sys.path.append(
+        os.path.dirname(
+            os.path.realpath(os.path.abspath(__file__))) + '/../lib')
+    sys.path.append(
+        os.path.dirname(os.path.realpath(os.path.abspath(__file__))) + '/../')
+
+    parser = cop(__doc__, pyro=True, noforce=True, jset=True,
+                 argdoc=[('[REG]', 'Suite name')])
+
+    parser.add_option(
+        "-r", "--restricted",
+        help="Restrict display to 'active' task states: submitted, "
+        "submit-failed, submit-retrying, running, failed, retrying; "
+        "and disable the graph view.  This may be needed for very large "
+        "suites. The state summary icons in the status bar still "
+        "represent all task proxies.",
+        action="store_true", default=False, dest="restricted")
+
+    (options, args) = parser.parse_args()
+
+    # import modules that require gtk now, so that a display is not needed
+    # just to get command help (e.g. when running make on a post-commit hook
+    # on a remote repository).
+
+    import gtk
+    import warnings
+    warnings.filterwarnings('ignore', 'use the new', Warning)
+    from cylc.gui.app_gcylc import ControlApp
+
+    # Make current working directory be $HOME. Otherwise (1) if the user
+    # attempts to start gcylc from a CWD that has been removed, Pyro will
+    # not be importable below; and (2) if the CWD gets removed later while
+    # gcylc is running, subprocesses spawned by gcylc will fail when they
+    # attempt to determine their CWD.
+    os.chdir(os.environ['HOME'])
+
+    gtk.settings_get_default().set_long_property(
+        "gtk-toolbar-icon-size", gtk.ICON_SIZE_SMALL_TOOLBAR, "main")
+    gtk.settings_get_default().set_long_property(
+        "gtk-button-images", True, "main")
+    gtk.settings_get_default().set_long_property(
+        "gtk-menu-images", True, "main")
+
+    if len(args) == 1:
+        suite = args[0]
+    else:
+        suite = None
     app = ControlApp(suite, options.db, options.owner, options.host,
                      options.port, options.pyro_timeout, options.templatevars,
                      options.templatevars_file, options.restricted)
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
-gtk.main()
+    gtk.main()
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-hold b/bin/cylc-hold
index 4c64575..9b980e7 100755
--- a/bin/cylc-hold
+++ b/bin/cylc-hold
@@ -16,71 +16,75 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] hold [OPTIONS] ARGS
+
+Hold one or more waiting tasks (cylc hold REG MATCH POINT), or
+a whole suite (cylc hold REG).
+
+Held tasks do not submit even if they are ready to run."""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
+    if remrun().execute(force_required=True):
         sys.exit(0)
 
+import cylc.flags
 from cylc.prompt import prompt
-from cylc import cylc_pyro_client
+from cylc.network.suite_command import SuiteCommandClient
 from cylc.CylcOptionParsers import cop, multitask_usage
-from cylc.command_prep import prep_pyro
-import cylc.flags
 
-parser = cop( """cylc [control] hold [OPTIONS] ARGS
 
-Hold one or more waiting tasks (cylc hold REG MATCH POINT), or
-a whole suite (cylc hold REG).
-
-Held tasks do not submit even if they are ready to run.""" +
-    multitask_usage + """
+def main():
+    parser = cop(
+        __doc__ + multitask_usage + """
 See also 'cylc [control] release'.""", pyro=True, multitask=True,
-    argdoc=[("REG", "Suite name"),
-    ('[MATCH]', 'Task or family name matching regular expression'),
-    ('[POINT]', 'Task cycle point (e.g. date-time or integer)') ])
+        argdoc=[
+            ("REG", "Suite name"),
+            ('[MATCH]', 'Task or family name matching regular expression'),
+            ('[POINT]', 'Task cycle point (e.g. date-time or integer)')])
 
-parser.add_option(
-    "--after",
-    help="Hold whole suite AFTER this cycle point.",
-    metavar="CYCLE_POINT", action="store", dest="hold_point_string")
+    parser.add_option(
+        "--after",
+        help="Hold whole suite AFTER this cycle point.",
+        metavar="CYCLE_POINT", action="store", dest="hold_point_string")
 
-(options, args) = parser.parse_args()
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-suite, pphrase = prep_pyro( args[0], options ).execute()
-
-whole_suite = True
-if len(args) == 3:
-    whole_suite = False
-    name = args[1]
-    point_string = args[2]
-    prompt( 'Hold task(s) ' + name + ' at ' + point_string + ' in ' + suite,
-            options.force )
-elif options.hold_point_string:
-    prompt( 'Hold suite after ' + options.hold_point_string, options.force )
-elif len(args) == 1:
-    prompt( 'Hold suite ' + suite, options.force )
-else:
-    parser.error( "Wrong number of arguments" )
+    whole_suite = True
+    if len(args) == 3:
+        whole_suite = False
+        name = args[1]
+        point_string = args[2]
+        prompt('Hold task(s) %s at %s in %s' % (name, point_string, suite),
+               options.force)
+    elif options.hold_point_string:
+        prompt('Hold suite after %s' % options.hold_point_string,
+               options.force)
+    elif len(args) == 1:
+        prompt('Hold suite %s' % suite, options.force)
+    else:
+        parser.error("Wrong number of arguments")
 
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
     if options.hold_point_string:
-        result = proxy.put( 'hold suite after', options.hold_point_string )
+        pclient.put_command('hold_after_point_string',
+                            options.hold_point_string)
     elif whole_suite:
-        result = proxy.put( 'hold suite now' )
+        pclient.put_command('hold_suite')
     else:
-        result = proxy.put( 'hold task now', name, point_string,
-                            options.is_family )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+        pclient.put_command('hold_task', name, point_string, options.is_family)
+
 
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
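The cylc-hold change just above also shows the client-side half of the
networking refactor in this release: the old cylc_pyro_client proxy and
prep_pyro handshake are replaced by the cylc.network client classes
(SuiteCommandClient for commands, SuiteInfoClient for queries such as
cylc-get-suite-version earlier in this diff). A small sketch of the command
call pattern, mirroring the constructor and put_command() arguments exactly as
cylc-hold uses them above; "suite" and "options" are placeholders for the
parsed command-line values:

    # Not the cylc library itself - just the call pattern visible in the
    # diffs above, wrapped in a helper for illustration.
    from cylc.network.suite_command import SuiteCommandClient


    def hold_whole_suite(suite, options):
        """Ask the running suite daemon to hold itself."""
        pclient = SuiteCommandClient(
            suite, options.owner, options.host, options.pyro_timeout,
            options.port, options.db, my_uuid=options.set_uuid,
            print_uuid=options.print_uuid)
        pclient.put_command('hold_suite')

As the diffs show, the old two-element result checking (result[0]/result[1])
disappears from the command scripts along with the proxy calls.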
diff --git a/bin/cylc-import-examples b/bin/cylc-import-examples
index 0085286..cace729 100755
--- a/bin/cylc-import-examples
+++ b/bin/cylc-import-examples
@@ -20,7 +20,7 @@ set -e
 
 usage() {
     echo ""
-    echo "USAGE: cylc [admin] import-examples DIR [GROUP]"
+    echo "Usage: cylc [admin] import-examples DIR [GROUP]"
     echo ""
     echo "Copy the cylc example suites to DIR/GROUP and register"
     echo "them for use under the GROUP suite name group."
diff --git a/bin/cylc-insert b/bin/cylc-insert
index 296cd20..a618935 100755
--- a/bin/cylc-insert
+++ b/bin/cylc-insert
@@ -16,20 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
-    from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
-        sys.exit(0)
-
-from cylc.prompt import prompt
-from cylc import cylc_pyro_client
-from cylc.CylcOptionParsers import cop, multitask_usage
-from cylc.command_prep import prep_pyro
-import cylc.flags
-
-parser = cop( """cylc [control] insert [OPTIONS] ARGS
+"""cylc [control] insert [OPTIONS] ARGS
 
 Insert task proxies into a running suite. Uses of insertion include:
  1) insert a task that was excluded by the suite definition at start-up.
@@ -44,38 +31,57 @@ will not be added to the pool if it catches up to another task with the
 same ID).
 
 See also 'cylc submit', for running tasks without the scheduler.
-""" + multitask_usage, pyro=True, multitask=True,
-    argdoc=[("REG", "Suite name"),
-        ('MATCH', 'Task or family name matching regular expression'),
-        ('CYCLE_POINT', 'Cycle point (e.g. date-time or integer)'),
-        ('[STOP_POINT]', 'Optional stop cycle point for inserted task.' )])
-
-(options, args) = parser.parse_args()
-
-suite, pphrase = prep_pyro( args[0], options ).execute()
-
-name = args[1]
-point_string = args[2]
-if len(args) == 4:
-    stop_point_string = args[3]
-else:
-    stop_point_string = None
-
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
-    prompt( 'Insert ' + name + ' at ' + point_string + ' in ' + suite,
-            options.force )
-    result = proxy.put(
-        'insert task', name, point_string, options.is_family,
-        stop_point_string )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
-
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+"""
+
+import sys
+if '--use-ssh' in sys.argv[1:]:
+    sys.argv.remove('--use-ssh')
+    from cylc.remote import remrun
+    if remrun().execute(force_required=True):
+        sys.exit(0)
+
+import cylc.flags
+from cylc.prompt import prompt
+from cylc.network.suite_command import SuiteCommandClient
+from cylc.CylcOptionParsers import cop, multitask_usage
+
+
+def main():
+    parser = cop(
+        __doc__ + multitask_usage,
+        pyro=True, multitask=True,
+        argdoc=[
+            ("REG", "Suite name"),
+            ('MATCH', 'Task or family name matching regular expression'),
+            ('CYCLE_POINT', 'Cycle point (e.g. date-time or integer)'),
+            ('[STOP_POINT]', 'Optional stop cycle point for inserted task.')])
+
+    (options, args) = parser.parse_args()
+    suite = args[0]
+
+    name = args[1]
+    point_string = args[2]
+    if len(args) == 4:
+        stop_point_string = args[3]
+    else:
+        stop_point_string = None
+
+    prompt('Insert %s at %s in %s' % (name, point_string, suite),
+           options.force)
+
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+
+    pclient.put_command('insert_task', name, point_string, options.is_family,
+                        stop_point_string)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-job-logs-retrieve b/bin/cylc-job-logs-retrieve
new file mode 100755
index 0000000..a86dd84
--- /dev/null
+++ b/bin/cylc-job-logs-retrieve
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""cylc [hook] job-logs-retrieve [OPTIONS] HOST:HOST-PATH LOCALHOST-PATH
+
+(This command is for internal use.)
+Retrieve logs from a remote host for a task job.
+
+"""
+
+
+import os
+from subprocess import check_call
+import shlex
+import sys
+import traceback
+
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.CylcOptionParsers import cop
+from cylc.wallclock import get_time_string_from_unix_time
+
+
+def main():
+    """CLI main."""
+    # Options and arguments
+    opt_parser = cop(__doc__, argdoc=[
+        ("HOST:HOST-PATH", "Path to remote job logs directory"),
+        ("LOCALHOST-PATH", "Path to local job logs directory"),
+    ])
+    opt_parser.add_option(
+        "--max-size",
+        help="Don't transfer any file larger than SIZE.",
+        action="store", default="10M", dest="max_size", metavar="SIZE")
+    opts, args = opt_parser.parse_args()
+
+    # Determine the remote shell template to use
+    source, target = args
+    source_auth, source_path = source.split(":", 1)
+    if "@" in source_auth:
+        source_owner, source_host = source_auth.split("@", 1)
+    else:
+        source_owner, source_host = (None, source_auth)
+    ssh_tmpl = str(GLOBAL_CFG.get_host_item(
+        "remote shell template", source_host, source_owner)).replace(" %s", "")
+
+    # Retrieve remote job logs
+    # N.B. "scp" does not have a "max-size" option.
+    check_call([
+        "rsync", "-a", "--rsh=" + ssh_tmpl, "--max-size=" + opts.max_size,
+        source + "/", target])
+
+    filenames = os.listdir(target)
+    if "job.out" not in filenames:
+        sys.exit("ERROR: job.out: file not found")
+    sys.stdout.write("%s:\n" % os.path.basename(sys.argv[0]))
+    for filename in filenames:
+        stat = os.stat(os.path.join(target, filename))
+        sys.stdout.write("%s\t%s\t%s\n" % (
+            get_time_string_from_unix_time(stat.st_mtime),
+            stat.st_size,
+            filename))
+
+    os.listdir(target)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/bin/cylc-job-submit b/bin/cylc-job-submit
index 990fd93..baa98cc 100755
--- a/bin/cylc-job-submit
+++ b/bin/cylc-job-submit
@@ -24,15 +24,12 @@ Submit a job file.
 """
 
 
-from cylc.CylcOptionParsers import cop
-from cylc.batch_sys_manager import BATCH_SYS_MANAGER
 import sys
+from cylc.remote import remrun
 
 
 def main():
     """CLI main."""
-
-    # Parse command line
     parser = cop(
         __doc__,
         argdoc=[("JOB-FILE-PATH", "the path of the job file")])
@@ -41,7 +38,6 @@ def main():
         help="Is this being run on a remote job host?",
         action="store_true", dest="remote_mode", default=False)
     opts, args = parser.parse_args()
-
     ret_code, out, err, job_id = BATCH_SYS_MANAGER.job_submit(
         args[0], opts.remote_mode)
     if err:
@@ -54,5 +50,7 @@ def main():
     sys.exit(ret_code)
 
 
-if __name__ == "__main__":
+if __name__ == "__main__" and not remrun().execute():
+    from cylc.CylcOptionParsers import cop
+    from cylc.batch_sys_manager import BATCH_SYS_MANAGER
     main()
diff --git a/bin/cylc-version b/bin/cylc-jobs-kill
similarity index 50%
copy from bin/cylc-version
copy to bin/cylc-jobs-kill
index 402e946..8178707 100755
--- a/bin/cylc-version
+++ b/bin/cylc-jobs-kill
@@ -15,31 +15,30 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""cylc [control] jobs-kill JOB-LOG-ROOT [JOB-LOG-DIR ...]
 
-"""Print the cylc release or git repository version number."""
+(This command is for internal use. Users should use "cylc kill".) Read job
+status files to obtain the names of the batch systems and the job IDs in the
+systems. Invoke the relevant batch system commands to ask the batch systems to
+terminate the jobs.
 
-import sys
-from cylc.remote import remrun
-if remrun().execute():
-    sys.exit(0)
-
-from cylc.CylcOptionParsers import cop
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.version import CYLC_VERSION
-import cylc.flags
+"""
 
 
-parser = cop("""cylc [info] version
-
-Print the cylc version invoked at the command line.
+import sys
+from cylc.remote import remrun
 
-Note that "cylc -v,--version" just prints the version string from the main
-command interface, whereas this is a proper cylc command that can take the
-standard --host and --user options, etc.
 
-For the cylc version of running a suite daemon see
-  "cylc get-suite-version".""")
+def main():
+    """CLI main."""
+    parser = cop(__doc__, argdoc=[
+        ("JOB-LOG-ROOT", "The log/job sub-directory for the suite"),
+        ("[JOB-LOG-DIR ...]", "A point/name/submit_num sub-directory")])
+    args = parser.parse_args()[1]
+    BATCH_SYS_MANAGER.jobs_kill(args[0], args[1:])
 
-(options, args) = parser.parse_args()
 
-print CYLC_VERSION
+if __name__ == "__main__" and not remrun().execute():
+    from cylc.CylcOptionParsers import cop
+    from cylc.batch_sys_manager import BATCH_SYS_MANAGER
+    main()
diff --git a/bin/cylc-reregister b/bin/cylc-jobs-poll
similarity index 51%
copy from bin/cylc-reregister
copy to bin/cylc-jobs-poll
index 7e59537..309e720 100755
--- a/bin/cylc-reregister
+++ b/bin/cylc-jobs-poll
@@ -15,34 +15,29 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""cylc [control] jobs-poll JOB-LOG-ROOT [JOB-LOG-DIR ...]
 
-import sys
-from cylc.remote import remrun
-if remrun().execute():
-    sys.exit(0)
+(This command is for internal use. Users should use "cylc poll".) Read job
+status files to obtain the statuses of the jobs. If necessary, invoke the
+relevant batch system commands to ask the batch systems for more statuses.
 
-from cylc.CylcOptionParsers import cop
-from cylc.registration import localdb
-import cylc.flags
+"""
 
-parser = cop( usage = """cylc [db] reregister|rename [OPTIONS] ARGS
 
-Change the name of a suite (or group of suites) from REG1 to REG2.
-Example:
-  cylc db rereg foo.bar.baz test.baz""",
-       argdoc=[("REG1", "original name"),
-         ("REG2", "new name")])
+import sys
+from cylc.remote import remrun
 
-( options, args ) = parser.parse_args()
 
-arg_from = args[0]
-arg_to = args[1]
+def main():
+    """CLI main."""
+    parser = cop(__doc__, argdoc=[
+        ("JOB-LOG-ROOT", "The log/job sub-directory for the suite"),
+        ("[JOB-LOG-DIR ...]", "A point/name/submit_num sub-directory")])
+    args = parser.parse_args()[1]
+    BATCH_SYS_MANAGER.jobs_poll(args[0], args[1:])
 
-db = localdb( file=options.db )
 
-try:
-    db.reregister( arg_from, arg_to )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+if __name__ == "__main__" and not remrun().execute():
+    from cylc.CylcOptionParsers import cop
+    from cylc.batch_sys_manager import BATCH_SYS_MANAGER
+    main()
diff --git a/bin/cylc-job-submit b/bin/cylc-jobs-submit
similarity index 62%
copy from bin/cylc-job-submit
copy to bin/cylc-jobs-submit
index 990fd93..b746a05 100755
--- a/bin/cylc-job-submit
+++ b/bin/cylc-jobs-submit
@@ -15,44 +15,34 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""cylc [control] jobs-submit JOB-LOG-ROOT [JOB-LOG-DIR ...]
 
-"""cylc [task] job-submit [--remote-mode] JOB-FILE-PATH
-
-(This command is for internal use. Users should use "cylc submit".)
-Submit a job file.
+(This command is for internal use. Users should use "cylc submit".) Submit task
+jobs to relevant batch systems. On a remote job host, this command reads the
+job files from STDIN.
 
 """
 
 
-from cylc.CylcOptionParsers import cop
-from cylc.batch_sys_manager import BATCH_SYS_MANAGER
 import sys
+from cylc.remote import remrun
 
 
 def main():
     """CLI main."""
-
-    # Parse command line
-    parser = cop(
-        __doc__,
-        argdoc=[("JOB-FILE-PATH", "the path of the job file")])
+    parser = cop(__doc__, argdoc=[
+        ("JOB-LOG-ROOT", "The log/job sub-directory for the suite"),
+        ("[JOB-LOG-DIR ...]", "A point/name/submit_num sub-directory")])
     parser.add_option(
         "--remote-mode",
         help="Is this being run on a remote job host?",
         action="store_true", dest="remote_mode", default=False)
     opts, args = parser.parse_args()
-
-    ret_code, out, err, job_id = BATCH_SYS_MANAGER.job_submit(
-        args[0], opts.remote_mode)
-    if err:
-        sys.stderr.write(err)
-    if out:
-        sys.stdout.write(out)
-    if job_id:
-        sys.stdout.write(
-            "%s=%s\n" % (BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID, job_id))
-    sys.exit(ret_code)
+    BATCH_SYS_MANAGER.jobs_submit(
+        args[0], args[1:], remote_mode=opts.remote_mode)
 
 
-if __name__ == "__main__":
+if __name__ == "__main__" and not remrun().execute():
+    from cylc.CylcOptionParsers import cop
+    from cylc.batch_sys_manager import BATCH_SYS_MANAGER
     main()
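For illustration only (the suite, cycle point and task names below are hypothetical), the new positional arguments map onto the jobs_submit() call in main() like this; each JOB-LOG-DIR follows the documented point/name/submit_num layout:

    # Hypothetical values for: cylc jobs-submit JOB-LOG-ROOT JOB-LOG-DIR ...
    job_log_root = "/home/user/cylc-run/my.suite/log/job"  # JOB-LOG-ROOT
    job_log_dirs = [
        "20150101T0000Z/model/01",                          # JOB-LOG-DIR ...
        "20150101T0000Z/post/01",
    ]
    # BATCH_SYS_MANAGER.jobs_submit(job_log_root, job_log_dirs,
    #                               remote_mode=False)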
diff --git a/bin/cylc-jobscript b/bin/cylc-jobscript
index 54883bc..1affc92 100755
--- a/bin/cylc-jobscript
+++ b/bin/cylc-jobscript
@@ -19,27 +19,28 @@
 set -e
 
 usage() {
-    echo ""
-    echo "USAGE: cylc [prep] jobscript [OPTIONS] REG TASK"
-    echo ""
-    echo "Generate a task job script and print it to stdout."
-    echo ""
-    echo "Here's how to capture the script in the vim editor:"
-    echo "  % cylc jobscript REG TASK | vim -"
-    echo "Emacs unfortunately cannot read from stdin:"
-    echo "  % cylc jobscript REG TASK > tmp.sh; emacs tmp.sh"
-    echo ""
-    echo "This command wraps 'cylc [control] submit --dry-run'."
-    echo "Other options (e.g. for suite host and owner) are passed"
-    echo "through to the submit command."
-    echo ""
-    echo "Options:"
-    echo "  -h,--help   - print this usage message."
-    echo " (see also 'cylc submit --help')"
-    echo ""
-    echo "Arguments:"
-    echo "  REG         - Registered suite name."
-    echo "  TASK        - Task ID (NAME.CYCLE_POINT)"
+    cat <<__END__
+Usage: cylc [prep] jobscript [OPTIONS] REG TASK
+
+Generate a task job script and print it to stdout.
+
+Here's how to capture the script in the vim editor:
+  % cylc jobscript REG TASK | vim -
+Emacs unfortunately cannot read from stdin:
+  % cylc jobscript REG TASK > tmp.sh; emacs tmp.sh
+
+This command wraps 'cylc [control] submit --dry-run'.
+Other options (e.g. for suite host and owner) are passed
+through to the submit command.
+
+Options:
+  -h,--help   - print this usage message.
+ (see also 'cylc submit --help')
+
+Arguments:
+  REG         - Registered suite name.
+  TASK        - Task ID (NAME.CYCLE_POINT)
+__END__
 }
 
 for arg in "${@}"; do
diff --git a/bin/cylc-kill b/bin/cylc-kill
index 5e6d72a..0946b3a 100755
--- a/bin/cylc-kill
+++ b/bin/cylc-kill
@@ -16,47 +16,67 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] kill [OPTIONS] ARGS
+
+Kill jobs of active tasks (those in the 'submitted' or 'running' states) and
+update their statuses accordingly.
+
+To kill one or more tasks, use "cylc kill REG MATCH POINT"; to kill all
+active tasks, use "cylc kill REG".
+"""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
+    if remrun().execute(force_required=True):
         sys.exit(0)
 
+import cylc.flags
 from cylc.prompt import prompt
-from cylc import cylc_pyro_client
+from cylc.network.suite_command import SuiteCommandClient
 from cylc.CylcOptionParsers import cop, multitask_usage
-from cylc.command_prep import prep_pyro
-import cylc.flags
 
-parser = cop( """cylc [control] kill [OPTIONS] ARGS
 
-Kill a 'submitted' or 'running' task and update the suite state accordingly.
-""" + multitask_usage, pyro=True, multitask=True,
-        argdoc=[ ('REG', 'Suite name'),
-            ('MATCH', 'Task or family name matching regular expression'),
-            ('POINT', 'Task cycle point (e.g. date-time or integer)') ])
-
-(options, args) = parser.parse_args()
-
-suite, pphrase = prep_pyro( args[0], options ).execute()
-
-name = args[1]
-point_string = args[2]
-
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
-    prompt( 'Kill task ' + name + ' at ' + point_string + ' in ' + suite, options.force )
-    result = proxy.put( 'kill tasks', name, point_string, options.is_family )
-
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+def main():
+    parser = cop(
+        __doc__ + multitask_usage,
+        pyro=True, multitask=True,
+        argdoc=[('REG', 'Suite name'),
+                ('[MATCH]', 'Task or family name matching regular expression'),
+                ('[POINT]', 'Task cycle point (e.g. date-time or integer)')])
+
+    (options, args) = parser.parse_args()
+    suite = args[0]
+
+    if len(args) == 3:
+        name = args[1]
+        point_string = args[2]
+    elif len(args) == 1:
+        name = None
+        point_string = None
+    else:
+        parser.error("Wrong number of arguments.")
+
+    if name and point_string:
+        prompt('Kill task %s at %s in %s' % (name, point_string, suite),
+               options.force)
+    else:
+        prompt('Kill ALL tasks in %s' % (suite), options.force)
+
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+    pclient.put_command('kill_tasks', name, point_string, options.is_family)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
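Both this command and "cylc poll" (further down) now drive the suite through the same client call; a condensed sketch with placeholder option values (the suite, task and point names are made up):

    from cylc.network.suite_command import SuiteCommandClient

    # Placeholders stand in for the owner/host/timeout/port/db options
    # normally parsed by cop() above.
    pclient = SuiteCommandClient(
        "my.suite", None, "localhost", None, None, None)

    # Kill one task...
    pclient.put_command('kill_tasks', 'model', '20150101T0000Z', False)
    # ...or all active tasks (name and point both None):
    pclient.put_command('kill_tasks', None, None, False)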
diff --git a/bin/cylc-list b/bin/cylc-list
index b4197c8..c135a61 100755
--- a/bin/cylc-list
+++ b/bin/cylc-list
@@ -16,127 +16,145 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, sys
+"""cylc [info|prep] list|ls [OPTIONS] ARGS
+
+Print runtime namespace names (tasks and families), the first-parent
+inheritance graph, or actual tasks for a given cycle range.
+
+The first-parent inheritance graph determines the primary task family
+groupings that are collapsible in gcylc suite views and the graph
+viewer tool. To visualize the full multiple inheritance hierarchy use:
+  'cylc graph -n'."""
+
+import os
+import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
-from cylc.CylcOptionParsers import cop
-from cylc.config import config
 import cylc.flags
+from cylc.CylcOptionParsers import cop
+from cylc.config import SuiteConfig
 
-parser = cop( """cylc [info|prep] list|ls [OPTIONS] ARGS
 
-Print runtime namespace names (tasks and families), the first-parent
-inheritance graph, or actual tasks for a given cycle range.
+def main():
 
-The first-parent inheritance graph determines the primary task family
-groupings that are collapsible in gcylc suite views and the graph
-viewer tool. To visualize the full multiple inheritance hierarchy use:
-  'cylc graph -n'.""", jset=True, prep=True )
+    parser = cop(__doc__, jset=True, prep=True)
 
-parser.add_option( "-a","--all-tasks",
+    parser.add_option(
+        "-a", "--all-tasks",
         help="Print all tasks, not just those used in the graph.",
-        action="store_true", default=False, dest="all_tasks" )
+        action="store_true", default=False, dest="all_tasks")
 
-parser.add_option( "-n","--all-namespaces",
+    parser.add_option(
+        "-n", "--all-namespaces",
         help="Print all runtime namespaces, not just tasks.",
-        action="store_true", default=False, dest="all_namespaces" )
+        action="store_true", default=False, dest="all_namespaces")
 
-parser.add_option( "-m","--mro",
+    parser.add_option(
+        "-m", "--mro",
         help="Print the linear \"method resolution order\" for each namespace "
-        "(the multiple-inheritance precedence order as determined by the "
-        "C3 linearization algorithm).",
-        action="store_true", default=False, dest="mro" )
+             "(the multiple-inheritance precedence order as determined by the "
+             "C3 linearization algorithm).",
+        action="store_true", default=False, dest="mro")
 
-parser.add_option( "-t","--tree",
+    parser.add_option(
+        "-t", "--tree",
         help="Print the first-parent inheritance hierarchy in tree form.",
-        action="store_true", default=False, dest="tree" )
+        action="store_true", default=False, dest="tree")
 
-parser.add_option( "-b","--box",
+    parser.add_option(
+        "-b", "--box",
         help="With -t/--tree, using unicode box characters. Your terminal "
-        "must be able to display unicode characters.",
-        action="store_true", default=False, dest="box" )
+             "must be able to display unicode characters.",
+        action="store_true", default=False, dest="box")
 
-parser.add_option( "-w","--with-titles",
-        help="Print namespaces titles too.",
-        action="store_true", default=False, dest="titles" )
+    parser.add_option(
+        "-w", "--with-titles", help="Print namespaces titles too.",
+        action="store_true", default=False, dest="titles")
 
-parser.add_option("-p", "--points",
-        help="Print actual task IDs from the START [through STOP] cycle "
-        "points.", metavar="START[,STOP]", action="store", default=None,
-        dest="crange")
-
-parser.add_option( "-c", "--cycles", help="(deprecated: use -p/--points).",
+    parser.add_option(
+        "-p", "--points",
+        help="Print actual task IDs from the "
+             "START [through STOP] cycle points.",
         metavar="START[,STOP]", action="store", default=None, dest="crange")
 
-(options, args) = parser.parse_args()
-
-suite, suiterc, junk = parser.get_suite()
+    parser.add_option(
+        "-c", "--cycles", help="(deprecated: use -p/--points).",
+        metavar="START[,STOP]", action="store", default=None, dest="crange")
 
-if options.all_tasks and options.all_namespaces:
-    parser.error("Choose either -a or -n")
-if options.all_tasks:
-    which = "all tasks"
-elif options.all_namespaces:
-    which = "all namespaces"
-elif options.crange:
-    which = "crange"
-    try:
-        tr_start, tr_stop = options.crange.split(',')
-    except ValueError:
-        tr_start = tr_stop = options.crange
+    (options, args) = parser.parse_args()
+    suite, suiterc, junk = parser.get_suite()
 
-else:
-    which = "graphed tasks"
+    if options.all_tasks and options.all_namespaces:
+        parser.error("Choose either -a or -n")
+    if options.all_tasks:
+        which = "all tasks"
+    elif options.all_namespaces:
+        which = "all namespaces"
+    elif options.crange:
+        which = "crange"
+        try:
+            tr_start, tr_stop = options.crange.split(',')
+        except ValueError:
+            tr_start = tr_stop = options.crange
+    else:
+        which = "graphed tasks"
 
-if options.tree:
-    if os.environ['LANG'] == 'C' and options.box:
-        print >> sys.stderr, "WARNING, ignoring -t/--tree: $LANG=C"
-        options.tree = False
+    if options.tree:
+        if os.environ['LANG'] == 'C' and options.box:
+            print >> sys.stderr, "WARNING, ignoring -t/--tree: $LANG=C"
+            options.tree = False
 
-if options.titles and options.mro:
-    parser.error( "Please choose --mro or --title, not both")
+    if options.titles and options.mro:
+        parser.error("Please choose --mro or --title, not both")
 
-if options.tree and any( [options.all_tasks, options.all_namespaces, options.mro] ):
-    print >> sys.stderr, "WARNING: -t chosen, ignoring non-tree options."
+    if options.tree and any(
+            [options.all_tasks, options.all_namespaces, options.mro]):
+        print >> sys.stderr, "WARNING: -t chosen, ignoring non-tree options."
 
-try:
-    config = config( suite, suiterc,
+    config = SuiteConfig(
+        suite, suiterc,
         template_vars=options.templatevars,
         template_vars_file=options.templatevars_file)
     if options.tree:
-        config.print_first_parent_tree( pretty=options.box, titles=options.titles )
+        config.print_first_parent_tree(
+            pretty=options.box, titles=options.titles)
     elif options.crange:
-        node_labels = config.get_node_labels( tr_start, tr_stop )
+        node_labels = config.get_node_labels(tr_start, tr_stop)
         node_labels.sort()
         for nl in node_labels:
             print nl
     else:
-        result = config.get_namespace_list( which )
+        result = config.get_namespace_list(which)
         namespaces = result.keys()
         namespaces.sort()
 
-        if ( options.mro or options.titles ):
+        if (options.mro or options.titles):
             # compute padding
             maxlen = 0
             for ns in namespaces:
-                if len(ns) > maxlen: maxlen = len(ns)
-            padding = maxlen*' '
+                if len(ns) > maxlen:
+                    maxlen = len(ns)
+            padding = maxlen * ' '
 
         for ns in namespaces:
             if options.mro:
-                print ns, padding[0:len(padding)-len(ns)],
-                for i in config.get_mro( ns ):
+                print ns, padding[0:len(padding) - len(ns)],
+                for i in config.get_mro(ns):
                     print i,
                 print
             elif options.titles:
-                print ns, padding[0:len(padding)-len(ns)],
+                print ns, padding[0:len(padding) - len(ns)],
                 print result[ns]
             else:
                 print ns
 
-except Exception,x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
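The -m/--mro option prints the C3 linearization of the multiple-inheritance hierarchy, which is the same rule Python itself uses for method resolution order. A self-contained illustration of the concept (the class names are invented and are not cylc runtime namespaces):

    class Root(object): pass
    class Ops(Root): pass
    class Model(Root): pass
    class AtmosModel(Model, Ops): pass

    # C3 linearization, as Python computes it:
    print [cls.__name__ for cls in AtmosModel.__mro__]
    # ['AtmosModel', 'Model', 'Ops', 'Root', 'object']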
diff --git a/bin/cylc-message b/bin/cylc-message
index be88dc6..4664ae9 100755
--- a/bin/cylc-message
+++ b/bin/cylc-message
@@ -15,13 +15,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import os, sys
-from optparse import OptionParser
-from cylc.task_message import message
-import cylc.flags
-
-usage = """cylc [task] message [OPTIONS] MESSAGE
+"""cylc [task] message [OPTIONS] MESSAGE ...
 
 This command is part of the cylc task messaging interface, used by
 running tasks to communicate progress to their parent suite.
@@ -31,68 +25,43 @@ Other messages received by the suite daemon will just be logged.
 
 Suite and task identity are determined from the task execution
 environment supplied by the suite (or by the single task 'submit'
-command, in which case case the message is just printed to stdout).
-
-See also:
-    cylc [task] started
-    cylc [task] succeeded
-    cylc [task] failed"""
-
-parser = OptionParser( usage )
+command, in which case the message is just printed to stdout)."""
 
-parser.add_option( "-p",
-        metavar="PRIORITY", type="choice",
-        choices=[ 'NORMAL', 'WARNING', 'CRITICAL' ],
-        help="message priority: NORMAL, WARNING, or CRITICAL; default NORMAL.",
-        action="store", dest="priority", default="NORMAL" )
-
-parser.add_option( "--next-restart-completed",
-        help="Report next restart file(s) completed",
-        action="store_true", dest="next_restart_completed" )
-
-parser.add_option( "--all-restart-outputs-completed",
-        help="Report all restart outputs completed at once.",
-        action="store_true", dest="all_restarts_completed" )
 
-parser.add_option( "--all-outputs-completed",
-        help="Report all internal outputs completed at once.",
-        action="store_true", dest="all_outputs_completed" )
+import os
+import sys
+from optparse import OptionParser
+import cylc.flags
+from cylc.task_message import TaskMessage
 
-parser.add_option( "-v", "--verbose",
-        help="Verbose output mode.",
-        action="store_true", default=False, dest="verbose" )
 
-( options, args ) = parser.parse_args()
-cylc.flags.verbose = options.verbose
+def main():
+    """CLI."""
+    parser = OptionParser(__doc__)
 
-if options.next_restart_completed:
-    message().shortcut_next_restart()
-    sys.exit(0)
+    parser.add_option(
+        "-p", "--priority", metavar="PRIORITY", type="choice",
+        choices=['NORMAL', 'WARNING', 'CRITICAL'],
+        help="message priority: NORMAL, WARNING, or CRITICAL; default NORMAL.",
+        action="store", dest="priority", default="NORMAL")
 
-elif options.all_restarts_completed:
-    message().shortcut_all_restarts()
-    sys.exit(0)
+    parser.add_option(
+        "-v", "--verbose", help="Verbose output mode.", action="store_true",
+        default=False, dest="verbose")
 
-elif options.all_outputs_completed:
-    message().shortcut_all_outputs()
-    sys.exit(0)
+    options, args = parser.parse_args()
+    if not args:
+        parser.error("No task message supplied")
 
-elif len( args ) == 0:
-    parser.error( "No task message supplied" )
+    try:
+        TaskMessage(priority=options.priority).send(args)
+    except Exception as exc:
+        print >> sys.stderr, 'ERROR: task messaging failure.'
+        if os.getenv("CYLC_DEBUG") in ["True", "true"]:
+            import traceback
+            traceback.print_exc()
+        raise SystemExit(exc)
 
-debug = False
-try:
-    # from task execution environment
-    if os.environ['CYLC_DEBUG'] == 'True':
-        debug = True
-except KeyError:
-    pass
 
-msg = ' '.join( args )
-try:
-    message( msg=msg, priority=options.priority ).send()
-except Exception, x:
-    print >> sys.stderr, 'ERROR: task messaging failure.'
-    if debug:
-        raise
-    raise SystemExit(x)
+if __name__ == "__main__":
+    main()
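A hedged sketch of the new task-side call (the message text is invented); TaskMessage.send() receives the remaining command-line arguments as a list, exactly as in main() above:

    from cylc.task_message import TaskMessage

    # Equivalent to: cylc message -p WARNING "disk space is running low"
    TaskMessage(priority="WARNING").send(["disk space is running low"])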
diff --git a/bin/cylc-monitor b/bin/cylc-monitor
index 0aa81ac..a3cc081 100755
--- a/bin/cylc-monitor
+++ b/bin/cylc-monitor
@@ -17,7 +17,7 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
-Display the state of all existing task proxy objects.
+Display the state of live task proxies in a running suite.
 
 For color terminal ASCII escape codes, see
 http://ascii-table.com/ansi-escape-sequences.php
@@ -26,233 +26,235 @@ http://ascii-table.com/ansi-escape-sequences.php
 import sys
 if '--use-ssh' in sys.argv[1:]:
     # requires local terminal
-    sys.exit( "No '--use-ssh': this command requires a local terminal." )
+    sys.exit("No '--use-ssh': this command requires a local terminal.")
 
-import os, re
-from copy import copy
+import os
+import re
 from time import sleep
-from cylc import cylc_pyro_client
-from cylc.passphrase import SecurityError
+
+from parsec.OrderedDict import OrderedDict
 from cylc.CylcOptionParsers import cop
 from cylc.task_state import task_state
+from cylc.registration import localdb
+from cylc.network.suite_state import (
+    StateSummaryClient, SuiteStillInitialisingError)
 from cylc.wallclock import get_time_string_from_unix_time
-from Pyro.errors import ProtocolError,TimeoutError,ConnectionClosedError
-from cylc.command_prep import prep_pyro
-import cylc.flags
-
-parser = cop("""cylc [info] monitor [OPTIONS] ARGS
-
-A terminal-based live suite monitor.""", pyro=True, noforce=True)
-
-parser.add_option(
-    "-a", "--align",
-    help="Align task names. Only useful for small suites.",
-    action="store_true", default=False, dest="align_columns")
-
-parser.add_option(
-    "-r", "--restricted",
-    help="Restrict display to 'active' task states: submitted, "
-    "submit-failed, submit-retrying, running, failed, retrying. "
-    "This may be needed for very large suites. The state summary "
-    "line still represents all task proxies.",
-    action="store_true", default=False, dest="restricted")
-
-parser.add_option(
-    "-o", "--once",
-    help="Show a single view then exit.",
-    action="store_true", default=False, dest="once")
-
-parser.add_option(
-    "-u", "--runahead",
-    help="Display task proxies in the runahead pool (off by default).",
-    action="store_true", default=False, dest="display_runahead")
-
-(options, args) = parser.parse_args()
-suite, pphrase = prep_pyro(args[0], options).execute()
-
-legend = ''
-for state in task_state.legal:
-    legend += "%s%s%s " % (task_state.ctrl[state], state, task_state.ctrl_end)
-legend = legend.rstrip()
-len_header = sum(len(s) for s in task_state.legal) + len(task_state.legal) - 1
-
-alerted = False
-alerted2 = False
-alerted3 = False
-try:
-    while True:
-        try:
-            proxy = cylc_pyro_client.client(
-                    suite, pphrase, options.owner, options.host,
-                    options.pyro_timeout, options.port
-                    ).get_proxy('state_summary')
-        except SecurityError as exc:
-            if cylc.flags.debug:
-                raise
-            raise SystemExit(exc)
-        except Exception as exc:
-            if not alerted:
-                print "\n\033[1;37;41mfailed to connect%s" % (
-                        task_state.ctrl_end)
-                print >> sys.stderr, exc 
-                alerted = True
-            # Try again to connect.
-            sleep(1)
-            continue
-        else:
-            proxy._setTimeout(60)
-        alerted = False
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+
+
+class SuiteMonitor(object):
+    def __init__(self):
+        self.parser = cop(
+            """cylc [info] monitor [OPTIONS] ARGS
+
+A terminal-based live suite monitor.  Exit with 'Ctrl-C'.""",
+            pyro=True, noforce=True)
+
+        self.parser.add_option(
+            "-a", "--align",
+            help="Align task names. Only useful for small suites.",
+            action="store_true", default=False, dest="align_columns")
+
+        self.parser.add_option(
+            "-r", "--restricted",
+            help="Restrict display to 'active' task states: submitted, "
+            "submit-failed, submit-retrying, running, failed, retrying. "
+            "This may be needed for very large suites. The state summary "
+            "line still represents all task proxies.",
+            action="store_true", default=False, dest="restricted")
+
+        def_sort_order = GLOBAL_CFG.get(["monitor", "sort order"])
+
+        self.parser.add_option(
+            "-s", "--sort", metavar="ORDER",
+            help="Task sort order: \"definition\" or \"alphanumeric\"."
+            "The default is " + def_sort_order + " order, as determined by "
+            "global config. (Definition order is the order that tasks appear "
+            "under [runtime] in the suite definition).",
+            action="store", default=def_sort_order, dest="sort_order")
+
+        self.parser.add_option(
+            "-o", "--once",
+            help="Show a single view then exit.",
+            action="store_true", default=False, dest="once")
+
+        self.parser.add_option(
+            "-u", "--runahead",
+            help="Display task proxies in the runahead pool (off by default).",
+            action="store_true", default=False, dest="display_runahead")
+
+        self.parser.add_option(
+            "-i", "--interval",
+            help="Interval between suite state retrievals, "
+                 "in seconds (default 1).",
+            metavar="SECONDS", action="store", default=1,
+            dest="update_interval")
+
+    def run(self):
+        (options, args) = self.parser.parse_args()
+        suite = args[0]
+
+        client_name = os.path.basename(sys.argv[0])
+        if options.restricted:
+            client_name += " -r"
+
+        legend = ''
+        for state in task_state.legal:
+            legend += "%s%s%s" % (
+                task_state.ctrl[state], state, task_state.ctrl_end)
+        legend = legend.rstrip()
+        len_header = sum(len(s) for s in task_state.legal)
+
+        self.pclient = StateSummaryClient(
+            suite, options.owner, options.host, options.pyro_timeout,
+            options.port, options.db)
 
         while True:
             try:
-                glbl, task_summaries, fam_summaries = proxy.get_state_summary()
-            except TimeoutError:
-                if not alerted3:
-                    print "\n\033[1;37;41mconnection timed out%s" % (
-                            task_state.ctrl_end)
-                    alerted3 = True
-                # Try again on same connection.
-                sleep(1)
-                continue
-            except ConnectionClosedError:
-                if not alerted2:
-                    print "\n\033[1;37;41mconnection closed%s" % (
-                            task_state.ctrl_end)
-                    alerted2 = True
-                sleep(1)
-                # Try to reconnect.
-                break
-            except ProtocolError:
-                if not alerted:
-                    print "\n\033[1;37;41mfailed to connect%s" % (
-                            task_state.ctrl_end)
-                    alerted = True
-                sleep(1)
-                continue
-
-            states = [
-                    t["state"] for t in task_summaries.values() if (
-                    "state" in t)]
-
-            state_count = {}
-            for state in states:
-                state_count.setdefault(state, 0)
-                state_count[state] += 1
-            state_totals = state_count.items()
-            state_totals.sort()
-            state_totals.sort(lambda x, y: cmp(y[1], x[1]))
-
-            n_tasks_total = len(states) 
-            if options.restricted:
-                task_summaries = dict(
+                glbl, task_summaries, fam_summaries = (
+                    self.pclient.get_suite_state_summary())
+            except SuiteStillInitialisingError as exc:
+                print str(exc)
+            except Exception as exc:
+                print >> sys.stderr, "\033[1;37;41mERROR%s" % (
+                    task_state.ctrl_end), str(exc)
+                self.pclient.reset()
+            else:
+                states = [t["state"] for t in task_summaries.values() if (
+                          "state" in t)]
+                n_tasks_total = len(states)
+                if options.restricted:
+                    task_summaries = dict(
                         (i, j) for i, j in task_summaries.items() if (
-                        j['state'] in
-                        task_state.legal_for_restricted_monitoring))
-            if not options.display_runahead:
-                task_summaries = dict(
+                            j['state'] in
+                            task_state.legal_for_restricted_monitoring))
+                if not options.display_runahead:
+                    task_summaries = dict(
                         (i, j) for i, j in task_summaries.items() if (
-                        j['state'] != 'runahead' ))
- 
-            alerted2 = False
-            alerted3 = False
-            try:
-                updated_at = get_time_string_from_unix_time(
+                            j['state'] != 'runahead'))
+                try:
+                    updated_at = get_time_string_from_unix_time(
                         glbl['last_updated'])
-            except (TypeError, ValueError):
-                # Older suite.
-                updated_at = glbl['last_updated'].isoformat()
-
-            run_mode = glbl['run_mode']
-            paused = glbl['paused']
-            stopping = glbl['stopping']
-            will_pause_at = glbl['will_pause_at']
-            will_stop_at = glbl['will_stop_at']
-
-            task_info = {}
-            name_list = set()
-            task_ids = task_summaries.keys()
-            for task_id in task_ids:
-                name = task_summaries[task_id]['name']
-                point_string = task_summaries[task_id]['label']
-                state = task_summaries[task_id]['state']
-                name_list.add(name)
-                if point_string not in task_info:
-                    task_info[point_string] = {}
-                task_info[point_string][name] = "%s%s%s" % (
+                except (TypeError, ValueError):
+                    # Older suite.
+                    updated_at = glbl['last_updated'].isoformat()
+
+                run_mode = glbl['run_mode']
+                paused = glbl['paused']
+                stopping = glbl['stopping']
+                will_pause_at = glbl['will_pause_at']
+                will_stop_at = glbl['will_stop_at']
+                ns_defn_order = glbl['namespace definition order']
+
+                task_info = {}
+                name_list = set()
+                task_ids = task_summaries.keys()
+                for task_id in task_ids:
+                    name = task_summaries[task_id]['name']
+                    point_string = task_summaries[task_id]['label']
+                    state = task_summaries[task_id]['state']
+                    name_list.add(name)
+                    if point_string not in task_info:
+                        task_info[point_string] = {}
+                    task_info[point_string][name] = "%s%s%s" % (
                         task_state.ctrl[state], name, task_state.ctrl_end)
 
-            # Construct lines to blit to the screen.
-            blit = []
-            prefix = suite
-            suffix = "%d tasks" % n_tasks_total
-            title_str = ' ' * len_header
-            title_str = prefix + title_str[len(prefix):]
-            title_str = '\033[1;37;44m%s%s%s' % (
-                    title_str[:-len(suffix)], suffix, task_state.ctrl_end)
-            blit.append(title_str)
-            blit.append(legend)
+                # Sort the tasks in each cycle point.
+                if options.sort_order == "alphanumeric":
+                    sorted_name_list = sorted(name_list)
+                else:
+                    sorted_name_list = ns_defn_order
 
-            mode_str = "%s mode" % run_mode
-            if options.restricted:
-                mode_str = "%s %srestricted display%s" % (
-                        mode_str, task_state.ctrl['failed'],
-                        task_state.ctrl_end)
-            blit.append(mode_str)
+                sorted_task_info = {}
+                for point_str, info in task_info.items():
+                    sorted_task_info[point_str] = OrderedDict()
+                    for name in sorted_name_list:
+                        if name in name_list:
+                            # (Defn order includes family names.)
+                            sorted_task_info[point_str][name] = info.get(name)
 
-            updated_str = "last update: %s%s%s" % (
+                # Construct lines to blit to the screen.
+                blit = []
+
+                suite_name = suite
+                if run_mode != "live":
+                    suite_name += " (%s)" % run_mode
+                prefix = "%s - %d tasks" % (suite_name, int(n_tasks_total))
+                suffix = "%s %s" % (client_name, self.pclient.my_uuid)
+                title_str = ' ' * len_header
+                title_str = prefix + title_str[len(prefix):]
+                title_str = '\033[1;37;44m%s%s%s' % (
+                    title_str[:-len(suffix)], suffix, task_state.ctrl_end)
+                blit.append(title_str)
+                blit.append(legend)
+
+                updated_str = "updated: %s%s%s" % (
                     '\033[1;38m', updated_at, task_state.ctrl_end)
-            blit.append(updated_str)
+                blit.append(updated_str)
 
-            summary = 'state summary:'
-            for state, tot in state_totals:
-                summary += ' %s %d %s' % (
+                summary = 'state summary:'
+                try:
+                    state_totals = glbl['state totals']
+                except KeyError:
+                    # Back-compat for suite daemons <= 6.4.1.
+                    state_totals = {}
+                    for state in states:
+                        state_totals.setdefault(state, 0)
+                        state_totals[state] += 1
+                for state, tot in state_totals.items():
+                    summary += '%s %d %s' % (
                         task_state.ctrl[state], tot, task_state.ctrl_end)
-            blit.append(summary)
-
-            if stopping:
-                suffix = 'S_T_O_P_P_I_N_G'
-            elif paused:
-                suffix = 'P_A_U_S_E_D'
-            elif will_pause_at:
-                suffix = 'P_A_U_S_I_N_G__A_T__' + will_pause_at
-            elif will_stop_at:
-                suffix = 'S_T_O_P_P_I_N_G__A_T__' + will_stop_at
-            else:
-                suffix = 'R_U_N_N_I_N_G'
-            divider_str = '_'*len_header
-            divider_str = "\033[1;31m%s%s%s" % (
+                blit.append(summary)
+
+                if stopping:
+                    suffix = 'S_T_O_P_P_I_N_G'
+                elif paused:
+                    suffix = 'P_A_U_S_E_D'
+                elif will_pause_at:
+                    suffix = 'P_A_U_S_I_N_G__A_T__' + will_pause_at
+                elif will_stop_at:
+                    suffix = 'S_T_O_P_P_I_N_G__A_T__' + will_stop_at
+                else:
+                    suffix = 'R_U_N_N_I_N_G'
+                divider_str = '_' * len_header
+                divider_str = "\033[1;31m%s%s%s" % (
                     divider_str[:-len(suffix)], suffix, task_state.ctrl_end)
-            blit.append(divider_str)
+                blit.append(divider_str)
 
-            # Info by cycle point.
-            blitlines = {}
-            for point_str, val in task_info.items():
-                indx = point_str
-                line = "%s%s%s" % (
+                blitlines = {}
+                for point_str, val in sorted_task_info.items():
+                    indx = point_str
+                    line = "%s%s%s" % (
                         '\033[1;34m', point_str, task_state.ctrl_end)
-                for name, info in val.items():
-                    if not options.align_columns and re.match('^\s+$', info):
-                        pass
-                    else:
-                        line = "%s %s" % (line, info)
-                blitlines[indx] = line
-
-            if not options.once:
-                os.system("clear")
-            print '\n'.join(blit)
-            indxs = blitlines.keys()
-            try:
-                int(indxs[1])
-            except:
-                indxs.sort()
-            else:
-                indxs.sort(key=int)
-            for ix in indxs:
-                print blitlines[ix]
+                    for name, info in val.items():
+                        if info is not None:
+                            line += " %s" % info
+                        elif options.align_columns:
+                            line += " %s" % (' ' * len(name))
+                    blitlines[indx] = line
+
+                if not options.once:
+                    os.system("clear")
+                print '\n'.join(blit)
+                indxs = blitlines.keys()
+                try:
+                    int(indxs[1])
+                except (IndexError, ValueError):
+                    indxs.sort()
+                else:
+                    indxs.sort(key=int)
+                for ix in indxs:
+                    print blitlines[ix]
+
             if options.once:
                 break
-            sleep(1)
-        if options.once:
-            break
-except KeyboardInterrupt:
-    sys.exit(0)
+            else:
+                sleep(float(options.update_interval))
+
+
+if __name__ == "__main__":
+    monitor = SuiteMonitor()
+    try:
+        monitor.run()
+    except KeyboardInterrupt:
+        monitor.pclient.signout()
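The back-compat branch for suite daemons <= 6.4.1 simply tallies the states client-side when the daemon does not provide 'state totals'; as a stand-alone sketch of that tally:

    def count_states(task_summaries):
        """Return {state: count} from a task-summary dict (back-compat path)."""
        state_totals = {}
        for summary in task_summaries.values():
            state = summary.get("state")
            if state is None:
                continue
            state_totals.setdefault(state, 0)
            state_totals[state] += 1
        return state_totals

    # {'a.1': {'state': 'running'}, 'b.1': {'state': 'running'}}
    #   -> {'running': 2}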
diff --git a/bin/cylc-nudge b/bin/cylc-nudge
index 364f83d..ce731b3 100755
--- a/bin/cylc-nudge
+++ b/bin/cylc-nudge
@@ -16,45 +16,53 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] nudge [OPTIONS] ARGS
+
+Cause the cylc task processing loop to be invoked in a running suite.
+
+This happens automatically when the state of any task changes such that
+task processing (dependency negotiation etc.) is required, or if a
+clock-trigger task is ready to run.
+
+The main reason to use this command is to update the "estimated time till
+completion" intervals shown in the tree-view suite control GUI, during
+periods when nothing else is happening.
+"""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
     if remrun().execute():
         sys.exit(0)
 
-from cylc import cylc_pyro_client
-from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_pyro
 import cylc.flags
+from cylc.network.suite_command import SuiteCommandClient
+from cylc.CylcOptionParsers import cop
 
-parser = cop( """cylc [control] nudge [OPTIONS] ARGS
 
-Cause the cylc task processing loop to be invoked in a running suite.
+def main():
+    parser = cop(__doc__, pyro=True)
 
-This happens automatically when the state of any task changes such that
-task processing (dependency negotation etc.) is required, or if a
-clock-triggered task is ready to run.
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-The main reason to use this command is to update the "estimated time till
-completion" intervals shown in the tree-view suite control GUI, during
-periods when nothing else is happening.""", pyro=True )
-
-(options, args) = parser.parse_args()
-
-suite, pphrase = prep_pyro( args[0], options ).execute()
-
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
-    result = proxy.put( 'nudge suite' )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
-
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+
+    success, msg = pclient.put_command('nudge')
+    if success:
+        print msg
+    else:
+        sys.exit(msg)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-ping b/bin/cylc-ping
index c8e712f..12e42c1 100755
--- a/bin/cylc-ping
+++ b/bin/cylc-ping
@@ -16,67 +16,68 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [discovery] ping [OPTIONS] ARGS
+
+If suite REG is running or TASK in suite REG is currently in the 'running'
+state, exit with success status, else exit with error status."""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
     if remrun().execute():
         sys.exit(0)
 
+import cylc.flags
 from cylc.CylcOptionParsers import cop
 from cylc.task_id import TaskID
-from cylc import cylc_pyro_client
-from cylc.command_prep import prep_pyro
+from cylc.network.suite_info import SuiteInfoClient
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
-import cylc.flags
 
-parser = cop( """cylc [discovery] ping [OPTIONS] ARGS
 
-If suite REG is running or TASK in suite REG is currently in the 'running'
-state exit with success status, else exit with error status.""",
-    pyro=True,
-    argdoc=[('REG', 'Suite name'),
-            ('[TASK]', 'Task ' + TaskID.SYNTAX)])
+def main():
+    parser = cop(
+        __doc__, pyro=True,
+        argdoc=[('REG', 'Suite name'), ('[TASK]', 'Task ' + TaskID.SYNTAX)])
 
-parser.add_option( "--print-ports",
+    parser.add_option(
+        "--print-ports",
         help="Print the port range from the cylc site config file.",
-        action="store_true",default=False, dest="print_ports" )
-
-( options, args ) = parser.parse_args()
-
-if options.print_ports:
-    base = GLOBAL_CFG.get( ['pyro','base port'] )
-    range = GLOBAL_CFG.get( ['pyro','maximum number of ports'] )
-    print base, '<= port <=', base + range
-    sys.exit(0)
-
-suite, pphrase = prep_pyro( args[0], options ).execute()
-
-# cylc ping SUITE
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'suite-info' )
-    res = proxy.get( 'ping suite' ) # (no need to check the result)
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-
-if len(args) == 1:
-    sys.exit(0)
-
-# cylc ping SUITE TASKID
-task_id = args[1]
-if not TaskID.is_valid_id(task_id):
-    sys.exit("Invalid task ID: " + task_id)
-
-try:
-    res, msg = proxy.get( 'ping task', task_id )
-except Exception,x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-
-if not res:
-    sys.exit( 'ERROR: ' + msg )
+        action="store_true", default=False, dest="print_ports")
+
+    (options, args) = parser.parse_args()
+
+    if options.print_ports:
+        base = GLOBAL_CFG.get(['pyro', 'base port'])
+        max_num_ports = GLOBAL_CFG.get(['pyro', 'maximum number of ports'])
+        print base, '<= port <=', base + max_num_ports
+        sys.exit(0)
+
+    suite = args[0]
+
+    pclient = SuiteInfoClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+
+    # cylc ping SUITE
+    pclient.get_info('ping_suite')  # (no need to check the result)
+    if len(args) == 1:
+        sys.exit(0)
+
+    # cylc ping SUITE TASKID
+    task_id = args[1]
+    if not TaskID.is_valid_id(task_id):
+        sys.exit("Invalid task ID: " + task_id)
+    success, msg = pclient.get_info('ping_task', task_id)
+    if not success:
+        sys.exit('ERROR: ' + msg)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
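Because the command reports only through its exit status, wrapper scripts can test it directly; a minimal sketch (the suite name is hypothetical):

    import subprocess

    # Exit status 0 means the suite is running
    # (or, with a TASK argument, that the task is in the 'running' state).
    if subprocess.call(["cylc", "ping", "my.suite"]) == 0:
        print "my.suite is up"
    else:
        print "my.suite is not running"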
diff --git a/bin/cylc-poll b/bin/cylc-poll
index ee5e015..07a7475 100755
--- a/bin/cylc-poll
+++ b/bin/cylc-poll
@@ -16,23 +16,11 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
-    from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
-        sys.exit(0)
+"""cylc [control] poll [OPTIONS] ARGS
 
-from cylc.prompt import prompt
-from cylc import cylc_pyro_client
-from cylc.CylcOptionParsers import cop, multitask_usage
-from cylc.command_prep import prep_pyro
-import cylc.flags
-
-parser = cop("""cylc [control] poll [OPTIONS] ARGS
-
-Poll active tasks (those in the 'submitted' or 'running' states) to verify
-or update their statuses - even if they have suffered an external hard kill.
+Poll jobs of active tasks (those in the 'submitted' or 'running' states) to
+verify or update their statuses - even if they have suffered an external hard
+kill.
 
 To poll one or more tasks, "cylc poll REG MATCH POINT"; to poll all active
 tasks: "cylc poll REG".
@@ -43,38 +31,58 @@ that do not allow any communication by RPC (pyro) or ssh back to the suite host
 
 Polling is also done automatically on restarting a suite, for any tasks that
 were recorded as submitted or running when the suite went down.
-""" + multitask_usage, pyro=True, multitask=True,
+"""
+
+import sys
+if '--use-ssh' in sys.argv[1:]:
+    sys.argv.remove('--use-ssh')
+    from cylc.remote import remrun
+    if remrun().execute(force_required=True):
+        sys.exit(0)
+
+import cylc.flags
+from cylc.prompt import prompt
+from cylc.network.suite_command import SuiteCommandClient
+from cylc.CylcOptionParsers import cop, multitask_usage
+
+
+def main():
+    parser = cop(
+        __doc__ + multitask_usage,
+        pyro=True, multitask=True,
         argdoc=[('REG', 'Suite name'),
-            ('[MATCH]', 'Task or family name matching regular expression'),
-            ('[POINT]', 'Task cycle point (e.g. date-time or integer)')])
-
-(options, args) = parser.parse_args()
-
-suite, pphrase = prep_pyro(args[0], options).execute()
-
-if len(args) == 3:
-    name = args[1]
-    point_string = args[2]
-elif len(args) == 1:
-    name = str(None)
-    point_string = str(None)
-else:
-    parser.error("Wrong number of arguments.")
-
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
-    prompt( 'Poll task ' + name + ' at ' + point_string + ' in ' + suite,
-            options.force )
-    result = proxy.put( 'poll tasks', name, point_string, options.is_family )
-
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+                ('[MATCH]', 'Task or family name matching regular expression'),
+                ('[POINT]', 'Task cycle point (e.g. date-time or integer)')])
+
+    (options, args) = parser.parse_args()
+    suite = args[0]
+
+    if len(args) == 3:
+        name = args[1]
+        point_string = args[2]
+    elif len(args) == 1:
+        name = None
+        point_string = None
+    else:
+        parser.error("Wrong number of arguments.")
+
+    if name and point_string:
+        prompt('Poll task %s at %s in %s' % (name, point_string, suite),
+               options.force)
+    else:
+        prompt('Poll ALL tasks in %s' % (suite), options.force)
+
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+    pclient.put_command('poll_tasks', name, point_string, options.is_family)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-print b/bin/cylc-print
index b158539..319c9b2 100755
--- a/bin/cylc-print
+++ b/bin/cylc-print
@@ -16,81 +16,88 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [db] print [OPTIONS] [REGEX]
+
+Print suite database registrations.
+
+Note on result filtering:
+  (a) The filter patterns are Regular Expressions, not shell globs, so
+the general wildcard is '.*' (match zero or more of anything), NOT '*'.
+  (b) For printing purposes there is an implicit wildcard at the end of
+each pattern ('foo' is the same as 'foo.*'); use the string end marker
+to prevent this ('foo$' matches only literal 'foo')."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
-import os, re
+import os
+import re
+
+import cylc.flags
 from cylc.CylcOptionParsers import cop
 from cylc.registration import localdb
 from cylc.print_tree import print_tree
 from cylc.regpath import RegPath
-import cylc.flags
 
-def get_padding( reglist ):
+
+def get_padding(reglist):
     maxlen = 0
     for reg in reglist:
         items = RegPath(reg[0]).get_list()
-        for i in range(0,len(items)):
+        for i in range(0, len(items)):
             if i == 0:
                 tmp = len(items[i])
             else:
-                tmp = 2*i + 1 + len(items[i])
+                tmp = 2 * i + 1 + len(items[i])
             if tmp > maxlen:
                 maxlen = tmp
     return maxlen * ' '
 
-parser = cop( usage = """cylc [db] print [OPTIONS] [REGEX]
 
-Print suite database registrations.
+def main():
+    parser = cop(__doc__,
+                 argdoc=[('[REGEX]', 'Suite name regular expression pattern')])
 
-Note on result filtering:
-  (a) The filter patterns are Regular Expressions, not shell globs, so
-the general wildcard is '.*' (match zero or more of anything), NOT '*'.
-  (b) For printing purposes there is an implicit wildcard at the end of
-each pattern ('foo' is the same as 'foo.*'); use the string end marker
-to prevent this ('foo$' matches only literal 'foo').""",
-        argdoc=[('[REGEX]', 'Suite name regular expression pattern')])
-
-parser.add_option( "-t","--tree",
-        help="Print registrations in nested tree form.",
-        action="store_true", default=False, dest="tree" )
+    parser.add_option(
+        "-t", "--tree", help="Print registrations in nested tree form.",
+        action="store_true", default=False, dest="tree")
 
-parser.add_option( "-b", "--box",
+    parser.add_option(
+        "-b", "--box",
         help="Use unicode box drawing characters in tree views.",
-        action="store_true", default=False, dest="unicode" )
+        action="store_true", default=False, dest="unicode")
 
-parser.add_option( "-a","--align",
-        help="Align columns.",
-        action="store_true", default=False, dest="align" )
+    parser.add_option(
+        "-a", "--align", help="Align columns.",
+        action="store_true", default=False, dest="align")
 
-parser.add_option( "-x",
-        help="don't print suite definition directory paths.",
-        action="store_true", default=False, dest="x" )
+    parser.add_option(
+        "-x", help="don't print suite definition directory paths.",
+        action="store_true", default=False, dest="x")
 
-parser.add_option( "-y",
-        help="Don't print suite titles.",
-        action="store_true", default=False, dest="y" )
+    parser.add_option(
+        "-y", help="Don't print suite titles.",
+        action="store_true", default=False, dest="y")
 
-parser.add_option( "--fail",
-        help="Fail (exit 1) if no matching suites are found.",
-        action="store_true", default=False, dest="fail" )
+    parser.add_option(
+        "--fail", help="Fail (exit 1) if no matching suites are found.",
+        action="store_true", default=False, dest="fail")
 
-( options, args ) = parser.parse_args()
+    (options, args) = parser.parse_args()
 
-if len(args) == 0:
-    regfilter = None
-elif len(args) == 1:
-    regfilter = args[0]
-else:
-    parser.error( "Wrong number of arguments.")
+    if len(args) == 0:
+        regfilter = None
+    elif len(args) == 1:
+        regfilter = args[0]
+    else:
+        parser.error("Wrong number of arguments.")
 
-db = localdb(file=options.db )
-try:
+    db = localdb(file=options.db)
     allsuites = db.get_list(regfilter)
     if options.fail and len(allsuites) == 0:
-        raise SystemExit( 'ERROR: no suites matched.' )
+        raise SystemExit('ERROR: no suites matched.')
     if not options.tree:
         if options.align:
             maxlen_suite = 0
@@ -103,10 +110,10 @@ try:
             spacer_suite = maxlen_suite * ' '
             spacer_title = maxlen_title * ' '
         for suite, dir, title in allsuites:
-            dir = re.sub( '^' + os.environ['HOME'], '~', dir )
+            dir = re.sub('^' + os.environ['HOME'], '~', dir)
             if options.align:
-                suite = suite + spacer_suite[ len(suite): ]
-                title = title + spacer_title[ len(title): ]
+                suite = suite + spacer_suite[len(suite):]
+                title = title + spacer_title[len(title):]
             if not options.x and not options.y:
                 line = suite + ' | ' + title + ' | ' + dir
             elif not options.y:
@@ -126,9 +133,9 @@ try:
             spacer_title = maxlen_title * ' '
 
         for suite, dir, title in allsuites:
-            dir = re.sub( '^' + os.environ['HOME'], '~', dir )
+            dir = re.sub('^' + os.environ['HOME'], '~', dir)
             if options.align:
-                title = title + spacer_title[ len(title): ]
+                title = title + spacer_title[len(title):]
             regpath = RegPath(suite).get_list()
             sub = tree
             for key in regpath[:-1]:
@@ -145,10 +152,14 @@ try:
                 line = ''
             sub[regpath[-1]] = line
 
-        pad = get_padding( allsuites )
-        print_tree(tree, pad, options.unicode )
+        pad = get_padding(allsuites)
+        print_tree(tree, pad, options.unicode)
+
 
-except Exception,x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
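The filtering note in the docstring (an implicit trailing wildcard unless the pattern ends with '$') matches how Python's re.match() behaves; an illustration of the documented behaviour with made-up suite names, not cylc's actual matching code:

    import re

    names = ["foo", "foo.bar", "football"]
    print [n for n in names if re.match("foo", n)]   # all three match
    print [n for n in names if re.match("foo$", n)]  # only 'foo'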
diff --git a/bin/cylc-purge b/bin/cylc-purge
deleted file mode 100755
index d8efc43..0000000
--- a/bin/cylc-purge
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
-    from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
-        sys.exit(0)
-
-from cylc.prompt import prompt
-from cylc.task_id import TaskID
-from cylc import cylc_pyro_client
-from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_pyro
-import cylc.flags
-
-# Development note: post cylc-3.0 we could potentially use the suite
-# graph to determine downstream tasks to remove in a purge operation without
-# doing an internal triggering simulation.
-
-parser = cop( """cylc [control] purge [OPTIONS] ARGS
-
-Remove an entire tree of dependent tasks, over multiple cycles, from a running
-suite. The top task will be forced to spawn and will then be removed, then so
-will every task that depends on it, and every task that depends on those, and
-so on until the given stop cycle point.
-
-THIS COMMAND IS DANGEROUS.
-
-UNDERSTANDING HOW PURGE WORKS: cylc identifies tasks that depend on the top
-task, and then on its downstream dependents, and then on theirs, etc., by
-simulating what would happen if the top task were to trigger: it artificially
-sets the top task to the "succeeded" state then negotatiates dependencies and
-artificially sets any tasks whose prerequisites get satisfied to "succeeded";
-then it negotiates dependencies again, and so on until the stop cycle is
-reached or nothing new triggers. Finally it marks "virtually triggered" tasks
-for removal.  Consequently:
- * Dependent tasks will only be identified as such, and purged, if they have
-   already spawned into the top cycle - so let them catch up first.
- * You can't purge a tree of tasks that has already triggered, because the
-   algorithm relies on detecting new triggering.
-Note that suite runahead must be sufficient enough to bridge the purge gap.""",
-    pyro=True, argdoc=[("REG", "Suite name"),
-                ("TASK", "Task (NAME.CYCLE_POINT) to start purge"),
-                ("STOP_POINT", "Cycle point (inclusive!) to stop purge")])
-
-(options, args) = parser.parse_args()
-
-suite, pphrase = prep_pyro( args[0], options ).execute()
-
-target = args[1]
-stop_point_string = args[2]
-
-if not TaskID.is_valid_id(target):
-    sys.exit("Invalid Task ID: " + target)
-
-name, start = TaskID.split(target)
-
-stop = stop_point_string
-
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
-    prompt( 'Purge from ' + target + ' to ' + stop + ' in ' + suite, options.force )
-    result = proxy.put( 'purge tree', target, stop )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
diff --git a/bin/cylc-random b/bin/cylc-random
index 6013451..9fbc3d1 100755
--- a/bin/cylc-random
+++ b/bin/cylc-random
@@ -16,27 +16,39 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, sys
-import random
-from optparse import OptionParser
-
-parser = OptionParser( usage = """cylc [util] random A B
+"""cylc [util] random A B
 
 Generate a random integer in the range [A,B). This is just a command
 interface to Python's random.randrange() function.
 
 Arguments:
    A     start of the range interval (inclusive)
-   B     end of the random range (exclusive, so must be > A)""")
+   B     end of the random range (exclusive, so must be > A)"""
+
+import os
+import sys
+import random
+from optparse import OptionParser
+
+import cylc.flags
+
 
-(options, args) = parser.parse_args()
+def main():
+    parser = OptionParser(__doc__)
+    (options, args) = parser.parse_args()
 
-if len( args ) != 2:
-    parser.error( "Two integer arguments required" )
+    if len(args) != 2:
+        parser.error("Two integer arguments required")
 
-try:
     start = int(args[0])
     end = int(args[1])
-    print random.randrange( start, end )
-except ValueError, x:
-    raise SystemExit(x)
+    print random.randrange(start, end)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
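The refactored cylc-random above is a thin wrapper around Python's random.randrange(), so the half-open interval [A,B) means B itself can never be returned. A minimal stand-alone sketch of the same behaviour (the numbers 5 and 10 are purely illustrative):

    import random

    # randrange(A, B) returns an integer N with A <= N < B, matching the
    # [A,B) interval documented for "cylc random".
    print(random.randrange(5, 10))   # one of 5..9, never 10

    # An empty range (B <= A) raises ValueError, hence the requirement B > A.
    try:
        random.randrange(10, 5)
    except ValueError as exc:
        print(exc)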
diff --git a/bin/cylc-refresh b/bin/cylc-refresh
index 0e4bfce..482f0da 100755
--- a/bin/cylc-refresh
+++ b/bin/cylc-refresh
@@ -16,52 +16,57 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [db] refresh [OPTIONS] ARGS
+
+Check a suite database for invalid registrations (no suite definition
+directory or suite.rc file) and refresh suite titles in case they have
+changed since the suite was registered. Explicit wildcards must be
+used in the match pattern (e.g. 'f' will not match 'foo.bar' unless
+you use 'f.*')."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
+import cylc.flags
 from cylc.CylcOptionParsers import cop
 from cylc.registration import localdb, RegistrationError
-from cylc.config import config, SuiteConfigError
+from cylc.config import SuiteConfigError
 from cylc.regpath import RegPath
-import cylc.flags
 
-parser = cop( usage = """cylc [db] refresh [OPTIONS] ARGS
 
-Check a suite database for invalid registrations (no suite definition
-directory or suite.rc file) and refresh suite titles in case they have
-changed since the suite was registered. Explicit wildcards must be
-used in the match pattern (e.g. 'f' will not match 'foo.bar' unless
-you use 'f.*').""",
-argdoc=[('[REGEX]', 'Suite name match pattern')])
+def main():
+    parser = cop(__doc__,
+                 argdoc=[('[REGEX]', 'Suite name match pattern')])
 
-parser.add_option( "-u","--unregister",
+    parser.add_option(
+        "-u", "--unregister",
         help="Automatically unregister invalid registrations.",
-        action="store_true", default=False, dest="unregister" )
+        action="store_true", default=False, dest="unregister")
+
+    (options, args) = parser.parse_args()
 
-( options, args ) = parser.parse_args()
+    db = localdb(file=options.db)
 
-db = localdb(file=options.db )
+    if len(args) == 0:
+        pattern = '.*'
+    else:
+        pattern = args[0]
+        # force explicit wildcards
+        if not pattern.startswith('^'):
+            pattern = '^' + pattern
+        if not pattern.endswith('$'):
+            pattern += '$'
 
-if len(args) == 0:
-    pattern = '.*'
-else:
-    pattern = args[0]
-    # force explicit wildcards
-    if not pattern.startswith( '^' ):
-        pattern = '^' + pattern
-    if not pattern.endswith( '$' ):
-        pattern += '$'
+    invalid = []  # no suite.rc file
+    readerror = []  # can't read title (suite.rc parse error)
 
-invalid = []    # no suite.rc file
-readerror = []  # can't read title (suite.rc parse error)
-try:
     # check validity
     invalid = db.get_invalid()
     # refresh titles
     changed = []
-    items = db.get_list( pattern )
+    items = db.get_list(pattern)
     if len(items) == 0:
         if pattern:
             print 'No suites found to match', pattern
@@ -71,23 +76,29 @@ try:
         if suite in invalid:
             continue
         try:
-            db.refresh_suite_title( suite )
+            db.refresh_suite_title(suite)
         except (RegistrationError, SuiteConfigError), x:
             print >> sys.stderr, x
             readerror.append(suite)
     if len(invalid) > 0:
-        print 'ERROR,', len(invalid), 'invalid registrations (no suite.rc file):'
+        print ("ERROR, %d invalid registrations "
+               "(no suite.rc file):" % len(invalid))
         for i in invalid:
             if options.unregister:
                 db.unregister(i)
             else:
                 print ' -', i
     if len(readerror) > 0:
-        print 'ERROR,', len(readerror), 'title parse failures (bad suite.rc file):'
+        print ("ERROR, %d title parse failures "
+               "(bad suite.rc file):" % len(readerror))
         for i in readerror:
             print ' -', i
 
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
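The "explicit wildcards" rule in the refreshed cylc-refresh comes from anchoring the user pattern with '^' and '$' before matching, so a bare prefix such as 'f' no longer matches 'foo.bar'. A small sketch of just that anchoring step, using hypothetical suite names for illustration:

    import re

    def anchor(pattern):
        # Force explicit wildcards, as the script does:
        # 'f' becomes '^f$', while 'f.*' becomes '^f.*$'.
        if not pattern.startswith('^'):
            pattern = '^' + pattern
        if not pattern.endswith('$'):
            pattern += '$'
        return pattern

    suites = ['foo.bar', 'foo.baz', 'fish']  # hypothetical registrations
    print([s for s in suites if re.match(anchor('f'), s)])    # []
    print([s for s in suites if re.match(anchor('f.*'), s)])  # all three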
diff --git a/bin/cylc-register b/bin/cylc-register
index 11a5408..349c0af 100755
--- a/bin/cylc-register
+++ b/bin/cylc-register
@@ -16,17 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-from cylc.remote import remrun
-if remrun().execute():
-    sys.exit(0)
-
-import os
-from cylc.CylcOptionParsers import cop
-from cylc.registration import localdb
-import cylc.flags
-
-parser = cop( usage = """cylc [db] register [OPTIONS] ARGS
+"""cylc [db] register [OPTIONS] ARGS
 
 Register the suite definition located in PATH as REG.
 
@@ -45,39 +35,55 @@ For suite definition directories /home/bob/(one,two,three,four):
 % cylc db reg foo.bar.waz /home/bob/four
 
 % cylc db pr '^foo'             # print in flat form
-  bob         | "Test Suite One"   | /home/bob/one
-  foo.bag     | "Test Suite Two"   | /home/bob/two
-  foo.bar.baz | "Test Suite Four"  | /home/bob/three
-  foo.bar.waz | "Test Suite Three" | /home/bob/four
+  bob         | 'Test Suite One'   | /home/bob/one
+  foo.bag     | 'Test Suite Two'   | /home/bob/two
+  foo.bar.baz | 'Test Suite Three' | /home/bob/three
+  foo.bar.waz | 'Test Suite Four'  | /home/bob/four
 
 % cylc db pr -t '^foo'          # print in tree form
-  bob        "Test Suite One"   | /home/bob/one
+  bob        'Test Suite One'   | /home/bob/one
   foo
-   |-bag     "Test Suite Two"   | /home/bob/two
+   |-bag     'Test Suite Two'   | /home/bob/two
    `-bar
-     |-baz   "Test Suite Three" | /home/bob/three
-     `-waz   "Test Suite Four"  | /home/bob/four""",
-     argdoc=[("REG", "Suite name"),
-         ("PATH", "Suite definition directory")])
-
-( options, args ) = parser.parse_args()
-
-if args[1].endswith('suite.rc'):
-    suiterc = args[1]
-    rdir = os.path.dirname(suiterc)
-else:
-    rdir = args[1]
-    suiterc = os.path.join( rdir, 'suite.rc' )
-
-if not os.path.isdir( rdir ):
-    raise SystemExit( "Directory not found: " + rdir )
-
-suite = args[0]
-
-db = localdb( file=options.db )
-try:
-    db.register( suite, rdir )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+     |-baz   'Test Suite Three' | /home/bob/three
+     `-waz   'Test Suite Four'  | /home/bob/four"""
+
+import sys
+from cylc.remote import remrun
+if remrun().execute():
+    sys.exit(0)
+
+import os
+
+from cylc.CylcOptionParsers import cop
+from cylc.registration import localdb
+import cylc.flags
+
+
+def main():
+    parser = cop(
+        __doc__,
+        argdoc=[("REG", "Suite name"),
+                ("PATH", "Suite definition directory")])
+
+    (options, args) = parser.parse_args()
+    suite = args[0]
+
+    if args[1].endswith('suite.rc'):
+        suiterc = args[1]
+        rdir = os.path.dirname(suiterc)
+    else:
+        rdir = args[1]
+        suiterc = os.path.join(rdir, 'suite.rc')
+
+    db = localdb(file=options.db)
+    db.register(suite, rdir)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
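cylc-register now accepts either a suite definition directory or the suite.rc file itself; the new main() normalises both to a directory before calling db.register(). A sketch of just that normalisation (the helper name and example paths are made up for illustration):

    import os

    def suite_dir_of(path):
        # Accept either .../my-suite or .../my-suite/suite.rc and return
        # the suite definition directory in both cases.
        if path.endswith('suite.rc'):
            return os.path.dirname(path)
        return path

    print(suite_dir_of('/home/bob/one'))           # /home/bob/one
    print(suite_dir_of('/home/bob/one/suite.rc'))  # /home/bob/one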
diff --git a/bin/cylc-release b/bin/cylc-release
index e69448e..b0ed344 100755
--- a/bin/cylc-release
+++ b/bin/cylc-release
@@ -16,63 +16,66 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] release|unhold [OPTIONS] ARGS
+
+Release one or more held tasks (cylc release REG MATCH POINT)
+or the whole suite (cylc release REG). Held tasks do not
+submit even if they are ready to run."""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
+    if remrun().execute(force_required=True):
         sys.exit(0)
 
+import cylc.flags
 from cylc.prompt import prompt
-from cylc import cylc_pyro_client
+from cylc.network.suite_command import SuiteCommandClient
 from cylc.CylcOptionParsers import cop, multitask_usage
-from cylc.command_prep import prep_pyro
-import cylc.flags
 
-parser = cop( """cylc [control] release|unhold [OPTIONS] ARGS
 
-Release one or more held tasks (cylc release REG MATCH POINT)
-or the whole suite (cylc release REG). Held tasks do not
-submit even if they are ready to run.""" +
-   multitask_usage + """
-See also 'cylc [control] hold'.""", pyro=True, multitask=True,
-    argdoc=[("REG", 'Suite name'),
-    ('[MATCH]', 'Task or family name matching regular expression'),
-    ('[POINT]', 'Task cycle point (e.g. date-time or integer)') ])
+def main():
+    parser = cop(
+        __doc__ + multitask_usage + "\nSee also 'cylc [control] hold'.",
+        pyro=True, multitask=True,
+        argdoc=[
+            ("REG", 'Suite name'),
+            ('[MATCH]', 'Task or family name matching regular expression'),
+            ('[POINT]', 'Task cycle point (e.g. date-time or integer)')])
 
-(options, args) = parser.parse_args()
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-suite, pphrase = prep_pyro( args[0], options ).execute()
+    if len(args) == 3:
+        whole_suite = False
+        name = args[1]
+        point_string = args[2]
+        prompt('Release task(s) %s at %s in %s' % (name, point_string, suite),
+               options.force)
 
-if len(args) == 3:
-    whole_suite = False
-    name = args[1]
-    point_string = args[2]
-    prompt(
-        'Release task(s) ' + name + ' at ' + point_string + ' in ' + suite,
-        options.force
-    )
-elif len(args) == 1:
-    whole_suite = True
-    prompt( 'Release suite ' + suite, options.force )
-else:
-    parser.error("Wrong number of arguments.")
+    elif len(args) == 1:
+        whole_suite = True
+        prompt('Release suite %s' % suite, options.force)
+    else:
+        parser.error("Wrong number of arguments.")
+
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
 
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
     if whole_suite:
-        result = proxy.put( 'release suite' )
+        pclient.put_command('release_suite')
     else:
-        result = proxy.put( 'release task', name, point_string,
-                            options.is_family )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
+        pclient.put_command('release_task', name, point_string,
+                            options.is_family)
+
 
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
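cylc-release, like most of the commands converted in this patch, now follows one skeleton: the usage text lives in the module docstring and is passed to cop(__doc__, ...), the work happens in main(), and the __main__ guard re-raises under cylc.flags.debug or exits cleanly otherwise. A stripped-down sketch of that skeleton, with a plain OptionParser and a placeholder body standing in for the real cop()/SuiteCommandClient calls:

    """cylc [control] example [OPTIONS] ARGS

    One-line summary; the docstring doubles as the command usage text."""

    import sys
    from optparse import OptionParser  # the real scripts use cylc's cop()

    import cylc.flags


    def main():
        parser = OptionParser(__doc__)
        options, args = parser.parse_args()
        # ... the real commands build a SuiteCommandClient here and call
        # pclient.put_command('some_command', *args) ...


    if __name__ == "__main__":
        try:
            main()
        except Exception as exc:
            if cylc.flags.debug:
                raise          # full traceback when running with --debug
            sys.exit(exc)      # otherwise exit with just the error message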
diff --git a/bin/cylc-reload b/bin/cylc-reload
index 486b2ea..6e7709e 100755
--- a/bin/cylc-reload
+++ b/bin/cylc-reload
@@ -16,21 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
-    from cylc.remote import remrun
-    if remrun().execute():
-        sys.exit(0)
-
-from cylc import cylc_pyro_client
-from cylc.config import config
-from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_pyro
-from cylc.prompt import prompt
-import cylc.flags
-
-parser = cop( """cylc [control] reload [OPTIONS] ARGS
+"""cylc [control] reload [OPTIONS] ARGS
 
 Tell a suite to reload its definition at run time. All settings
 including task definitions, with the exception of suite log
@@ -51,25 +37,38 @@ apply to the reload (only changes to the suite.rc file itself are
 reloaded).
 
 If the modified suite definition does not parse, failure to reload will
-be reported but no harm will be done to the running suite.""", pyro=True )
+be reported but no harm will be done to the running suite."""
+
+import sys
+if '--use-ssh' in sys.argv[1:]:
+    sys.argv.remove('--use-ssh')
+    from cylc.remote import remrun
+    if remrun().execute():
+        sys.exit(0)
+
+import cylc.flags
+from cylc.network.suite_command import SuiteCommandClient
+from cylc.CylcOptionParsers import cop
+from cylc.prompt import prompt
 
-(options, args) = parser.parse_args()
 
-suite, pphrase = prep_pyro( args[0], options ).execute()
+def main():
+    parser = cop(__doc__, pyro=True)
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-prompt( 'Reload ' + suite, options.force )
+    prompt('Reload %s' % suite, options.force)
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+    pclient.put_command('reload_suite')
 
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
-    result = proxy.put( 'reload suite' )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
 
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-remove b/bin/cylc-remove
index 29fffef..f7d8530 100755
--- a/bin/cylc-remove
+++ b/bin/cylc-remove
@@ -16,72 +16,75 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] remove [OPTIONS] ARGS
+
+Remove one or more tasks (cylc remove REG MATCH POINT), or all tasks with a
+given cycle point (cylc remove REG POINT) from a running suite.
+
+Tasks will spawn successors first if they have not done so already.
+"""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
+    if remrun().execute(force_required=True):
         sys.exit(0)
 
+import cylc.flags
 from cylc.prompt import prompt
-from cylc import cylc_pyro_client
+from cylc.network.suite_command import SuiteCommandClient
 from cylc.CylcOptionParsers import cop, multitask_usage
-from cylc.command_prep import prep_pyro
-import cylc.flags
 
-parser = cop( """cylc [control] remove [OPTIONS] ARGS
 
-Remove one or more tasks (cylc remove REG MATCH POINT), or all tasks with a
-given cycle point (cylc remove REG POINT) from a running suite.
-
-Tasks will spawn successors first if they have not done so already.
-""" + multitask_usage, pyro=True, multitask=True,
-    argdoc=[("REG", "Suite name"),
-        ('[MATCH]', 'Task or family name matching regular expression'),
-        ('[POINT]', 'Task cycle point (e.g. date-time or integer)') ])
+def main():
+    parser = cop(
+        __doc__ + multitask_usage, pyro=True, multitask=True,
+        argdoc=[
+            ("REG", "Suite name"),
+            ('[MATCH]', 'Task or family name matching regular expression'),
+            ('[POINT]', 'Task cycle point (e.g. date-time or integer)')])
 
-parser.add_option( "--no-spawn",
+    parser.add_option(
+        "--no-spawn",
         help="Do not spawn successors before removal.",
-        action="store_true", default=False, dest="no_spawn" )
+        action="store_true", default=False, dest="no_spawn")
 
-(options, args) = parser.parse_args()
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-suite, pphrase = prep_pyro( args[0], options ).execute()
+    if len(args) == 3:
+        name = args[1]
+        point_string = args[2]
+        remove_point = False
+    elif len(args) == 2:
+        point_string = args[1]
+        remove_point = True
+    else:
+        parser.error("Wrong number of arguments.")
 
-if len(args) == 3:
-    name = args[1]
-    point_string = args[2]
-    remove_point = False
-elif len(args) == 2:
-    point_string = args[1]
-    remove_point = True
-else:
-    parser.error("Wrong number of arguments.")
+    spawn = not options.no_spawn
 
-spawn = not options.no_spawn
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
 
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
     if remove_point:
-        prompt( 'remove ALL tasks at ' + point_string + ' in ' + suite,
-                options.force )
-        result = proxy.put( 'remove cycle', point_string, spawn )
+        prompt('remove ALL tasks at %s in %s' % (point_string, suite),
+               options.force)
+        pclient.put_command('remove_cycle', point_string, spawn)
     else:
-        prompt(
-            'remove task(s) ' + name + ' at ' + point_string + ' in ' + suite,
-            options.force
-        )
-        result = proxy.put(
-            'remove task', name, point_string, options.is_family, spawn)
-
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+        prompt('remove task(s) %s at %s in %s' % (name, point_string, suite),
+               options.force)
+        pclient.put_command('remove_task', name, point_string,
+                            options.is_family, spawn)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-reregister b/bin/cylc-reregister
index 7e59537..f5bb87b 100755
--- a/bin/cylc-reregister
+++ b/bin/cylc-reregister
@@ -16,6 +16,12 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [db] reregister|rename [OPTIONS] ARGS
+
+Change the name of a suite (or group of suites) from REG1 to REG2.
+Example:
+  cylc db rereg foo.bar.baz test.baz"""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
@@ -25,24 +31,22 @@ from cylc.CylcOptionParsers import cop
 from cylc.registration import localdb
 import cylc.flags
 
-parser = cop( usage = """cylc [db] reregister|rename [OPTIONS] ARGS
-
-Change the name of a suite (or group of suites) from REG1 to REG2.
-Example:
-  cylc db rereg foo.bar.baz test.baz""",
-       argdoc=[("REG1", "original name"),
-         ("REG2", "new name")])
 
-( options, args ) = parser.parse_args()
+def main():
+    parser = cop(__doc__, argdoc=[("REG1", "original name"),
+                                  ("REG2", "new name")])
+    (options, args) = parser.parse_args()
+    arg_from = args[0]
+    arg_to = args[1]
 
-arg_from = args[0]
-arg_to = args[1]
+    db = localdb(file=options.db)
+    db.reregister(arg_from, arg_to)
 
-db = localdb( file=options.db )
 
-try:
-    db.reregister( arg_from, arg_to )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-reset b/bin/cylc-reset
index 1824f09..1bd49ec 100755
--- a/bin/cylc-reset
+++ b/bin/cylc-reset
@@ -16,68 +16,69 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] reset [OPTIONS] ARGS
+
+Force one or more task proxies in a running suite to change state and modify
+their prerequisites and outputs accordingly.  For example, the 'waiting' state
+means "prerequisites not satisfied, outputs not completed"; 'ready' means
+"prerequisites satisfied, outputs not completed". Setting a task to 'ready'
+generally has the same effect as using the "cylc trigger" command.
+
+See the documentation for the -s/--state option for legal reset states."""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
+    if remrun().execute(force_required=True):
         sys.exit(0)
 
+import cylc.flags
 from cylc.prompt import prompt
-from cylc import cylc_pyro_client
+from cylc.network.suite_command import SuiteCommandClient
 from cylc.CylcOptionParsers import cop, multitask_usage
-from cylc.command_prep import prep_pyro
-import cylc.flags
+from cylc.task_state import task_state
 
-parser = cop( """cylc [control] reset [OPTIONS] ARGS
 
-Reset one or more tasks in a running suite to one of the following states:
-  * 'waiting' .... prerequisites not satisfied
-  * 'ready' ...... prerequisites satisfied
-  * 'succeeded' .. outputs completed
-  * 'failed' ..... failed
+def main():
+    parser = cop(
+        __doc__ + multitask_usage, pyro=True, multitask=True,
+        argdoc=[
+            ('REG', 'Suite name'),
+            ('MATCH', 'Task or family name matching regular expression'),
+            ('POINT', 'Task cycle point (e.g. date-time or integer)')])
 
-Additionally you can choose:
-  * 'spawn' ...... force tasks to spawn if they haven't done so already
+    parser.add_option(
+        "-s", "--state", metavar="STATE",
+        help="Reset task state to STATE to on of %s" % (
+            ', '.join(task_state.legal_for_reset)),
+        action="store", default=None, dest="state")
 
-Tasks set to 'ready' will trigger immediately (see also "cylc trigger").
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-""" + multitask_usage, pyro=True, multitask=True,
-        argdoc=[ ('REG', 'Suite name'),
-            ('MATCH', 'Task or family name matching regular expression'),
-            ('POINT', 'Task cycle point (e.g. date-time or integer)') ])
-
-allowed_states = ['waiting', 'ready', 'succeeded', 'failed', 'spawn']
-
-parser.add_option( "-s", "--state", metavar="STATE",
-        help="Reset task state to STATE, "
-        "must be one of " + ' '.join( allowed_states ),
-        action="store", default=None, dest="state" )
-
-(options, args) = parser.parse_args()
-
-suite, pphrase = prep_pyro( args[0], options ).execute()
-
-if options.state not in allowed_states:
-    parser.error( "Illegal STATE value: " + options.state )
-
-name = args[1]
-point_string = args[2]
-
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
-    prompt( 'Reset task(s) ' + name + ' at ' + point_string + ' in ' + suite,
-            options.force )
-    result = proxy.put( 'reset task state', name, point_string, options.state,
-                        options.is_family )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
-
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+    if options.state not in task_state.legal_for_reset:
+        parser.error("Illegal STATE value: " + options.state)
+
+    name = args[1]
+    point_string = args[2]
+
+    prompt('Reset task(s) ' + name + ' at ' + point_string + ' in ' + suite,
+           options.force)
+
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+
+    pclient.put_command('reset_task_state', name, point_string, options.state,
+                        options.is_family)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-restart b/bin/cylc-restart
index 658f885..070de5d 100755
--- a/bin/cylc-restart
+++ b/bin/cylc-restart
@@ -16,90 +16,102 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] restart [OPTIONS] ARGS
+
+Start a suite run from a previous state. To start from scratch (cold or warm
+start) see the 'cylc run' command.
+
+The scheduler runs in daemon mode unless you specify -n/--no-detach or --debug.
+
+The most recent previous suite state is loaded by default, but earlier state
+files in the suite state directory can be specified on the command line.
+
+Tasks recorded as 'submitted' or 'running' will be polled at start-up to
+determine what happened to them while the suite was down."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
+import os
+import re
 from datetime import datetime
-import os, re
-from cylc.config import TaskNotDefinedError
+
+import cylc.flags
+from cylc.config import SuiteConfig, TaskNotDefinedError
 from cylc.CylcOptionParsers import cop
 from cylc.scheduler import scheduler
 from cylc.suite_state_dumping import SuiteStateDumper
 from cylc.task_state import task_state
 from cylc.run import main
-from cylc.command_prep import prep_file
+from cylc.get_task_proxy import get_task_proxy
+from cylc.registration import localdb
 from cylc.task_id import TaskID
 from cylc.cycling.loader import get_point, DefaultCycler, ISO8601_CYCLING_TYPE
 from cylc.wallclock import get_current_time_string
+from cylc.network.suite_broadcast import BroadcastServer
 
 
-class restart( scheduler ):
-    def __init__( self ):
-        usage = """cylc [control] restart [OPTIONS] ARGS
-
-Start a suite run from a previous state. To start from scratch (cold or warm
-start) see the 'cylc run' command.
-
-The scheduler runs in daemon mode unless you specify n/--no-detach or --debug.
+class restart(scheduler):
+    def __init__(self):
 
-The most recent previous suite state is loaded by default, but earlier state
-files in the suite state directory can be specified on the command line.
-
-Tasks recorded as 'submitted' or 'running' will be polled at start-up to
-determine what happened to them while the suite was down."""
-
-        self.parser = cop( usage, jset=True, argdoc=[("REG", "Suite name"),
-   ( "[FILE]", """Optional state dump file, assumed to reside in the
+        self.parser = cop(
+            __doc__, jset=True,
+            argdoc=[
+                ("REG", "Suite name"),
+                ("[FILE]", """Optional state dump, assumed to reside in the
                         suite state dump directory unless an absolute path
-                        is given. Defaults to the most recent suite state.""")])
+                        is given. Defaults to the most recent suite state.""")
+            ]
+        )
 
-        self.parser.add_option( "--non-daemon",
-                help="(deprecated: use --no-detach)",
-                action="store_true", default=False, dest="no_detach" )
+        self.parser.add_option(
+            "--non-daemon", help="(deprecated: use --no-detach)",
+            action="store_true", default=False, dest="no_detach")
 
-        self.parser.add_option( "-n", "--no-detach",
-                help="Do not daemonize the suite",
-                action="store_true", default=False, dest="no_detach" )
+        self.parser.add_option(
+            "-n", "--no-detach", help="Do not daemonize the suite",
+            action="store_true", default=False, dest="no_detach")
 
-        self.parser.add_option( "--profile",
-                help="Output profiling (performance) information",
-                action="store_true", default=False, dest="profile_mode" )
+        self.parser.add_option(
+            "--profile", help="Output profiling (performance) information",
+            action="store_true", default=False, dest="profile_mode")
 
-        self.parser.add_option( "--ignore-final-cycle-point",
+        self.parser.add_option(
+            "--ignore-final-cycle-point",
             help="Ignore the final cycle point in the state dump. If one is"
-            "specified in the suite definition it will be used, however.",
-            action="store_true", default=False, dest="ignore_stop_point" )
+                 "specified in the suite definition it will be used, however.",
+            action="store_true", default=False, dest="ignore_stop_point")
 
-        self.parser.add_option( "--ignore-initial-cycle-point",
+        self.parser.add_option(
+            "--ignore-initial-cycle-point",
             help="Ignore the initial cycle point in the state dump. If one is "
-            "specified in the suite definition it will be used, however.",
-            action="store_true", default=False, dest="ignore_start_point" )
-
-        scheduler.__init__( self, is_restart=True )
-
-    def parse_commandline( self ):
-        ( self.options, self.args ) = self.parser.parse_args()
+                 "specified in the suite definition it will be used, however.",
+            action="store_true", default=False, dest="ignore_start_point")
 
-        self.suite, self.suiterc = prep_file( self.args[0], self.options ).execute()
+        scheduler.__init__(self, is_restart=True)
 
-        self.suite_dir = os.path.dirname( self.suiterc )
+    def parse_commandline(self):
+        (self.options, self.args) = self.parser.parse_args()
+        self.suite = self.args[0]
+        self.suiterc = localdb(self.options.db).get_suiterc(self.suite)
+        self.suite_dir = os.path.dirname(self.suiterc)
 
         # For user-defined job submission methods:
-        sys.path.append( os.path.join( self.suite_dir, 'python' ))
+        sys.path.append(os.path.join(self.suite_dir, 'python'))
 
         self.restart_from = None
-        if len( self.args ) == 2:
+        if len(self.args) == 2:
             self.restart_from = self.args[1]
 
-        scheduler.parse_commandline( self )
+        scheduler.parse_commandline(self)
 
-    def get_state_initial_point_string( self ):
+    def get_state_initial_point_string(self):
         """Return the initial point string from the state file, if any."""
         return self.get_state_file_info()[0]
-        
-    def get_state_file_path( self ):
+
+    def get_state_file_path(self):
         """Return the state file path that we are restarting from."""
         my_dumper = self.state_dumper
         if my_dumper is None:
@@ -117,14 +129,14 @@ determine what happened to them while the suite was down."""
             raise Exception("state dump file not found: " + file_name)
         return os.path.realpath(file_name)
 
-    def get_state_file_info( self ):
+    def get_state_file_info(self):
         """Return the state file start & stop strings, broadcast, tasks.
 
         The state dump file format is:
-        run mode : <mode> 
+        run mode : <mode>
         time : <time> (<unix time>)
         initial cycle : 2014050100
-        final cycle : (none)
+        final cycle : None
         (dp1   # (Broadcast pickle string)
         .      # (Broadcast pickle string)
         Begin task states
@@ -136,9 +148,9 @@ determine what happened to them while the suite was down."""
         """
         file_name = self.get_state_file_path()
         try:
-            FILE = open( file_name, 'r' )
-        except IOError,x:
-            print >> sys.stderr, x
+            FILE = open(file_name, 'r')
+        except IOError as exc:
+            print >> sys.stderr, str(exc)
             raise Exception(
                 "ERROR, cannot open suite state dump: %s" % file_name)
         lines = FILE.readlines()
@@ -146,7 +158,7 @@ determine what happened to them while the suite was down."""
 
         nlines = len(lines)
         if nlines == 0:
-            raise Exception( "ERROR, empty suite state dump: %s" % file_name )
+            raise Exception("ERROR, empty suite state dump: %s" % file_name)
         elif nlines < 3:
             print >> sys.stderr, (
                 "ERROR, The suite state dump contains only %d lines" % nlines)
@@ -154,14 +166,14 @@ determine what happened to them while the suite was down."""
                 print ' ', l.rstrip()
             raise Exception(
                 "ERROR, incomplete suite state dump: %s" % file_name)
-       
+
         index = 0
         # run mode : <mode>
         line0 = lines[index].rstrip()
-        if line0.startswith( 'suite time' ):
+        if line0.startswith('suite time'):
             # backward compatibility for pre-5.4.11 state dumps
             old_run_mode = 'live'
-        elif line0.startswith( 'simulation time' ):
+        elif line0.startswith('simulation time'):
             # backward compatibility for pre-5.4.11 state dumps
             old_run_mode = 'simulation'
         else:
@@ -173,8 +185,7 @@ determine what happened to them while the suite was down."""
         if self.run_mode == 'live' and old_run_mode != 'live':
             raise Exception(
                 "ERROR: cannot RESTART in %s from a %s state dump" % (
-                    self.run_mode, old_run_mode)
-            )
+                    self.run_mode, old_run_mode))
 
         state_start_string = None
         index += 1
@@ -187,7 +198,7 @@ determine what happened to them while the suite was down."""
                 'ERROR, Illegal state dump line 2 (initial cycle):')
             print >> sys.stderr, ' ', line2
             raise Exception("ERROR: corrupted state dump")
-        if oldstartcycle != '(none)':
+        if oldstartcycle != 'None':
             state_start_string = oldstartcycle
 
         state_stop_string = None
@@ -202,7 +213,7 @@ determine what happened to them while the suite was down."""
             print >> sys.stderr, ' ', line3
             raise Exception("ERROR: corrupted state dump")
 
-        if oldstopcycle != '(none)':
+        if oldstopcycle != 'None':
             state_stop_string = oldstopcycle
 
         # broadcast variables (universal):
@@ -233,14 +244,14 @@ determine what happened to them while the suite was down."""
         return (state_start_string, state_stop_string,
                 pickled_broadcast, task_lines)
 
-    def load_tasks( self ):
+    def load_tasks(self):
 
         # FIND THE INITIAL STATE DUMP FILE
         base_name = self.state_dumper.BASE_NAME
         dir_name = self.state_dumper.dir_name
         file_name = self.get_state_file_path()
 
-        self.log.info( 'Restart ' + file_name )
+        self.log.info('Restart ' + file_name)
         src_name = file_name
         if os.path.realpath(dir_name) == os.path.dirname(file_name):
             src_name = os.path.basename(file_name)
@@ -258,8 +269,11 @@ determine what happened to them while the suite was down."""
             if self.options.ignore_start_point:
                 # ignore it and take whatever the suite.rc file gives us
                 if self.start_point is not None:
-                    print >> sys.stderr, """WARNING: I'm ignoring the old initial cycle point as requested, but I
-  can't ignore the one set in the suite definition."""
+                    print >> sys.stderr, (
+                        "WARNING: I'm ignoring the old initial cycle point"
+                        " as requested,\n"
+                        "but I can't ignore the one set in"
+                        " the suite definition.")
             elif self.start_point is not None:
                 # a start cycle was given in the suite.rc file
                 if self.start_point != state_start_point:
@@ -280,18 +294,21 @@ determine what happened to them while the suite was down."""
             else:
                 # reinstate the former start cycle
                 self.start_point = state_start_point
-        
+
         if state_stop_string is not None:
             # the state dump prescribes a stop cycle
-            # (else we take whatever the command line or suite.rc file gives us)
+            # (else take whatever the command line or suite.rc file gives us)
             state_stop_point = get_point(state_stop_string)
             if self.options.ignore_stop_point:
-                # ignore it and take whatever the command line or suite.rc file gives us
+                # take whatever the command line or suite.rc file gives us
                 if self.stop_point is not None:
-                    print >> sys.stderr, """WARNING: I'm ignoring the old final cycle point as requested, but I
-  can't ignore the one set on the command line or in the suite definition."""
+                    print >> sys.stderr, (
+                        "WARNING: I'm ignoring the old final cycle point"
+                        " as requested,\n"
+                        "but I can't ignore the one set on"
+                        " the command line or in the suite definition.")
             elif self.stop_point is not None:
-                # a stop cycle was given on the restart command line or suite.rc file
+                # a stop cycle was given on the command line or suite.rc file
                 if self.stop_point != state_stop_point:
                     print >> sys.stderr, (
                         "WARNING: overriding the old stop cycle point "
@@ -301,7 +318,7 @@ determine what happened to them while the suite was down."""
                 # reinstate the old stop cycle
                 self.stop_point = state_stop_point
 
-        self.pool.wireless.load( broadcast )
+        BroadcastServer.get_inst().load(broadcast)
 
         # parse each task line and create the task it represents
         tasknames = {}
@@ -310,12 +327,12 @@ determine what happened to them while the suite was down."""
         for line in task_lines:
             # instance variables
             try:
-                ( id, state ) = line.split(' : ')
+                (id, state) = line.split(' : ')
                 name, point_string = TaskID.split(id)
             except:
                 print >> sys.stderr, "ERROR, Illegal line in suite state dump:"
                 print >> sys.stderr, " ", line
-                raise Exception( "ERROR: corrupted state dump" )
+                raise Exception("ERROR: corrupted state dump")
             if (point_string == "1" and
                     DefaultCycler.TYPE == ISO8601_CYCLING_TYPE):
                 # A state file from a pre-cylc-6 with mixed-async graphing.
@@ -326,7 +343,7 @@ determine what happened to them while the suite was down."""
                 id = new_id
             tasknames[name] = True
             if 'status=submitting,' in state:
-                # backward compatibility for state dumps generated prior to #787
+                # back compat for state dumps generated prior to #787
                 state = state.replace('status=submitting,',
                                       'status=ready,', 1)
             if 'status=runahead,' in state:
@@ -337,18 +354,16 @@ determine what happened to them while the suite was down."""
                 task_state(state)
             except Exception as e:
                 print >> sys.stderr, "ERROR: ", type(e).__name__, e
-                raise Exception( "ERROR: corrupted state dump" )
-            taskstates[id] = (name, point_string, state )
+                raise Exception("ERROR: corrupted state dump")
+            taskstates[id] = (name, point_string, state)
             task_point_strings.append(point_string)
 
         task_point_strings = list(set(task_point_strings))
-        cycleinfo = {}
 
         print "LOADING data from suite db"
 
-        for point_string in task_point_strings:
-            res = self.db.get_restart_info(point_string)
-            cycleinfo[point_string] = res
+        task_states_data = self.pri_dao.select_task_states_by_cycles(
+            ["submit_num", "try_num", "host"], task_point_strings)
         # RESURRECTING TASKS FROM A SUITE STATE DUMP FILE
         #
         # The state of task prerequisites (satisfied or not) and outputs
@@ -373,21 +388,22 @@ determine what happened to them while the suite was down."""
         # polled to determine what their current true status is.
 
         initial_task_list = tasknames.keys()
-        task_list = self.filter_initial_task_list( initial_task_list )
+        task_list = self.filter_initial_task_list(initial_task_list)
 
         print "RELOADING task proxies"
 
+        config = SuiteConfig.get_inst()
         for id in taskstates:
             name, point_string, state = taskstates[id]
             if name not in task_list:
                 continue
 
             print " +", id
+            task_states_datum = task_states_data.get((name, point_string))
             try:
+                submit_num = task_states_datum.get("submit_num", 0)
                 # startup is True only for a cold start
-                submit_num = cycleinfo[point_string].get(name, 0)
-                
-                itask = self.config.get_task_proxy(
+                itask = get_task_proxy(
                     name,
                     get_point(point_string),
                     state,
@@ -399,7 +415,9 @@ determine what happened to them while the suite was down."""
                 print >> sys.stderr, (
                     "WARNING: ignoring task %s " % name +
                     "from the suite state dump file")
-                print >> sys.stderr, "(the task definition has probably been deleted from the suite)."
+                print >> sys.stderr, (
+                    "(the task definition has probably been "
+                    "deleted from the suite).")
                 continue
             except Exception, x:
                 print >> sys.stderr, str(x)
@@ -428,69 +446,48 @@ determine what happened to them while the suite was down."""
                 itask.state.set_status('waiting')
 
             elif itask.state.is_currently('submitted', 'running'):
-                itask.prerequisites.set_all_satisfied()
-                # get user_at_host from run-db
-                user_at_host = None
-                row = self.db.get_task_location(name, point_string)
-                if row and row[0]:
-                    user_at_host = row[0]
-                    self.old_user_at_host_set.add(str(user_at_host))
-                else:
-                    print >> sys.stderr, (
-                        "WARNING: %s " % id +
-                        "failed to read user at host from run-db!"
-                    )
-
-                # get submit_method_id and try_num from run-db
-                submit_method_id = try_num = None
-                row = self.db.get_task_submit_method_id_and_try(
-                    name, point_string)
-                if row and row[0]:
-                    submit_method_id, try_num = row
+                itask.set_prerequisites_all_satisfied()
+                # update the task proxy with submit ID etc.
+                itask.try_number = task_states_datum.get("try_num")
+                itask.user_at_host = task_states_datum.get("host")
+                self.old_user_at_host_set.add(itask.user_at_host)
+                if itask.user_at_host is None:
+                    itask.user_at_host = "localhost"
+                # update timers in case regular polling is configured for itask
+                if '@' in itask.user_at_host:
+                    host = itask.user_at_host.split('@', 1)[1]
                 else:
-                    print >> sys.stderr, (
-                        "WARNING: %s " % id +
-                        "failed to read submit_method_id and try_num from " +
-                        "run-db!"
-                    )
-
-                if None in [ user_at_host, submit_method_id, try_num ]:
-                    print >> sys.stderr, (
-                        "WARNING: cannot determine what happened to %s" % id)
-                else:
-                    # update the task proxy with submit ID etc.
-                    itask.submit_method_id = submit_method_id
-                    itask.try_number = try_num
-                    itask.user_at_host = user_at_host
-                    # poll the task
-                    try:
-                        itask.poll()
-                    except Exception as exc:
-                        print >>sys.stderr, "WARNING: %s: poll failed" % id
-                    # update poll timers in case regular polling is configured for itask
-                    if '@' in user_at_host:
-                        owner, host = user_at_host.split('@')
-                    else:
-                        host = user_at_host
-                    itask.submission_poll_timer.set_host( host, set_timer=True )
-                    itask.execution_poll_timer.set_host( host, set_timer=True )
-
-            elif itask.state.is_currently( 'queued', 'ready','submit-retrying', 'submit-failed', 'retrying', 'failed'):
-                itask.prerequisites.set_all_satisfied()
-                if not itask.state.is_currently( 'failed', 'submit-failed' ):
+                    host = itask.user_at_host
+                itask.submission_poll_timer.set_host(host, set_timer=True)
+                itask.execution_poll_timer.set_host(host, set_timer=True)
+
+            elif itask.state.is_currently('queued', 'ready', 'submit-retrying',
+                                          'submit-failed', 'retrying',
+                                          'failed'):
+                itask.set_prerequisites_all_satisfied()
+                if not itask.state.is_currently('failed', 'submit-failed'):
                     # reset to waiting as these had not been submitted yet.
                     itask.state.set_status('waiting')
 
             elif itask.state.is_currently('succeeded'):
-                itask.prerequisites.set_all_satisfied()
+                itask.set_prerequisites_all_satisfied()
+                # TODO - just poll for outputs in the job status file.
                 itask.outputs.set_all_completed()
 
             else:
                 raise Exception(
                     'ERROR: unknown task state for %s' % itask.identity)
 
-            self.pool.add_to_runahead_pool( itask )
+            self.pool.add_to_runahead_pool(itask)
+
+        # Poll all submitted and running task jobs
+        self.pool.poll_task_jobs()
 
 
 if __name__ == '__main__':
-    main("restart", restart)
+    try:
+        main("restart", restart)
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
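The rewritten get_state_file_info() still depends on the fixed header layout described in its docstring (run mode, time, initial cycle, final cycle, then the pickled broadcast section and task lines), with unset cycles now written as 'None' rather than '(none)'. A minimal header-reader sketch, assuming a well-formed new-format dump and ignoring the broadcast and task sections that the real method also handles:

    def read_state_header(path):
        # Parse the first four lines of a suite state dump:
        #   run mode : <mode>
        #   time : <time> (<unix time>)
        #   initial cycle : <point or None>
        #   final cycle : <point or None>
        header = {}
        with open(path) as handle:
            for _ in range(4):
                key, _sep, value = handle.readline().rstrip().partition(' : ')
                header[key] = None if value == 'None' else value
        return header

    # read_state_header('state') might return, for example:
    # {'run mode': 'live', 'time': '2015-12-12T13:53:29Z (1449928409)',
    #  'initial cycle': '2014050100', 'final cycle': None}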
diff --git a/bin/cylc-run b/bin/cylc-run
index 851123f..09cf509 100755
--- a/bin/cylc-run
+++ b/bin/cylc-run
@@ -16,22 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-from cylc.remote import remrun
-if remrun().execute():
-    sys.exit(0)
-
-import datetime
-import os
-from cylc.CylcOptionParsers import cop
-from cylc.scheduler import scheduler
-from cylc.run import main
-from cylc.command_prep import prep_file
-from cylc.task_proxy import TaskProxySequenceBoundsError
-
-class start( scheduler ):
-    def __init__( self ):
-        usage = """cylc [control] run|start [OPTIONS] ARGS
+"""cylc [control] run|start [OPTIONS] ARGS
 
 Start a suite run from scratch, wiping out any previous suite state. To
 restart from a previous state see 'cylc restart --help'.
@@ -54,67 +39,88 @@ Aside from the starting cycle point there is no difference between cold and
 warm start unless you use special cold-start tasks. See "Suite Start-up" and
 "Cold-Start Tasks" in the User Guide for more."""
 
-        self.parser = cop( usage, jset=True, argdoc=[ ("REG", "Suite name"),
-                ("[START_POINT]", """Initial cycle point or 'now'; overrides the
-                         suite definition.""")])
-
-        self.parser.add_option( "--non-daemon",
-                help="(deprecated: use --no-detach)",
-                action="store_true", default=False, dest="no_detach" )
-
-        self.parser.add_option( "-n", "--no-detach",
-                help="Do not daemonize the suite",
-                action="store_true", default=False, dest="no_detach" )
-
-        self.parser.add_option( "--profile",
-                help="Output profiling (performance) information",
-                action="store_true", default=False, dest="profile_mode" )
-
-        self.parser.add_option( "-w", "--warm", help="Warm start the suite. "
-                "The default is to cold start.",
-                action="store_true", default=False, dest="warm" )
-
-        self.parser.add_option( "--ict",
-                help="Set $CYLC_SUITE_INITIAL_CYCLE_POINT to the initial "
-                "cycle point even in a warm start (as for cold starts).",
-                action="store_true", default=False, dest="set_ict" )
-
-        scheduler.__init__( self )
-
-    def parse_commandline( self ):
-        ( self.options, self.args ) = self.parser.parse_args()
+import sys
+from cylc.remote import remrun
+if remrun().execute():
+    sys.exit(0)
 
-        self.suite, self.suiterc = prep_file( self.args[0], self.options ).execute()
+import os
+import datetime
 
-        self.suite_dir = os.path.dirname( self.suiterc )
+import cylc.flags
+from cylc.CylcOptionParsers import cop
+from cylc.scheduler import scheduler
+from cylc.run import main
+from cylc.registration import localdb
+from cylc.task_proxy import TaskProxySequenceBoundsError
+from cylc.get_task_proxy import get_task_proxy
+
+
+class start(scheduler):
+    def __init__(self):
+        self.parser = cop(
+            __doc__, jset=True,
+            argdoc=[("REG", "Suite name"),
+                    ("[START_POINT]",
+                     # 31 spaces, initial ident on 2nd line
+                     "Initial cycle point or 'now';\n" +
+                     " " * 31 + "overrides the suite definition.")])
+
+        self.parser.add_option(
+            "--non-daemon", help="(deprecated: use --no-detach)",
+            action="store_true", default=False, dest="no_detach")
+
+        self.parser.add_option(
+            "-n", "--no-detach", help="Do not daemonize the suite",
+            action="store_true", default=False, dest="no_detach")
+
+        self.parser.add_option(
+            "--profile", help="Output profiling (performance) information",
+            action="store_true", default=False, dest="profile_mode")
+
+        self.parser.add_option(
+            "-w", "--warm",
+            help="Warm start the suite. "
+                 "The default is to cold start.",
+            action="store_true", default=False, dest="warm")
+
+        self.parser.add_option(
+            "--ict",
+            help="Set $CYLC_SUITE_INITIAL_CYCLE_POINT to the initial "
+                 "cycle point even in a warm start (as for cold starts).",
+            action="store_true", default=False, dest="set_ict")
+
+        scheduler.__init__(self)
+
+    def parse_commandline(self):
+        (self.options, self.args) = self.parser.parse_args()
+        self.suite = self.args[0]
+        self.suiterc = localdb(self.options.db).get_suiterc(self.suite)
+        self.suite_dir = os.path.dirname(self.suiterc)
 
         # For user-defined job submission methods:
-        sys.path.append( os.path.join( self.suite_dir, 'python' ))
-
-        if len( self.args ) == 2:
-            start_point_string = self.args[1]
-            if start_point_string == "now":
-                start_point_string = (
-                    datetime.datetime.utcnow().strftime("%Y%m%dT%H%MZ")
-                )
+        sys.path.append(os.path.join(self.suite_dir, 'python'))
+
+        if len(self.args) == 2:
             if self.options.warm:
-                self._cli_start_point_string = start_point_string
+                self._cli_start_point_string = self.args[1]
             else:
-                self._cli_initial_point_string = start_point_string
+                self._cli_initial_point_string = self.args[1]
         elif self.options.warm:
             # No warm-start cycle point supplied.
             sys.exit(self.parser.get_usage())
 
-        scheduler.parse_commandline( self )
+        scheduler.parse_commandline(self)
 
-    def load_tasks( self ):
+    def load_tasks(self):
         if self.start_point is not None:
             if self.options.warm:
-                self.log.info( 'Warm Start %s' % self.start_point)
+                self.log.info('Warm Start %s' % self.start_point)
             else:
-                self.log.info( 'Cold Start %s' % self.start_point )
+                self.log.info('Cold Start %s' % self.start_point)
 
-        task_list = self.filter_initial_task_list( self.config.get_task_name_list() )
+        task_list = self.filter_initial_task_list(
+            self.config.get_task_name_list())
         coldstart_tasks = self.config.get_coldstart_task_list()
 
         for name in task_list:
@@ -122,17 +128,22 @@ warm start unless you use special cold-start tasks. See "Suite Start-up" and
                 # No start cycle point at which to load cycling tasks.
                 continue
             try:
-                itask = self.config.get_task_proxy(
+                itask = get_task_proxy(
                     name, self.start_point, 'waiting', is_startup=True)
             except TaskProxySequenceBoundsError as exc:
                 self.log.debug(str(exc))
                 continue
             if name in coldstart_tasks and self.options.warm:
-                itask.state.set_status( 'succeeded' )
-                itask.prerequisites.set_all_satisfied()
+                itask.state.set_status('succeeded')
+                itask.set_prerequisites_all_satisfied()
                 itask.outputs.set_all_completed()
             # Load task.
-            self.pool.add_to_runahead_pool( itask )
+            self.pool.add_to_runahead_pool(itask)
 
 if __name__ == '__main__':
-    main("run", start)
+    try:
+        main("run", start)
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-scan b/bin/cylc-scan
index f7a2dd9..aaad62f 100755
--- a/bin/cylc-scan
+++ b/bin/cylc-scan
@@ -15,13 +15,29 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
 """cylc [discovery] scan [OPTIONS] [HOSTS ...]
 
-Detect running suites by port scanning.  Use --verbose to see (with "connection
-denied") suites owned by others too.
+Print information about cylc suites currently running on scanned hosts. The
+list of hosts to scan is determined by the global configuration "[suite host
+scanning]" setting, or hosts can be specified explicitly on the command line.
+
+By default, just your own suites are listed (this assumes your username is the
+same on all scanned hosts). Use -a/--all-suites to see all suites on all hosts,
+or restrict suites displayed with the -o/--owner and -n/--name options (with
+--name, the default owner restriction (i.e. just your own suites) is disabled).
+
+Suite passphrases are not needed to get identity information (name and owner)
+from suites running cylc >= 6.6.0.  Titles, descriptions, state totals, and
+cycle point state totals may also be revealed publicly, depending on global
+and suite authentication settings. Suite passphrases still grant full access
+regardless of what is revealed publicly.
+
+Passphrases are still required to get identity information from older suites
+(cylc <= 6.5.0), otherwise you'll see "connection denied (security reasons)".
 
-WARNING: a suite suspended with Ctrl-Z will cause port scans to hang for all
-users until timing out (see --pyro-timeout, default 60 seconds)."""
+WARNING: a suite suspended with Ctrl-Z will cause port scans to hang until the
+connection times out (see --pyro-timeout)."""
 
 import sys
 if "--use-ssh" in sys.argv[1:]:
@@ -30,11 +46,30 @@ if "--use-ssh" in sys.argv[1:]:
     if remrun().execute():
         sys.exit(0)
 
+import re
 from multiprocessing import cpu_count, Pool
 from time import sleep
-from cylc.port_scan import scan
+import traceback
+
+import cylc.flags
+from cylc.network.port_scan import scan
 from cylc.CylcOptionParsers import cop
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.suite_host import is_remote_host
+from cylc.task_state import task_state
+from cylc.owner import user
+
+
+NO_BOLD = False
+
+
+def bold(line):
+    """Add terminal control characters for bold text."""
+    global NO_BOLD
+    if NO_BOLD:
+        return line
+    else:
+        return "\033[1m" + line + "\033[0m"
 
 
 def main():
@@ -43,29 +78,78 @@ def main():
         __doc__,
         pyro=True,
         noforce=True,
-        argdoc=[("[HOSTS ...]", "hosts to scan instead of the default ones")],
-        auto_add=False)
+        argdoc=[(
+            "[HOSTS ...]", "Hosts to scan instead of the configured hosts.")]
+    )
+
+    parser.add_option(
+        "-a", "--all", "--all-suites",
+        help="List all suites found on all scanned hosts (the default is "
+             "just your own suites).",
+        action="store_true", default=False, dest="all_suites")
+
+    parser.add_option(
+        "-n", "--name",
+        metavar="PATTERN",
+        help="List suites with name matching PATTERN (regular expression). "
+             "Defaults to any name. Can be used multiple times.",
+        action="append", dest="patterns_name", default=[])
+
+    parser.add_option(
+        "-o", "--owner",
+        metavar="PATTERN",
+        help="List suites with owner matching PATTERN (regular expression). "
+             "Defaults to just your own suites. Can be used multiple times.",
+        action="append", dest="patterns_owner", default=[])
+
+    parser.add_option(
+        "-d", "--describe",
+        help="Print suite titles and descriptions if available.",
+        action="store_true", default=False, dest="describe")
+
+    parser.add_option(
+        "-s", "--state-totals",
+        help="Print number of tasks in each state if available "
+             "(total, and by cycle point).",
+        action="store_true", default=False, dest="print_totals")
+
+    parser.add_option(
+        "-f", "--full",
+        help="Print all available information about each suite.",
+        action="store_true", default=False, dest="print_full")
+
+    parser.add_option(
+        "-c", "--color", "--colour",
+        help="Print task state summaries using terminal color control codes.",
+        action="store_true", default=False, dest="color")
+
+    parser.add_option(
+        "-b", "--no-bold",
+        help="Don't use any bold text in the command output.",
+        action="store_true", default=False, dest="no_bold")
 
     parser.add_option(
         "--print-ports",
-        help=(
-            "Print the port range from the site config file "
-            "($CYLC_DIR/conf/global.rc)."),
-        action="store_true",
-        default=False,
-        dest="print_ports")
-
-    parser.add_std_options()
-    parser.remove_option("--pyro-timeout")
+        help="Print the port range from the site config file "
+             "($CYLC_DIR/conf/global.rc).",
+        action="store_true", default=False, dest="print_ports")
+
+    parser.add_option(
+        "--pyro-timeout", metavar="SEC",
+        help="Set a timeout for network connections "
+             "to running suites. The default is 60 seconds.",
+        action="store", default=60, dest="pyro_timeout")
+
     parser.add_option(
-        "--pyro-timeout",
-        metavar="SEC",
-        help=(
-            "Set a timeout for network connections " +
-            "to running suites. The default is 60 seconds."),
-        action="store",
-        default=60,
-        dest="pyro_timeout")
+        "--old", "--old-format",
+        help='Legacy output format ("suite owner host port").',
+        action="store_true", default=False, dest="old_format")
+
+    parser.add_option(
+        "-r", "--raw", "--raw-format",
+        help='Parsable format ("suite|owner|host|property|value")',
+        action="store_true", default=False, dest="raw_format"
+    )
 
     options, args = parser.parse_args()
 
@@ -75,14 +159,200 @@ def main():
         print base, "<= port <=", base + max_num_ports
         sys.exit(0)
 
-    for scan_result in scan_all(args, options.db, options.pyro_timeout):
-        print "%s %s %s %s" % scan_result
+    indent = "   "
+
+    global NO_BOLD
+    if options.no_bold:
+        NO_BOLD = True
+
+    if options.print_full:
+        options.describe = options.print_totals = True
+
+    if options.color:
+        options.print_totals = True
+
+    if options.raw_format and (options.old_format or options.describe or
+                               options.print_totals or options.color):
+        parser.error(
+            "--raw-format cannot be used with other format options.")
+
+    if options.all_suites:
+        if options.patterns_name != []:
+            parser.error("-a and -n are mutually exclusive.")
+        if options.patterns_owner != []:
+            parser.error("-a and -o are mutually exclusive.")
+        patterns_name = ['.*']  # Any name.
+        patterns_owner = ['.*']  # Any owner.
+    else:
+        if options.patterns_name:
+            patterns_name = options.patterns_name
+        else:
+            # Any suite name.
+            patterns_name = ['.*']
+        if options.patterns_owner:
+            patterns_owner = options.patterns_owner
+        else:
+            if options.patterns_name:
+                # Any suite owner.
+                patterns_owner = ['.*']
+            else:
+                # Just the user's suites.
+                patterns_owner = [user]
+    pattern_name = "(" + ")|(".join(patterns_name) + ")"
+    pattern_owner = "(" + ")|(".join(patterns_owner) + ")"
+
+    state_legend = ""
+    if options.color:
+        n_states = len(task_state.legal)
+        for index, state in enumerate(task_state.legal):
+            state_legend += "%s%s%s " % (
+                task_state.ctrl[state], state, task_state.ctrl_end)
+            if index == n_states / 2:
+                state_legend += "\n"
+        state_legend = state_legend.rstrip()
+
+    skip_one = True
+    for result in scan_all(args, options.db, options.pyro_timeout):
+        host, scan_result = result
+        try:
+            port, suite_identity = scan_result
+        except ValueError:
+            # Back-compat (<= 6.5.0 no title or state totals).
+            port, name, owner = scan_result
+            if not (re.match(pattern_name, name) and
+                    re.match(pattern_owner, owner)):
+                continue
+            if options.old_format:
+                print name, owner, host, port
+            elif options.raw_format:
+                print "%s|%s|%s|port|%s" % (name, owner, host, port)
+            else:
+                print "%s %s@%s:%s" % (name, owner, host, port)
+            continue
+        else:
+            name = suite_identity['name']
+            owner = suite_identity['owner']
+
+        if not (re.match(pattern_name, name) and
+                re.match(pattern_owner, owner)):
+            continue
+
+        if options.old_format:
+            print name, owner, host, port
+            continue
+
+        if options.raw_format:
+            print "%s|%s|%s|port|%s" % (name, owner, host, port)
+            for property in ["title", "description", "update-time"]:
+                value = suite_identity.get(property, None)
+                if value:
+                    print "%s|%s|%s|%s|%s" % (
+                        name, owner, host, property,
+                        str(value).replace("\n", " ")
+                    )
+            totals = suite_identity.get('states', None)
+            if totals is None:
+                continue
+            point_state_lines = get_point_state_count_lines(
+                *totals, use_color=options.color)
+            for point, state_line in point_state_lines:
+                property = "states"
+                if point:
+                    property = "states:%s" % point
+                print "%s|%s|%s|%s|%s" % (
+                    name, owner, host, property, state_line)
+            continue
+
+        line = '%s %s@%s:%s' % (name, owner, host, port)
+        if options.describe or options.print_totals:
+            if skip_one:
+                skip_one = False
+                if state_legend != "":
+                    print state_legend + "\n"
+            else:
+                print
+            print bold(line)
+        else:
+            print line
+
+        if options.describe:
+            title = suite_identity.get('title', None)
+            if title is None:
+                print indent + bold("(description and state totals withheld)")
+                continue
+            print indent + bold("Title:")
+            if title == "":
+                line = "(no title)"
+            else:
+                line = '"%s"' % title
+            print indent * 2 + line
+
+            description = suite_identity.get('description', None)
+            print indent + bold("Description:")
+            if description == "":
+                lines = "(no description)"
+            else:
+                lines = '"%s"' % description
+            line1 = True
+            for line in lines.split('\n'):
+                line = line.lstrip()
+                if not line1:
+                    # Indent under the double quote.
+                    line = " " + line
+                line1 = False
+                print indent * 2 + line
+
+        totals = suite_identity.get('states', None)
+        if totals is not None:
+            state_count_totals, state_count_cycles = totals
+
+        if options.print_totals:
+            if totals is None:
+                print indent + bold("(state totals withheld)")
+                continue
+            print indent + bold("Task state totals:")
+            point_state_lines = get_point_state_count_lines(
+                *totals, use_color=options.color)
+            for point, state_line in point_state_lines:
+                point_prefix = ""
+                if point:
+                    point_prefix = "%s " % point
+                print indent * 2 + "%s%s" % (point_prefix, state_line)
+
+
+def get_point_state_count_lines(state_count_totals, state_count_cycles,
+                                use_color=False):
+    """Yield (point, state_summary_text) tuples."""
+    line = ""
+    for state, tot in sorted(state_count_totals.items()):
+        if use_color:
+            line += '%s %d %s' % (
+                task_state.ctrl[state], tot, task_state.ctrl_end)
+        else:
+            line += '%s:%d ' % (state, tot)
+    yield ("", line.strip())
+
+    for point_string in sorted(state_count_cycles.keys()):
+        line = ""
+        for st, tot in sorted(state_count_cycles[point_string].items()):
+            if use_color:
+                line += '%s %d %s' % (
+                        task_state.ctrl[st], tot, task_state.ctrl_end)
+            else:
+                line += '%s:%d ' % (st, tot)
+        yield (point_string, line.strip())
 
 
 def scan_all(hosts=None, reg_db_path=None, pyro_timeout=None):
     """Scan all hosts."""
     if not hosts:
         hosts = GLOBAL_CFG.get(["suite host scanning", "hosts"])
+    # Use "localhost" for any local host names, and scan it only once.
+    hosts = set(hosts)
+    for host in list(hosts):
+        if not is_remote_host(host):
+            hosts.remove(host)
+            hosts.add("localhost")
     proc_pool_size = GLOBAL_CFG.get(["process pool size"])
     if proc_pool_size is None:
         proc_pool_size = cpu_count()
@@ -95,16 +365,22 @@ def scan_all(hosts=None, reg_db_path=None, pyro_timeout=None):
             scan, [host, reg_db_path, pyro_timeout])
     proc_pool.close()
     scan_results = []
+    hosts = []
     while async_results:
         sleep(0.05)
         for host, async_result in async_results.items():
             if async_result.ready():
                 async_results.pop(host)
-                scan_results.extend(async_result.get())
+                try:
+                    res = async_result.get()
+                except:
+                    if cylc.flags.debug:
+                        traceback.print_exc()
+                else:
+                    scan_results.extend(res)
+                    hosts.extend([host] * len(res))
     proc_pool.join()
-    scan_results.sort()
-    return scan_results
-
+    return zip(hosts, scan_results)
 
 if __name__ == "__main__":
     main()
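
For reference, a hedged illustration of how the get_point_state_count_lines() generator above can be consumed, using made-up state counts in the two dict shapes the code expects (overall totals keyed by state name, and per-cycle totals keyed by cycle point and then state):

    state_count_totals = {'running': 2, 'waiting': 5}
    state_count_cycles = {
        '20150101T00': {'running': 1, 'waiting': 3},
        '20150102T00': {'running': 1, 'waiting': 2},
    }
    for point, line in get_point_state_count_lines(
            state_count_totals, state_count_cycles):
        prefix = '%s ' % point if point else ''
        print prefix + line

    # Expected output (without --color):
    #   running:2 waiting:5
    #   20150101T00 running:1 waiting:3
    #   20150102T00 running:1 waiting:2
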
diff --git a/bin/cylc-search b/bin/cylc-search
index 55834d0..bdaa6e0 100755
--- a/bin/cylc-search
+++ b/bin/cylc-search
@@ -16,74 +16,76 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [prep] search|grep [OPTIONS] ARGS
+
+Search for pattern matches in suite definitions and any files in the
+suite bin directory. Matches are reported by line number and suite
+section. An unquoted list of PATTERNs will be converted to an OR'd
+pattern. Note that the order of command line arguments conforms to
+normal cylc command usage (suite name first) not that of the grep
+command.
+
+Note that this command performs a text search on the suite definition;
+it does not search the data structure that results from parsing the
+suite definition - so it will not report implicit default settings.
+
+For case insensitive matching use '(?i)PATTERN'."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
-import os, re
+import os
+import re
 from collections import deque
 from cylc.CylcOptionParsers import cop
 from parsec.include import inline
 
-def section_level( heading ):
+
+def section_level(heading):
     # e.g. foo => 0
     #     [foo] => 1
     #    [[foo]] => 2
-    m = re.match( '^(\[+)', heading )
+    m = re.match('^(\[+)', heading)
     if m:
-        level = len( m.groups()[0] )
+        level = len(m.groups()[0])
     else:
         level = 0
     return level
 
-def print_heading( heading ):
-    print '>>>' + '->'.join(heading)
-    #space = ' '
-    #indent = '>>>'
-    #for i in range( 1, section_level(heading) ):
-    #    indent += space * 2
-    #print indent + heading
-
-parser = cop( """cylc [prep] search|grep [OPTIONS] ARGS
 
-Search for pattern matches in suite definitions and any files in the
-suite bin directory. Matches are reported by line number and suite
-section. An unquoted list of PATTERNs will be converted to an OR'd
-pattern. Note that the order of command line arguments conforms to
-normal cylc command usage (suite name first) not that of the grep
-command.
-
-Note that this command performs a text search on the suite definition,
-it does not search the data structure that results from parsing the
-suite definition - so it will not report implicit default settings.
+def print_heading(heading):
+    print '>>>' + '->'.join(heading)
 
-For case insenstive matching use '(?i)PATTERN'.""", prep=True,
-    argdoc = [('SUITE', 'Suite name or path'),
-        ('PATTERN', 'Python-style regular expression'),
-        ('[PATTERN2...]', 'Additional search patterns')])
+parser = cop(
+    __doc__, prep=True,
+    argdoc=[('SUITE', 'Suite name or path'),
+            ('PATTERN', 'Python-style regular expression'),
+            ('[PATTERN2...]', 'Additional search patterns')])
 
-parser.add_option( "-x", help="Do not search in the suite bin directory",
-        action="store_false", default=True, dest="search_bin" )
+parser.add_option(
+    "-x", help="Do not search in the suite bin directory",
+    action="store_false", default=True, dest="search_bin")
 
-( options, args ) = parser.parse_args()
+(options, args) = parser.parse_args()
 
 suite, suiterc, junk = parser.get_suite()
 
 # cylc search SUITE PATTERN
 pattern = '|'.join(args[1:])
 
-suitedir = os.path.dirname( suiterc )
+suitedir = os.path.dirname(suiterc)
 
-if os.path.isfile( suiterc ):
-    h = open( suiterc, 'rb' )
+if os.path.isfile(suiterc):
+    h = open(suiterc, 'rb')
     lines = h.readlines()
     h.close()
-    lines = inline( lines, suitedir, suiterc, for_grep=True )
+    lines = inline(lines, suitedir, suiterc, for_grep=True)
 else:
-    parser.error("File not found: " + suiterc )
+    parser.error("File not found: " + suiterc)
 
-sections = deque( ['(top)'] )
+sections = deque(['(top)'])
 
 line_count = 1
 inc_file = None
@@ -93,7 +95,7 @@ prev_file = None
 
 for line in lines:
 
-    m = re.match('^#\+\+\+\+ START INLINED INCLUDE FILE ([\w/\.\-]+)', line )
+    m = re.match('^#\+\+\+\+ START INLINED INCLUDE FILE ([\w/\.\-]+)', line)
     if m:
         inc_file = m.groups()[0]
         in_include_file = True
@@ -104,30 +106,30 @@ for line in lines:
         line_count += 1
     else:
         inc_line_count += 1
-        m = re.match('^#\+\+\+\+ END INLINED INCLUDE FILE ' + inc_file, line )
+        m = re.match('^#\+\+\+\+ END INLINED INCLUDE FILE ' + inc_file, line)
         if m:
             in_include_file = False
             inc_file = None
             continue
 
-    m = re.match( '\s*(\[+\s*(.+)\s*\]+)', line )
+    m = re.match('\s*(\[+\s*(.+)\s*\]+)', line)
     if m:
         # new section heading detected
         heading = m.groups()[0]
-        level = section_level( heading )
+        level = section_level(heading)
         name = m.groups()[1]
         # unwind to the current section level
         while len(sections) > level - 1:
             sections.pop()
-        sections.append( heading )
+        sections.append(heading)
         continue
 
-    if re.search( pattern, line ):
+    if re.search(pattern, line):
         # Found a pattern match.
 
         # Print the file name
         if in_include_file:
-            curr_file = os.path.join( suitedir, inc_file )
+            curr_file = os.path.join(suitedir, inc_file)
             line_no = inc_line_count
         else:
             curr_file = suiterc
@@ -138,11 +140,9 @@ for line in lines:
             print "\nFILE:", curr_file
 
         # Print the nested section headings
-        section_key = '->'.join( sections )
+        section_key = '->'.join(sections)
         if section_key != prev_section_key:
             prev_section_key = section_key
-            #for heading in sections:
-            #    print_heading( heading )
             print '   SECTION:', section_key
 
         # Print the pattern match, with line number
@@ -152,22 +152,22 @@ if not options.search_bin:
     sys.exit(0)
 
 # search files in suite bin directory
-bin = os.path.join( suitedir, 'bin' )
-if not os.path.isdir( bin ):
+bin = os.path.join(suitedir, 'bin')
+if not os.path.isdir(bin):
     print >> sys.stderr, "\nSuite " + suite + " has no bin directory"
     sys.exit(0)
 
-for file in os.listdir( bin ):
-    if re.match( '^\.', file):
+for file in os.listdir(bin):
+    if re.match('^\.', file):
         # skip hidden dot-files
         # (e.g. vim editor temporary files)
         continue
     new_file = True
     try:
-        h = open( os.path.join(bin,file), 'rb')
+        h = open(os.path.join(bin, file), 'rb')
     except IOError, x:
         # e.g. there's a sub-directory under bin; ignore it.
-        print >> sys.stderr, 'Unable to open file ' + os.path.join(bin,file)
+        print >> sys.stderr, 'Unable to open file ' + os.path.join(bin, file)
         print >> sys.stderr, x
         continue
     contents = h.readlines()
@@ -177,8 +177,8 @@ for file in os.listdir( bin ):
     for line in contents:
         line = line.rstrip('\n')
         count += 1
-        if re.search( pattern, line ):
+        if re.search(pattern, line):
             if new_file:
-                print '\nFILE:', os.path.join( bin, file )
+                print '\nFILE:', os.path.join(bin, file)
                 new_file = False
-            print '   ('+str(count)+'): ' + line
+            print '   (' + str(count) + '): ' + line
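
The deque of headings in cylc-search above is what turns a match's position into a nested SECTION path: each new heading unwinds the stack to the heading's parent level and is then pushed. A hedged, self-contained walk-through with hypothetical suite.rc headings (section_level is condensed from the script above):

    import re
    from collections import deque

    def section_level(heading):
        # Bracket depth: foo => 0, [foo] => 1, [[foo]] => 2.
        m = re.match('^(\[+)', heading)
        return len(m.groups()[0]) if m else 0

    sections = deque(['(top)'])
    for heading in ['[scheduling]', '[[dependencies]]', '[runtime]', '[[foo]]']:
        level = section_level(heading)
        while len(sections) > level - 1:   # unwind to the parent level
            sections.pop()
        sections.append(heading)
        print '->'.join(sections)

    # prints:
    #   [scheduling]
    #   [scheduling]->[[dependencies]]
    #   [runtime]
    #   [runtime]->[[foo]]
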
diff --git a/bin/cylc-set-runahead b/bin/cylc-set-runahead
index addd2e3..581ea54 100755
--- a/bin/cylc-set-runahead
+++ b/bin/cylc-set-runahead
@@ -16,56 +16,61 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] set-runahead [OPTIONS] ARGS
+
+Change the suite runahead limit in a running suite. This is the number of
+hours that the fastest task is allowed to get ahead of the slowest. If a
+task spawns beyond that limit it will be held back from running until the
+slowest tasks catch up enough. WARNING: if you omit HOURS no runahead
+limit will be set - DO NOT DO THIS for any cycling suite that has
+no near stop cycle set and is not constrained by clock-trigger tasks."""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
+    if remrun().execute(force_required=True):
         sys.exit(0)
 
+import cylc.flags
 from cylc.prompt import prompt
-from cylc import cylc_pyro_client
+from cylc.network.suite_command import SuiteCommandClient
 from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_pyro
-import cylc.flags
 
-parser = cop("""cylc [control] set-runahead [OPTIONS] ARGS
 
-Change the suite runahead limit in a running suite. This is the number of
-hours that the fastest task is allowed to get ahead of the slowest. If a
-task spawns beyond that limit it will be held back from running until the
-slowest tasks catch up enough. WARNING: if you omit HOURS no runahead
-limit will be set - DO NOT DO THIS for for any cycling suite that has
-no near stop cycle set and is not constrained by clock-triggered
-tasks.""", pyro=True,
-    argdoc=[ ('REG', 'Suite name'),
-        ('[HOURS]', 'Runahead limit (default: no limit)')])
+def main():
+    parser = cop(
+        __doc__, pyro=True,
+        argdoc=[('REG', 'Suite name'),
+                ('[HOURS]', 'Runahead limit (default: no limit)')])
 
-(options, args) = parser.parse_args()
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-suite, pphrase = prep_pyro( args[0], options ).execute()
+    runahead = None
+    if len(args) == 2:
+        runahead = args[1]
 
-runahead = None
-if len(args) == 2:
-    runahead = args[1]
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
 
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
     if runahead:
-        prompt( 'Change runahead limit in ' + suite + ' to ' + runahead, options.force )
-        result = proxy.put( 'set runahead', runahead )
+        prompt('Change runahead limit in %s to %s' % (suite, runahead),
+               options.force)
+        pclient.put_command('set_runahead', runahead)
     else:
         # no limit!
-        prompt( 'Change runahead limit in ' + suite + ' to NO LIMIT', options.force )
-        result = proxy.put( 'set runahead' )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
+        prompt('Change runahead limit in %s to NO LIMIT' % suite,
+               options.force)
+        pclient.put_command('set_runahead')
+
 
-if result[0]:
-    print result[1]
-else:
-    sys.exit( result[1] )
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
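
As with the other converted control commands, cylc-set-runahead now goes through the new SuiteCommandClient rather than a Pyro 'command-interface' proxy: the client is built from the parsed standard options and the named command is queued with put_command(), with the runahead value as an optional trailing argument. A condensed restatement of the calls from main() above (suite and options come from the command's cop parser; the '12' is a hypothetical value):

    pclient = SuiteCommandClient(
        suite, options.owner, options.host, options.pyro_timeout,
        options.port, options.db, my_uuid=options.set_uuid,
        print_uuid=options.print_uuid)

    pclient.put_command('set_runahead', '12')   # limit runahead to 12 hours
    pclient.put_command('set_runahead')         # or: remove the limit entirely
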
diff --git a/bin/cylc-set-verbosity b/bin/cylc-set-verbosity
index 0126bdd..3c35cef 100755
--- a/bin/cylc-set-verbosity
+++ b/bin/cylc-set-verbosity
@@ -16,56 +16,67 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] set-verbosity [OPTIONS] ARGS
+
+Change the logging priority level of a running suite.  Only messages at
+or above the chosen priority level will be logged; for example, if you
+choose WARNING, only warnings and critical messages will be logged."""
+
+from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG
 import sys
 
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
+    if remrun().execute(force_required=True):
         sys.exit(0)
 
+import cylc.flags
 from cylc.prompt import prompt
-from cylc import cylc_pyro_client
+from cylc.network.suite_command import SuiteCommandClient
 from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_pyro
-import cylc.flags
-from cylc.job_logs import CommandLogger
 
-parser = cop("""cylc [control] set-verbosity [OPTIONS] ARGS
+LOGGING_LVL_OF = {
+    "INFO": INFO,
+    "NORMAL": INFO,
+    "WARNING": WARNING,
+    "ERROR": ERROR,
+    "CRITICAL": CRITICAL,
+    "DEBUG": DEBUG,
+}
 
-Change the logging priority level of a running suite.  Only messages at
-or above the chosen priority level will be logged; for example, if you
-choose WARNING, only warnings and critical messages will be logged.""",
-    pyro=True,
-    argdoc=[
-        ('REG', 'Suite name'),
-        ('LEVEL', ', '.join(CommandLogger.LOGGING_PRIORITY.keys()))
-    ]
-)
 
-(options, args) = parser.parse_args()
+def main():
+    parser = cop(
+        __doc__, pyro=True,
+        argdoc=[
+            ('REG', 'Suite name'),
+            ('LEVEL', ', '.join(LOGGING_LVL_OF.keys()))
+        ]
+    )
+    (options, args) = parser.parse_args()
+    suite = args[0]
+
+    priority_str = args[1]
+    try:
+        priority = LOGGING_LVL_OF[priority_str]
+    except KeyError:
+        parser.error("Illegal logging level, %s" % priority_str)
 
-suite, pphrase = prep_pyro(args[0], options).execute()
+    prompt("Set logging level to %s in %s" % (priority_str, suite),
+           options.force)
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
 
-priority_str = args[1]
-try:
-    priority = CommandLogger.LOGGING_PRIORITY[priority_str]
-except KeyError:
-    parser.error("Illegal logging level, %s" % priority)
+    pclient.put_command('set_verbosity', priority)
 
-try:
-    proxy = cylc_pyro_client.client(
-            suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port).get_proxy('command-interface')
-    prompt("Set logging level to " + priority_str + " in " + suite, options.force)
-    res, msg = proxy.put('set verbosity', priority)
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit(x)
 
-if res:
-    print msg
-else:
-    sys.exit(msg)
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
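
The LOGGING_LVL_OF table above resolves the command-line priority name to a numeric level from the standard logging module before it is queued, so the suite receives the usual logging values rather than a string (NORMAL is kept as an alias for INFO). For instance:

    from logging import DEBUG, INFO, WARNING

    print DEBUG, INFO, WARNING   # -> 10 20 30: the values put_command sends
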
diff --git a/bin/cylc-show b/bin/cylc-show
index 8c8e423..2e0a4bc 100755
--- a/bin/cylc-show
+++ b/bin/cylc-show
@@ -16,6 +16,12 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [info] show [OPTIONS] ARGS
+
+Interrogate a suite daemon for the suite title and description; or for the
+title and description of one of its tasks; or for the current state of the
+prerequisites, outputs, and clock-triggering of a specific task instance."""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
     sys.argv.remove('--use-ssh')
@@ -23,35 +29,31 @@ if '--use-ssh' in sys.argv[1:]:
     if remrun().execute():
         sys.exit(0)
 
-from cylc import cylc_pyro_client
+import cylc.flags
+from cylc.network.suite_info import SuiteInfoClient
 from cylc.CylcOptionParsers import cop
 from cylc.task_id import TaskID
-from cylc.command_prep import prep_pyro
-import cylc.flags
 
-parser = cop(
-    """cylc [info] show [OPTIONS] ARGS
 
-Interrogate a suite daemon for the suite title and description; or for the
-title and description of one of its tasks; or for the current state of the
-prerequisites, outputs, and clock-triggering of a specific task instance.""",
-    pyro=True, noforce=True,
-    argdoc=[('REG', 'Suite name'),
-            ('[' + TaskID.SYNTAX_OPT_POINT + ']', 'Task name or ID')])
+def main():
+    parser = cop(
+        __doc__, pyro=True, noforce=True,
+        argdoc=[('REG', 'Suite name'),
+                ('[' + TaskID.SYNTAX_OPT_POINT + ']', 'Task name or ID')])
 
-(options, args) = parser.parse_args()
-suite, pphrase = prep_pyro(args[0], options).execute()
+    (options, args) = parser.parse_args()
+    suite = args[0]
 
-try:
-    proxy = cylc_pyro_client.client(
-        suite, pphrase, options.owner, options.host, options.pyro_timeout,
-        options.port).get_proxy('suite-info')
+    pclient = SuiteInfoClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
 
     if len(args) == 1:
         # Print suite info.
-        suite_info = proxy.get('suite info')
+        suite_info = pclient.get_info('get_suite_info')
         for key, value in sorted(suite_info.items(), reverse=True):
-            print '%s: %s' % (key,value or "(not given)")
+            print '%s: %s' % (key, value or "(not given)")
         sys.exit(0)
 
     point_string = None
@@ -65,14 +67,14 @@ try:
         # Print task instance info.
         task_id = arg
 
-    info = proxy.get('task info', name)
+    info = pclient.get_info('get_task_info', name)
     if not info:
         sys.exit("ERROR: task not found: %s" % name)
     for key, value in sorted(info.items(), reverse=True):
         print "%s: %s" % (key, value or "(not given)")
 
     if point_string is not None:
-        result = proxy.get('task requisites', name, point_string)
+        result = pclient.get_info('get_task_requisites', name, point_string)
         if not result:
             sys.exit("ERROR: task instance not found: %s" % task_id)
 
@@ -105,7 +107,12 @@ try:
                 print '\nother:'
                 for item in extra_info:
                     print '  o ', item, '...', extra_info[item]
-except Exception as exc:
-    if cylc.flags.debug:
-        raise
-    sys.exit(exc)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
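
Several of the converted commands (cylc-show above, cylc-stop and cylc-submit below) handle task IDs through the TaskID helpers imported from cylc.task_id. A hedged sketch of those helpers as they are used in this patch; the ID below is hypothetical, and cylc 6 task IDs take the form NAME.CYCLE_POINT:

    from cylc.task_id import TaskID

    task_id = 'foo.20150101T0000Z'          # hypothetical task ID
    if TaskID.is_valid_id(task_id):
        name, point_string = TaskID.split(task_id)
        print name, point_string            # -> foo 20150101T0000Z
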
diff --git a/bin/cylc-started b/bin/cylc-started
deleted file mode 100755
index 002c63c..0000000
--- a/bin/cylc-started
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import os, sys
-from optparse import OptionParser
-from cylc.task_message import message
-import cylc.flags
-
-usage = """cylc [task] started [OPTIONS]
-
-This command is part of the cylc task messaging interface, used by
-running tasks to communicate progress to their parent suite.
-
-The started command reports commencement of task execution. It is
-called automatically by task job scripts.
-
-Suite and task identity are determined from the task execution
-environment supplied by the suite (or by the single task 'submit'
-command, in which case the message is just printed to stdout).
-
-See also:
-    cylc [task] message
-    cylc [task] succeeded
-    cylc [task] failed"""
-
-parser = OptionParser( usage )
-
-parser.add_option( "-v", "--verbose",
-        help="Verbose output mode.",
-        action="store_true", default=False, dest="verbose" )
-
-( options, args ) = parser.parse_args()
-cylc.flags.verbose = options.verbose
-
-if len( args ) != 0:
-    parser.error( "Wrong number of arguments" )
-
-debug = False
-try:
-    # from task execution environment
-    if os.environ['CYLC_DEBUG'] == 'True':
-        debug = True
-except KeyError:
-    pass
-
-# send start message
-try:
-    message().send_started()
-except Exception, x:
-    if debug:
-        raise
-    print >> sys.stderr, 'ERROR: task messaging failure.'
-    raise SystemExit(x)
diff --git a/bin/cylc-stop b/bin/cylc-stop
index 3411bee..c869055 100755
--- a/bin/cylc-stop
+++ b/bin/cylc-stop
@@ -16,44 +16,57 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] stop|shutdown [OPTIONS] ARGS
+
+Tell a running suite daemon to shut down. In order to prevent failures going
+unnoticed, suites only shut down automatically at a final cycle point if no
+failed tasks are present. There are several shutdown methods (note that an
+'active' task is either 'submitted' or 'running'):
+
+  1. (default) stop after current active tasks finish
+  2. (--now) stop immediately, orphaning current active tasks
+  3. (--kill) stop after killing current active tasks
+  4. (with STOP as a cycle point) stop after cycle point STOP
+  5. (with STOP as a task ID) stop after task ID STOP has succeeded
+  6. (--wall-clock=T) stop after time T (an ISO 8601 date-time format e.g.
+     CCYYMMDDThh:mm, CCYY-MM-DDThh, etc).
+
+Tasks that become 'ready' after the shutdown is ordered will remain that way
+until the shutdown; they will submit if the suite is restarted.  Remaining task
+event handlers and job poll and kill commands, however, will be executed prior
+to shutdown, unless --now is used.
+
+This command exits immediately unless --max-polls is greater than zero, in
+which case it polls to wait for suite shutdown."""
+
 import sys
 if '--use-ssh' in sys.argv[1:]:
-    sys.argv.remove( '--use-ssh' )
+    sys.argv.remove('--use-ssh')
     from cylc.remote import remrun
-    if remrun().execute( force_required=True ):
+    if remrun().execute(force_required=True):
         sys.exit(0)
 
+import cylc.flags
 from cylc.prompt import prompt
 from cylc.task_id import TaskID
-from cylc import cylc_pyro_client
+from cylc.network.suite_command import SuiteCommandClient
+from cylc.network.suite_info import SuiteInfoClient
 from cylc.CylcOptionParsers import cop
-from cylc.command_prep import prep_pyro
 from cylc.command_polling import poller
-from cylc.port_file import PortFileError
-import cylc.flags
 
-class stop_poller( poller ):
+
+class stop_poller(poller):
     """A polling object that checks if a suite has stopped yet."""
 
-    def connect( self ):
-        try:
-            self.proxy = cylc_pyro_client.client(
-                    self.args['suite'],
-                    self.args['pphrase'],
-                    self.args['owner'],
-                    self.args['host'],
-                    self.args['pyro_timeout'],
-                    self.args['port'] ).get_proxy('suite-info')
-        except Exception, x:
-            print >> sys.stderr, x
-            return False
-        else:
-            return True
+    def load(self):
+        self.pclient = SuiteInfoClient(
+            self.args['suite'], self.args['owner'], self.args['host'],
+            self.args['pyro_timeout'], self.args['port'], self.args['db'])
 
-    def check( self ):
+    def check(self):
         # return True if suite has stopped (success) else False
         try:
-           self.proxy.get( 'ping suite' )
+            self.pclient.get_info('ping_suite')
         except:
             # failed to ping - suite stopped
             return True
@@ -61,141 +74,100 @@ class stop_poller( poller ):
             # pinged - suite must be alive
             return False
 
-parser = cop( """cylc [control] stop|shutdown [OPTIONS] ARGS
 
-Tell a running suite daemon to shut down. In order to prevent failures going
-unnoticed, suites only shut down automatically at a final cycle point if no
-failed tasks are present. There are several shutdown methods (note that an
-'active' task is either 'submitted' or 'running'):
-
-  1. (default) stop after current active tasks finish
-  2. (--now) stop immediately, orphaning current active tasks
-  3. (--kill) stop after killing current active tasks
-  4. (with STOP as a cycle point) stop after cycle point STOP
-  5. (with STOP as a task ID) stop after task ID STOP has succeeded
-  6. (--wall-clock=T) stop after time T (an ISO 8601 date-time format e.g.
-     CCYYMMDDThh:mm, CCYY-MM-DDThh, etc).
+def main():
+    parser = cop(
+        __doc__, pyro=True,
+        argdoc=[("REG", "Suite name"),
+                ("[STOP]", """a/ task POINT (cycle point), or
+                            b/ ISO 8601 date-time (clock time), or
+                            c/ TASK (task ID).""")])
 
-Tasks that become 'ready' after the shutdown is ordered will remain that way
-until the shutdown; they will submit if the suite is restarted.  Remaining task
-event handlers and job poll and kill commands, however, will be executed prior
-to shutdown, unless --now is used.
-
-This command exits immediately unless --max-polls is greater than zero, in
-which case it polls to wait for suite shutdown.""",
-    pyro=True, argdoc=[ ("REG", "Suite name"),
-    ("[STOP]", """a/ task POINT (cycle point), or
-                        b/ ISO 8601 date-time (clock time), or
-                        c/ TASK (task ID).""")] )
-
-parser.add_option( "-k", "--kill",
+    parser.add_option(
+        "-k", "--kill",
         help="Shut down after killing currently active tasks.",
-        action="store_true", default=False, dest="kill" )
+        action="store_true", default=False, dest="kill")
 
-parser.add_option( "-n", "--now",
+    parser.add_option(
+        "-n", "--now",
         help="Shut down immediately, orphaning currently active tasks.",
-        action="store_true", default=False, dest="now" )
+        action="store_true", default=False, dest="now")
 
-parser.add_option( "-w", "--wall-clock", metavar="STOP",
+    parser.add_option(
+        "-w", "--wall-clock", metavar="STOP",
         help="Shut down after time STOP (ISO 8601 formatted)",
-        action="store", dest="wall_clock" )
-
-stop_poller.add_to_cmd_options( parser, d_max_polls=0 )
-
-(options, args) = parser.parse_args()
-
-suite, pphrase = prep_pyro( args[0], options ).execute()
-
-shutdown_at = False
-if len( args ) == 2:
-    shutdown_at = True
-    shutdown_arg = args[1]
-    if options.kill:
-        parser.error("ERROR: --kill is not compatible with [STOP]")
-
-if options.kill and options.now:
-    parser.error( "ERROR: --kill is not compatible with --now" )
-
-if int(options.max_polls) > 0:
-    # (test to avoid the "nothing to do" warning for # --max-polls=0)
-    spoller = stop_poller( "suite stopped", options.interval, options.max_polls,
-        args={
-            'suite'   : suite,
-            'pphrase' : pphrase,
-            'owner'   : options.owner,
-            'host'    : options.host,
-            'pyro_timeout' : options.pyro_timeout,
-            'port'    : options.port
-            })
-try:
-    proxy = cylc_pyro_client.client( suite, pphrase, options.owner,
-            options.host, options.pyro_timeout,
-            options.port ).get_proxy( 'command-interface' )
-except PortFileError, x:
-    print >> sys.stderr, x
-    print "The suite is apparently not running"
-    # (this is OK for the stop command)
-    sys.exit(0)
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
-
-method = None
-if options.wall_clock:
-    method = 'stop after clock time'
-    prompt_text = 'Set shutdown at wall clock %s' % options.wall_clock
-    shutdown_arg = options.wall_clock
-elif shutdown_at:
-    # STOP argument detected
-    if TaskID.is_valid_id(shutdown_arg):
-        # is a task ID
-        method = 'stop after task'
-        prompt_text = 'Set shutdown after task %s' % shutdown_arg
+        action="store", dest="wall_clock")
+
+    stop_poller.add_to_cmd_options(parser, d_max_polls=0)
+    (options, args) = parser.parse_args()
+    suite = args[0]
+
+    shutdown_at = False
+    if len(args) == 2:
+        shutdown_at = True
+        shutdown_arg = args[1]
+        if options.kill:
+            parser.error("ERROR: --kill is not compatible with [STOP]")
+
+    if options.kill and options.now:
+        parser.error("ERROR: --kill is not compatible with --now")
+
+    pclient = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+
+    if int(options.max_polls) > 0:
+        # (test to avoid the "nothing to do" warning for # --max-polls=0)
+        spoller = stop_poller(
+            "suite stopped", options.interval, options.max_polls,
+            args={
+                'suite': suite,
+                'owner': options.owner,
+                'host': options.host,
+                'pyro_timeout': options.pyro_timeout,
+                'port': options.port,
+                'db': options.db
+            }
+        )
+
+    method = None
+    if options.wall_clock:
+        method = 'set_stop_after_clock_time'
+        prompt_text = 'Set shutdown at wall clock %s' % options.wall_clock
+        shutdown_arg = options.wall_clock
+    elif shutdown_at:
+        # STOP argument detected
+        if TaskID.is_valid_id(shutdown_arg):
+            # is a task ID
+            method = 'set_stop_after_task'
+            prompt_text = 'Set shutdown after task %s' % shutdown_arg
+        else:
+            # not a task ID, may be a cycle point
+            method = 'set_stop_after_point'
+            prompt_text = 'Set shutdown at cycle point %s' % shutdown_arg
+
+    if method:
+        prompt(prompt_text + ' for ' + suite, options.force)
+        pclient.put_command(method, shutdown_arg)
+    elif options.now:
+        prompt('Shut down %s now' % suite, options.force)
+        pclient.put_command('stop_now')
     else:
-        # not a task ID, may be a cycle point
-        method = 'stop after point'
-        prompt_text = 'Set shutdown at cycle point %s' % shutdown_arg
+        prompt('Shut down %s' % suite, options.force)
+        pclient.put_command('set_stop_cleanly', options.kill)
 
-if method:
-    prompt( prompt_text + ' for ' + suite, options.force )
-    try:
-        result = proxy.put( method, shutdown_arg )
-    except Exception,x:
-        if cylc.flags.debug:
-            raise
-        sys.exit(x)
+    if int(options.max_polls) > 0:
+        # (test to avoid the "nothing to do" warning for # --max-polls=0)
+        spoller.load()
+        if not spoller.poll():
+            sys.exit(1)
 
-elif options.now:
-    prompt( 'Shut down %s now' % suite, options.force )
-    try:
-        result = proxy.put( 'stop now' )
-    except Exception,x:
-        if cylc.flags.debug:
-            raise
-        sys.exit(x)
 
-else:
-    prompt( 'Shut down %s' % suite, options.force )
-    print "Telling the suite to shut down ..."
+if __name__ == "__main__":
     try:
-        result = proxy.put( 'stop cleanly', options.kill )
-    except Exception,x:
+        main()
+    except Exception as exc:
         if cylc.flags.debug:
             raise
-        sys.exit(x)
-
-if result[0]:
-    # command queued
-    print result[1]
-else:
-    # (should not happen)
-    sys.exit( result[1] )
-
-if int(options.max_polls) > 0:
-    # (test to avoid the "nothing to do" warning for # --max-polls=0)
-    if not spoller.connect():
-        print >> sys.stderr, "Could not connect to suite: it has apparently shut down already"
-        sys.exit(0)
-    if not spoller.poll():
-        sys.exit(1)
+        sys.exit(exc)
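
To summarize the branches in cylc-stop's main() above, the shutdown options now map onto renamed suite commands queued via put_command(). The mapping below is an illustrative summary only (the dict itself is not part of the patch):

    SHUTDOWN_COMMAND_OF = {                    # hypothetical summary table
        '--wall-clock=T': 'set_stop_after_clock_time',
        'STOP as a task ID': 'set_stop_after_task',
        'STOP as a cycle point': 'set_stop_after_point',
        '--now': 'stop_now',
        'default (clean shutdown)': 'set_stop_cleanly',  # passed options.kill
    }
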
diff --git a/bin/cylc-submit b/bin/cylc-submit
index 6735214..68c10b5 100755
--- a/bin/cylc-submit
+++ b/bin/cylc-submit
@@ -16,6 +16,19 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [task] submit|single [OPTIONS] ARGS
+
+Submit a single task to run just as it would be submitted by its suite.  Task
+messaging commands will print to stdout but will not attempt to communicate
+with the suite (which does not need to be running).
+
+For tasks present in the suite graph the given cycle point is adjusted up to
+the next valid cycle point for the task. For tasks defined under runtime but
+not present in the graph, the given cycle point is assumed to be valid.
+
+WARNING: do not 'cylc submit' a task that is running in its suite at the
+same time - both instances will attempt to write to the same job logs."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
@@ -24,44 +37,31 @@ if remrun().execute():
 import os
 import subprocess
 from time import sleep
+
+import cylc.flags
 from parsec.OrderedDict import OrderedDict
 from cylc.execute import execute
 from cylc.job_file import JOB_FILE
-from cylc.config import config, SuiteConfigError, TaskNotDefinedError
+from cylc.config import SuiteConfig
 from cylc.CylcOptionParsers import cop
+from cylc.registration import localdb
 from cylc.task_id import TaskID
+from cylc.get_task_proxy import get_task_proxy
 from cylc.cycling.loader import get_point
 from cylc.job_host import RemoteJobHostManager
 from cylc.suite_host import get_suite_host, get_hostname
 from cylc.regpath import RegPath
-from cylc.command_prep import prep_file
 from cylc.suite_logging import suite_log
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
-import cylc.flags
-from cylc.mp_pool import SuiteProcPool, CMD_TYPE_JOB_SUBMISSION
-
-usage = """cylc [task] submit|single [OPTIONS] ARGS
-
-Submit a single task to run just as it would be submitted by its suite.  Task
-messaging commands will print to stdout but will not attempt to communicate
-with the suite (which does not need to be running).
-
-For tasks present in the suite graph the given cycle point is adjusted up to
-the next valid cycle point for the task. For tasks defined under runtime but
-not present in the graph, the given cycle point is assumed to be valid.
-
-WARNING: do not 'cylc submit' a task that is running in its suite at the
-same time - both instances will attempt to write to the same job logs."""
+from cylc.mp_pool import SuiteProcPool
+import cylc.version  # Ensures '$CYLC_VERSION' is set.
 
 
 def commandline_parser():
     parser = cop(
-        usage, jset=True,
-        argdoc=[
-            ("REG", "Suite name"),
-            ("TASK", "Target task (" + TaskID.SYNTAX + ")"),
-        ]
-    )
+        __doc__, jset=True,
+        argdoc=[("REG", "Suite name"),
+                ("TASK", "Target task (" + TaskID.SYNTAX + ")")])
 
     parser.set_defaults(sched=False, dry_run=False)
 
@@ -72,122 +72,128 @@ def commandline_parser():
 
     return parser
 
-# parse command line options and arguments-----------------------------
-parser = commandline_parser()
-(options, args) = parser.parse_args()
 
-suite, suiterc = prep_file(args[0], options).execute()
+def main():
+    parser = commandline_parser()
+    (options, args) = parser.parse_args()
+    if options.debug:
+        cylc.flags.debug = True
+    suite = args[0]
+    suiterc = localdb(options.db).get_suiterc(suite)
 
-owner = options.owner
-host = options.host
+    owner = options.owner
+    host = options.host
 
-suite_env = {}
-suite_task_env = {}
+    suite_env = {}
+    suite_task_env = {}
 
-task_id = args[1]
+    task_id = args[1]
 
-suite_dir = os.path.dirname(suiterc)
-# For user-defined job submission methods:
-sys.path.append(os.path.join(suite_dir, 'python'))
+    suite_dir = os.path.dirname(suiterc)
+    # For user-defined job submission methods:
+    sys.path.append(os.path.join(suite_dir, 'python'))
 
-# check task
-if not TaskID.is_valid_id(task_id):
-    sys.exit("Invalid task ID " + task_id)
+    # check task
+    if not TaskID.is_valid_id(task_id):
+        sys.exit("Invalid task ID " + task_id)
 
-task_name, point_string = TaskID.split(task_id)
+    task_name, point_string = TaskID.split(task_id)
 
-# load suite config
-try:
-    config = config(
+    # load suite config
+    config = SuiteConfig.get_inst(
         suite, suiterc, template_vars=options.templatevars,
         template_vars_file=options.templatevars_file)
-except Exception as exc:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(exc)
-
-# No TASK EVENT HOOKS are set for the submit command because there is
-# no scheduler instance watching for task failure etc.
-
-# Running in UTC time? (else just use the system clock)
-utc = config.cfg['cylc']['UTC mode']
-
-# create log (after CYLC_MODE is exported)
-os.environ['CYLC_MODE'] = 'submit'
-
-GLOBAL_CFG.create_cylc_run_tree(suite)
-slog = suite_log(suite)
-suite_log_dir = slog.get_dir()
-slog.pimp()
-
-RemoteJobHostManager.get_inst().single_task_mode = True
-
-ict = config.cfg['scheduling']['initial cycle point']
-fct = config.cfg['scheduling']['final cycle point']
-
-# static cylc and suite-specific variables:
-suite_env = {
-    'CYLC_UTC': str(utc),
-    'CYLC_MODE': 'submit',
-    'CYLC_DEBUG': str(cylc.flags.debug),
-    'CYLC_VERBOSE': str(cylc.flags.verbose),
-    'CYLC_DIR_ON_SUITE_HOST': os.environ['CYLC_DIR'],
-    'CYLC_SUITE_NAME': suite,
-    'CYLC_SUITE_REG_NAME': suite,  # DEPRECATED
-    'CYLC_SUITE_HOST': str(get_suite_host()),
-    'CYLC_SUITE_OWNER': owner,
-    'CYLC_SUITE_PORT': 'None',
-    'CYLC_SUITE_REG_PATH': RegPath(suite).get_fpath(),  # DEPRECATED
-    'CYLC_SUITE_DEF_PATH_ON_SUITE_HOST': suite_dir,
-    'CYLC_SUITE_INITIAL_CYCLE_POINT': str(ict),  # may be "None"
-    'CYLC_SUITE_FINAL_CYCLE_POINT': str(fct),  # may be "None"
-    'CYLC_SUITE_INITIAL_CYCLE_TIME': str(ict),  # may be "None"
-    'CYLC_SUITE_FINAL_CYCLE_TIME': str(fct),  # may be "None"
-    'CYLC_SUITE_LOG_DIR': suite_log_dir  # needed by the test battery
+
+    # No TASK EVENT HOOKS are set for the submit command because there is
+    # no scheduler instance watching for task failure etc.
+
+    # Running in UTC time? (else just use the system clock)
+    utc = config.cfg['cylc']['UTC mode']
+
+    # create log (after CYLC_MODE is exported)
+    os.environ['CYLC_MODE'] = 'submit'
+
+    GLOBAL_CFG.create_cylc_run_tree(suite)
+    slog = suite_log(suite)
+    suite_log_dir = slog.get_dir()
+    slog.pimp()
+
+    RemoteJobHostManager.get_inst().single_task_mode = True
+
+    ict = config.cfg['scheduling']['initial cycle point']
+    fct = config.cfg['scheduling']['final cycle point']
+
+    # static cylc and suite-specific variables:
+    suite_env = {
+        'CYLC_UTC': str(utc),
+        'CYLC_MODE': 'submit',
+        'CYLC_DEBUG': str(cylc.flags.debug),
+        'CYLC_VERBOSE': str(cylc.flags.verbose),
+        'CYLC_DIR_ON_SUITE_HOST': os.environ['CYLC_DIR'],
+        'CYLC_SUITE_NAME': suite,
+        'CYLC_SUITE_REG_NAME': suite,  # DEPRECATED
+        'CYLC_SUITE_HOST': str(get_suite_host()),
+        'CYLC_SUITE_OWNER': owner,
+        'CYLC_SUITE_PORT': 'None',
+        'CYLC_SUITE_REG_PATH': RegPath(suite).get_fpath(),  # DEPRECATED
+        'CYLC_SUITE_DEF_PATH_ON_SUITE_HOST': suite_dir,
+        'CYLC_SUITE_INITIAL_CYCLE_POINT': str(ict),  # may be "None"
+        'CYLC_SUITE_FINAL_CYCLE_POINT': str(fct),  # may be "None"
+        'CYLC_SUITE_INITIAL_CYCLE_TIME': str(ict),  # may be "None"
+        'CYLC_SUITE_FINAL_CYCLE_TIME': str(fct),  # may be "None"
+        'CYLC_SUITE_LOG_DIR': suite_log_dir  # needed by the test battery
     }
 
-# Note: a suite contact env file is not written by this command (it
-# would overwrite the real one if the suite is running).
-
-# Set local values of variables that are potenitally task-specific
-# due to different directory paths on different task hosts. These
-# are overridden by tasks prior to job submission, but in
-# principle they could be needed locally by event handlers:
-suite_task_env = {
-    'CYLC_SUITE_RUN_DIR': GLOBAL_CFG.get_derived_host_item(
-        suite, 'suite run directory'),
-    'CYLC_SUITE_WORK_DIR': GLOBAL_CFG.get_derived_host_item(
-        suite, 'suite work directory'),
-    'CYLC_SUITE_SHARE_DIR': GLOBAL_CFG.get_derived_host_item(
-        suite, 'suite share directory'),
-    'CYLC_SUITE_SHARE_PATH': '$CYLC_SUITE_SHARE_DIR',  # DEPRECATED
-    'CYLC_SUITE_DEF_PATH': suite_dir
+    # Note: a suite contact env file is not written by this command (it
+    # would overwrite the real one if the suite is running).
+
+    # Set local values of variables that are potentially task-specific
+    # due to different directory paths on different task hosts. These
+    # are overridden by tasks prior to job submission, but in
+    # principle they could be needed locally by event handlers:
+    suite_task_env = {
+        'CYLC_SUITE_RUN_DIR': GLOBAL_CFG.get_derived_host_item(
+            suite, 'suite run directory'),
+        'CYLC_SUITE_WORK_DIR': GLOBAL_CFG.get_derived_host_item(
+            suite, 'suite work directory'),
+        'CYLC_SUITE_SHARE_DIR': GLOBAL_CFG.get_derived_host_item(
+            suite, 'suite share directory'),
+        'CYLC_SUITE_SHARE_PATH': '$CYLC_SUITE_SHARE_DIR',  # DEPRECATED
+        'CYLC_SUITE_DEF_PATH': suite_dir
     }
-# (note GLOBAL_CFG automatically expands environment variables in local paths)
+    # (GLOBAL_CFG automatically expands environment variables in local paths)
 
-JOB_FILE.set_suite_env(suite_env)
+    JOB_FILE.set_suite_env(suite_env)
 
-point = get_point(point_string).standardise()
-try:
+    point = get_point(point_string).standardise()
     # Try to get a graphed task of the given name.
-    task_proxy = config.get_task_proxy(
+    task_proxy = get_task_proxy(
         task_name, point.standardise(), 'waiting', is_startup=True)
-except TaskNotDefinedError, x:
-    raise SystemExit(
-        'ERROR: task "%s" runtime is not defined, aborting.' % task_name)
-
-if not options.dry_run:
-    proc_pool = SuiteProcPool.get_inst(pool_size=1)
-
-if options.dry_run:
-    print "JOB SCRIPT=%s" % task_proxy.submit(dry_run=True)
-else:
-    task_proxy.submit()
-    while proc_pool.unhandled_results:
-        proc_pool.handle_results_async()
-    proc_pool.close()
-    proc_pool.join()
-    if task_proxy.submit_method_id is not None:
-        print 'Job ID:', task_proxy.submit_method_id
-
-sys.exit(task_proxy.state.get_status() == "submit-failed")
+
+    if not options.dry_run:
+        proc_pool = SuiteProcPool.get_inst(pool_size=1)
+
+    if options.dry_run:
+        task_proxy.prep_submit(dry_run=True)
+        print "JOB SCRIPT=%s" % task_proxy.job_conf['job file path']
+    else:
+        if task_proxy.prep_submit() is None:
+            sys.exit(1)
+        task_proxy.submit()
+        while proc_pool.results:
+            proc_pool.handle_results_async()
+        proc_pool.close()
+        proc_pool.join()
+        if task_proxy.submit_method_id is not None:
+            print 'Job ID:', task_proxy.submit_method_id
+
+    sys.exit(task_proxy.state.get_status() == "submit-failed")
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
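
One subtlety in the final line of cylc-submit's main() above: sys.exit() receives the boolean result of the status comparison, and because Python booleans are integers the command exits with status 1 when the task ends up 'submit-failed' and 0 otherwise. A tiny self-contained illustration:

    import sys

    status = 'submitted'                    # hypothetical final task status
    sys.exit(status == 'submit-failed')     # False == 0, so this exits 0
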
diff --git a/bin/cylc-succeeded b/bin/cylc-succeeded
deleted file mode 100755
index 8228e14..0000000
--- a/bin/cylc-succeeded
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import os, sys
-from optparse import OptionParser
-from cylc.task_message import message
-import cylc.flags
-
-usage = """cylc [task] succeeded [OPTIONS]
-
-This command is part of the cylc task messaging interface, used by
-running tasks to communicate progress to their parent suite.
-
-The succeeded command reports successful completion of task execution.
-It is called automatically by task job scripts, except in the case of
-"detaching tasks" which must do their own completion messaging.
-
-Suite and task identity are determined from the task execution
-environment supplied by the suite (or by the single task 'submit'
-command, in which case the message is just printed to stdout).
-
-See also:
-    cylc [task] message
-    cylc [task] started
-    cylc [task] failed"""
-
-parser = OptionParser( usage )
-
-parser.add_option( "-v", "--verbose",
-        help="Verbose output mode.",
-        action="store_true", default=False, dest="verbose" )
-
-( options, args ) = parser.parse_args()
-cylc.flags.verbose = options.verbose
-
-if len( args ) != 0:
-    parser.error( "Wrong number of arguments" )
-
-debug = False
-try:
-    # from task execution environment
-    if os.environ['CYLC_DEBUG'] == 'True':
-        debug = True
-except KeyError:
-    pass
-
-# send succeeded message
-try:
-    message().send_succeeded()
-except Exception, x:
-    print >> sys.stderr, 'ERROR: task messaging failure.'
-    if debug:
-        raise
-    raise SystemExit(x)
diff --git a/bin/cylc-suite-state b/bin/cylc-suite-state
index 1f86750..1401ccb 100755
--- a/bin/cylc-suite-state
+++ b/bin/cylc-suite-state
@@ -16,7 +16,36 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, sys
+"""cylc suite-state REG [OPTIONS]
+
+Print task states retrieved from a suite database; or (with --task,
+--point, and --status) poll until a given task reaches a given state.
+Polling is configurable with --interval and --max-polls; for a one-off
+check use --max-polls=1. The suite database does not need to exist at
+the time polling commences but allocated polls are consumed waiting for
+it (consider max-polls*interval as an overall timeout).
+
+Note for non-cycling tasks --point=1 must be provided.
+
+For your own suites the database location is determined by your
+site/user config. For other suites, e.g. those owned by others, or
+mirrored suite databases, use --run-dir=DIR to specify the location.
+
+Example usages:
+  cylc suite-state REG --task=TASK --cycle=CYCLE --status=STATUS
+returns 0 if TASK.CYCLE reaches STATUS before the maximum number of
+polls, otherwise returns 1.
+
+  cylc suite-state REG --task=TASK --cycle=CYCLE --status=STATUS --offset=PT6H
+adds 6 hours to the value of CYCLE for carrying out the polling operation.
+
+  cylc suite-state REG --task=TASK --status=STATUS --task-point
+uses the CYLC_TASK_CYCLE_POINT environment variable as the value for the CYCLE
+to poll. This is useful when you want to use cylc suite-state in a cylc task.
+"""
+
+import os
+import sys
 from time import sleep, time
 from cylc.remote import remrun
 from cylc.task_state import task_state
@@ -25,15 +54,19 @@ if remrun().execute():
     sys.exit(0)
 
 from cylc.CylcOptionParsers import cop
-from cylc.dbstatecheck import CylcSuiteDBChecker, DBNotFoundError, DBOperationError
+from cylc.dbstatecheck import (
+    CylcSuiteDBChecker, DBNotFoundError, DBOperationError)
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.command_polling import poller
 import cylc.flags
 
-class suite_poller( poller ):
+from isodatetime.parsers import TimePointParser, DurationParser
+
+
+class suite_poller(poller):
     """A polling object that checks suite state."""
 
-    def connect( self ):
+    def connect(self):
         """Connect to the suite db, polling if necessary in case the
         suite has not been started up yet."""
 
@@ -42,8 +75,9 @@ class suite_poller( poller ):
         connected = False
 
         if cylc.flags.verbose:
-            sys.stdout.write("connecting to suite db for " +
-                  self.args['run_dir'] + "/" + self.args['suite'])
+            sys.stdout.write(
+                "connecting to suite db for " +
+                self.args['run_dir'] + "/" + self.args['suite'])
 
         # Attempt db connection even if no polls for condition are
         # requested, as failure to connect is useful information.
@@ -53,11 +87,12 @@ class suite_poller( poller ):
         while not connected:
             self.n_polls += 1
             try:
-                self.checker = CylcSuiteDBChecker(self.args['run_dir'], self.args['suite'])
+                self.checker = CylcSuiteDBChecker(
+                    self.args['run_dir'], self.args['suite'])
                 connected = True
                 # ... but ensure at least one poll after connection:
                 self.n_polls -= 1
-            except DBOperationError:
+            except (DBOperationError, DBNotFoundError):
                 if self.n_polls >= max_polls:
                     raise
                 if cylc.flags.verbose:
@@ -67,7 +102,7 @@ class suite_poller( poller ):
             sys.stdout.write('\n')
         return connected
 
-    def check( self ):
+    def check(self):
         # return True if desired suite state achieved, else False
         if self.checker.task_state_met(
                 self.args['task'],
@@ -77,93 +112,160 @@ class suite_poller( poller ):
         else:
             return False
 
-parser = cop( usage = """cylc suite-state REG [OPTIONS]
-
-Print task states retrieved from a suite database; or (with --task,
---point, and --status) poll until a given task reaches a given state.
-Polling is configurable with --interval and --max-polls; for a one-off
-check use --max-polls=1. The suite database does not need to exist at
-the time polling commences but allocated polls are consumed waiting for
-it (consider max-polls*interval as an overall timeout).
-
-Note for non-cycling tasks --point=1 must be provided.
 
-For your own suites the database location is determined by your
-site/user config. For other suites, e.g. those owned by others, or
-mirrored suite databases, use --run-dir=DIR to specify the location.
-
-Example usage:
-  cylc suite-state REG --task=TASK --cycle=CYCLE --status=STATUS
-returns 0 if TASK.CYCLE reaches STATUS before the maximum number of
-polls, otherwise returns 1.""")
+def main():
+    parser = cop(__doc__)
 
-parser.add_option( "-t", "--task",
-        help="Specify a task to check the state of.",
-        action="store", dest="task", default=None )
+    parser.add_option(
+        "-t", "--task", help="Specify a task to check the state of.",
+        action="store", dest="task", default=None)
 
-parser.add_option( "-c", "--cycle",
+    parser.add_option(
+        "-c", "--cycle",
         help="Deprecated. Specify the cycle to check task states for.",
-        action="store", dest="cycle", default=None )
+        action="store", dest="cycle", default=None)
 
-parser.add_option( "-p", "--point",
+    parser.add_option(
+        "-p", "--point",
         help="Specify the cycle point to check task states for.",
-        action="store", dest="cycle", default=None )
+        action="store", dest="cycle", default=None)
+
+    parser.add_option(
+        "-T", "--task-point",
+        help="Use the CYLC_TASK_CYCLE_POINT environment variable as the "
+             "cycle to check task states for. "
+             "Shorthand for --cycle=$CYLC_TASK_CYCLE_POINT",
+        action="store_true", dest="use_task_point", default=False)
 
-parser.add_option( "-d", "--run-dir",
+    parser.add_option(
+        "--template", metavar="TEMPLATE",
+        help="Cyclepoint template string, used to reformat cyclepoints for "
+             "querying suites",
+        action="store", dest="template")
+
+    parser.add_option(
+        "-d", "--run-dir",
         help="The top level cylc run directory if non-standard. The "
-        "database should be DIR/REG/cylc-suite.db. Use to interrogate "
-        "suites owned by others, etc.; see note above.",
-        metavar="DIR", action="store", dest="run_dir", default=None )
+             "database should be DIR/REG/cylc-suite.db. Use to interrogate "
+             "suites owned by others, etc.; see note above.",
+        metavar="DIR", action="store", dest="run_dir", default=None)
+
+    parser.add_option(
+        "-s", "--offset",
+        help="Specify an offset to add to the targetted cycle point",
+        action="store", dest="offset", default=None)
+
+    conds = ("Valid triggering conditions to check for include: '" +
+             ("', '").join(
+                 sorted(CylcSuiteDBChecker.STATE_ALIASES.keys())[:-1]) +
+             "' and '" + sorted(
+                 CylcSuiteDBChecker.STATE_ALIASES.keys())[-1] + "'. ")
+    states = ("Valid states to check for include: '" +
+              ("', '").join(sorted(task_state.legal)[:-1]) + "' and '" +
+              sorted(task_state.legal)[-1] + "'.")
+
+    parser.add_option(
+        "-S", "--status",
+        help="Specify a particular status or triggering condition to "
+             "check for. " + conds + states,
+        action="store", dest="status", default=None)
+
+    suite_poller.add_to_cmd_options(parser)
+    (options, args) = parser.parse_args(remove_opts=["--db", "--debug"])
 
-conds = ("Valid triggering conditions to check for include: '" +
-         ("', '").join(sorted(CylcSuiteDBChecker.STATE_ALIASES.keys())[:-1]) +
-         "' and '" + sorted(CylcSuiteDBChecker.STATE_ALIASES.keys())[-1] + "'. ")
-states = ("Valid states to check for include: '" +
-         ("', '").join(sorted(task_state.legal)[:-1]) + "' and '" +
-         sorted(task_state.legal)[-1] + "'.")
+    suite = args[0]
 
-parser.add_option( "-S", "--status",
-        help="Specify a particular status or triggering condition to check for. " + conds + states,
-        action="store", dest="status", default=None )
+    if options.use_task_point and options.cycle:
+        sys.exit(
+            "ERROR: cannot specify a cycle point and use environment variable")
 
-suite_poller.add_to_cmd_options(parser)
-( options, args ) = parser.parse_args( remove_opts=["--db", "--debug"] )
+    if options.use_task_point:
+        if "CYLC_TASK_CYCLE_POINT" in os.environ:
+            options.cycle = os.environ["CYLC_TASK_CYCLE_POINT"]
+        else:
+            sys.exit("ERROR: CYLC_TASK_CYCLE_POINT is not defined")
+
+    if options.offset and not options.cycle:
+        sys.exit("ERROR: You must target a cycle point to use an offset")
+
+    # Attempt to apply the specified offset to the targeted cycle point
+    if options.offset:
+        my_parser = TimePointParser()
+        my_target_point = my_parser.parse(options.cycle, dump_as_parsed=True)
+        my_offset_parser = DurationParser()
+
+        oper = "+"
+        if options.offset.startswith("-") or options.offset.startswith("+"):
+            oper = options.offset[0]
+            options.offset = options.offset[1:]
+        if options.offset.startswith("P"):
+            try:
+                my_shift = my_offset_parser.parse(options.offset)
+            except ValueError:
+                sys.exit("ERROR: Cannot parse offset: %s" % options.offset)
+            if oper == "-":
+                my_target_point -= my_shift
+            else:
+                my_target_point += my_shift
+        else:
+            sys.exit("ERROR: Unrecognised offset: %s" % options.offset)
+
+        options.cycle = str(my_target_point)
 
-suite = args[0]
+    # Reformat the cycle point for querying the targeted suite
+    if options.template:
+        if options.cycle:
+            my_parser = TimePointParser()
+            my_target_point = my_parser.parse(options.cycle,
+                                              dump_format=options.template)
+            options.cycle = str(my_target_point)
+        else:
+            sys.exit("ERROR: No cyclepoint to reformat using template")
 
-# this only runs locally (use of --host or --user results in remote re-invocation).
-run_dir = os.path.expandvars( \
-          os.path.expanduser( \
-          options.run_dir or GLOBAL_CFG.get_host_item( 'run directory' )))
+    # this only runs locally (use of --host or --user results in remote
+    # re-invocation).
+    run_dir = os.path.expandvars(
+        os.path.expanduser(
+            options.run_dir or GLOBAL_CFG.get_host_item('run directory')))
 
-spoller = suite_poller( "requested state",
+    spoller = suite_poller(
+        "requested state",
         options.interval, options.max_polls,
-        args={
-            'suite'   : suite,
-            'run_dir' : run_dir,
-            'task'    : options.task,
-            'cycle'   : options.cycle,
-            'status'  : options.status
-            })
-
-if not spoller.connect():
-    sys.exit( "ERROR: cannot connect to the suite DB" )
-
-if options.status:
-    if options.status in task_state.legal or options.status in CylcSuiteDBChecker.STATE_ALIASES:
-        pass
+        args={'suite': suite,
+              'run_dir': run_dir,
+              'task': options.task,
+              'cycle': options.cycle,
+              'status': options.status
+              })
+
+    if not spoller.connect():
+        sys.exit("ERROR: cannot connect to the suite DB")
+
+    if options.status and not (
+            options.status in task_state.legal or
+            options.status in CylcSuiteDBChecker.STATE_ALIASES):
+        sys.exit("ERROR: invalid status '" + options.status + "'")
+
+    if options.status and options.task and options.cycle:
+        """check a task status"""
+        spoller.condition = options.status
+        if not spoller.poll():
+            sys.exit(1)
     else:
-        sys.exit( "ERROR: invalid status '" + options.status + "'" )
-
-if options.status and options.task and options.cycle:
-    """check a task status"""
-    spoller.condition = options.status
-    if not spoller.poll():
-        sys.exit(1)
-else:
-    """just display query results"""
-    spoller.checker.display_maps(
+        """just display query results"""
+        spoller.checker.display_maps(
             spoller.checker.suite_state_query(
                 task=options.task,
                 cycle=options.cycle,
                 status=options.status))
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
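
The --offset handling added above parses an ISO 8601 duration and shifts the
target cycle point before polling. A minimal standalone sketch of that
arithmetic, assuming the isodatetime library imported above is available; the
cycle point and offset values are illustrative only:

    from isodatetime.parsers import TimePointParser, DurationParser

    point = TimePointParser().parse('20150101T00', dump_as_parsed=True)
    shift = DurationParser().parse('PT6H')   # six-hour ISO 8601 duration
    print str(point + shift)                 # the shifted cycle point to poll
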
diff --git a/bin/cylc-test-battery b/bin/cylc-test-battery
index ce760d7..7c1af41 100755
--- a/bin/cylc-test-battery
+++ b/bin/cylc-test-battery
@@ -16,9 +16,11 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+set -u
+
 usage() {
   cat <<eof
-USAGE: cylc test-battery [OPTIONS] [FILES or DIRECTORIES] -- [prove OPTIONS]
+Usage: cylc test-battery [PROVE-OPTIONS] [FILES or DIRECTORIES]
 
 Run automated cylc and parsec tests under [FILES or DIRECTORIES].
 Test locations default to the following directory tree:
@@ -34,45 +36,47 @@ Requirements:
   * Passwordless ssh must be configured to task host accounts.
   * Some test suites submit jobs to 'at' so atd must be running.
 
-Suite run directories are cleaned up on the suite host (but not on
-remote task hosts) for passing tests - otherwise they are left alone.
+Options and arguments are passed to the "prove -j \$NPROC -s -r \${@:-tests}"
+command, where NPROC is the number of child processes that can be used to run
+the test files.
+
+The command normally uses the "process pool size" setting (default=4) in the
+site/user global configuration file to determine the number of tests to run in
+parallel. You can also change the amount of concurrency with the "-j N" option.
 
-By default the command uses the number of processors in your system to
-determine the number of tests to run in parallel. This may be anti-social
-on systems shared with many other users. On slow or loaded
-machines this may result in some test failures due to timeouts intended
-to catch problems that can prevent a suite from shutting down normally.
-In such cases, you can change the amount of concurrency by setting either:
-  * the "-j N" option, or
-  * the "process pool size = N" option in the site/user global
-    configuration.
+Suite run directories are cleaned up on the suite host for passing tests -
+otherwise they are left alone.
 
-To output stderr from failed tests to the terminal, "export CYLC_TEST_DEBUG=true"
-before running this command. By default, it uses "diff -u" to compare files.
-However, if an alternate command such as "xxdiff -D" is desirable (e.g. for
-debugging), "export CYLC_TEST_DIFF_CMD=xxdiff -D".
+To output stderr from failed tests to the terminal,
+"export CYLC_TEST_DEBUG=true" before running this command.
+
+The command normally uses "diff -u" to compare files.  However, if an alternate
+command such as "xxdiff -D" is desirable (e.g. for debugging), "export
+CYLC_TEST_DIFF_CMD=xxdiff -D".
 
 For more information see "Reference Tests" in the User Guide.
 
 Options:
   -h, --help   Print this help message and exit.
 
-Options after '--' are passed through to "prove".
-E.g. to run N tests at once, in parallel:
-  cylc test-battery -- -j N      
+Examples:
+
+Run the full test suite with the default options.
+  cylc test-battery
+Run the full test suite with 12 processes.
+  cylc test-battery -j 12
+Run only tests under "tests/cyclers/" with 12 processes.
+  cylc test-battery -j 12 tests/cyclers
+Run only "tests/cyclers/16-weekly.t" in verbose mode
+  cylc test-battery -v tests/cyclers/16-weekly.t
 eof
 }
 
 TESTS=""
 for ARG in "$@"; do
-    shift
-    if [[ "$ARG" == '--' ]]; then
-        break
-    elif [[ "$ARG" == '--help' ]]; then
+    if [[ "$ARG" == '--help' || "$ARG" == '-h' ]]; then
         usage
         exit 0
-    else
-        TESTS="$TESTS $ARG"
     fi
 done
 
@@ -80,13 +84,20 @@ if [[ "$PWD" != "$CYLC_DIR" ]]; then
     echo "[INFO] cd \"$CYLC_DIR\""
     cd "$CYLC_DIR"
 fi
+
+# Recompile *.pyc files to ensure we are running the current code.
+if [[ -w "$CYLC_DIR/lib" ]]; then
+    find "$CYLC_DIR/lib" -name '*.pyc' -type 'f' -delete
+    python -mcompileall -q "$CYLC_DIR/lib"
+fi
+
 if perl -e 'use Test::Harness 3.00' 2>/dev/null; then
     NPROC=$(cylc get-global-config '--item=process pool size')
     if [[ -z "${NPROC}" ]]; then
         NPROC=$(python -c 'import multiprocessing as mp; print mp.cpu_count()')
     fi
-    exec prove -j "$NPROC" -s -r ${TESTS:-tests}
+    exec prove -j "$NPROC" -s -r ${@:-tests}
 else
     echo "WARNING: cannot run tests in parallel (Test::Harness < 3.00)" >&2
-    exec prove -s -r ${TESTS:-tests}
+    exec prove -s -r ${@:-tests}
 fi
diff --git a/bin/cylc-test-db b/bin/cylc-test-db
index 842642a..37e3f6c 100755
--- a/bin/cylc-test-db
+++ b/bin/cylc-test-db
@@ -19,7 +19,7 @@
 set -e
 
 usage() {
-    echo "USAGE: cylc [admin] test-db [--help]"
+    echo "Usage: cylc [admin] test-db [--help]"
     echo "A thorough test of suite registration database functionality."
     echo "Options:"
     echo "  --help   Print this usage message."
diff --git a/bin/cylc-trigger b/bin/cylc-trigger
index a57d3ca..f16e873 100755
--- a/bin/cylc-trigger
+++ b/bin/cylc-trigger
@@ -16,6 +16,18 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [control] trigger [OPTIONS] ARGS
+
+Manually trigger one or more tasks. Waiting tasks will be queued (cylc internal
+queues) and will submit as normal when released by the queue; queued tasks will
+submit immediately even if that violates the queue limit (so you may need to
+trigger a queue-limited task twice to get it to submit).
+
+For single tasks you can use "--edit" to edit the generated job script before
+it submits, to apply one-off changes. A diff between the original and edited
+job script will be saved to the task job log directory.
+"""
+
 import sys
 
 if '--host' in sys.argv[1:] and '--edit' in sys.argv[1:]:
@@ -35,149 +47,159 @@ import time
 import shutil
 import difflib
 import subprocess
+
+import cylc.flags
 from cylc.prompt import prompt
-from cylc import cylc_pyro_client
+from cylc.network.suite_command import SuiteCommandClient
+from cylc.network.suite_info import SuiteInfoClient
 from cylc.CylcOptionParsers import cop, multitask_usage
-from cylc.command_prep import prep_pyro
-import cylc.flags
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.job_logs import CommandLogger
 from cylc.task_id import TaskID
 
-parser = cop("""cylc [control] trigger [OPTIONS] ARGS
-
-Manually trigger a task or tasks. For single tasks you can choose to edit the
-generated job script first, to apply one-off changes (--edit).  Triggering a
-waiting task queues it for execution (cylc internal queues). If its queue is
-not limited it will submit immediately, otherwise it will submit when released
-by its queue.  Triggering a queued task causes it to submit immediately even if
-that violates the queue limit.
-
-In an edit run a diff between the original and edited job script is saved to
-the task job log directory.
-""" + multitask_usage, pyro=True, multitask=True, argdoc=[
-    ('REG', 'Suite name'),
-    ('MATCH', 'Task or family name matching regular expression'),
-    ('POINT', 'Task cycle point (e.g. date-time or integer)')
-    ]
-)
-
-parser.add_option(
-    "-e", "--edit",
-    help="Manually edit the job script before running it.",
-    action="store_true", default=False, dest="edit_run")
-
-parser.add_option(
-    "-g", "--geditor",
-    help="(with --edit) force use of the configured GUI editor.",
-    action="store_true", default=False, dest="geditor")
-
-(options, args) = parser.parse_args()
-suite, pphrase = prep_pyro(args[0], options).execute()
-name = args[1]
-point_string = args[2]
-
-msg = 'Trigger task(s) %s  at %s in %s' % (name, point_string, suite)
-prompt(msg, options.force)
-
-# Get the suite daemon command interface.
-try:
-    cmd_proxy = cylc_pyro_client.client(
-        suite, pphrase, options.owner, options.host, options.pyro_timeout,
-        options.port).get_proxy('command-interface')
-except Exception as exc:
-    if cylc.flags.debug:
-        raise
-    sys.exit(exc)
-
-if options.edit_run:
-    # Check that TASK is a unique task.
-    task_id = TaskID.get(name, point_string)
-    try:
-        info_proxy = cylc_pyro_client.client(
-            suite, pphrase, options.owner, options.host, options.pyro_timeout,
-            options.port).get_proxy('suite-info')
-        res, msg = info_proxy.get('ping task', task_id, True)
-    except Exception as exc:
-        if cylc.flags.debug:
-            raise
-        sys.exit(exc)
-    if not res:
-        sys.exit('ERROR: %s' % msg)
 
-    # Get the current job file mtime, if the file exists.
-    jobfile_path = CommandLogger.get_latest_job_log(
-        suite, name, point_string)
-    try:
-        jobfile_mtime = os.stat(jobfile_path).st_mtime
-    except OSError:
-        # Does not exist.
-        jobfile_mtime = None
+def main():
+    """CLI for "cylc trigger"."""
+    parser = cop(
+        __doc__ + multitask_usage, pyro=True, multitask=True,
+        argdoc=[
+            ('REG', 'Suite name'),
+            ('MATCH', 'Task or family name matching regular expression'),
+            ('POINT', 'Task cycle point (e.g. date-time or integer)')])
+
+    parser.add_option(
+        "-e", "--edit",
+        help="Manually edit the job script before running it.",
+        action="store_true", default=False, dest="edit_run")
+
+    parser.add_option(
+        "-g", "--geditor",
+        help="(with --edit) force use of the configured GUI editor.",
+        action="store_true", default=False, dest="geditor")
+
+    (options, args) = parser.parse_args()
+    suite = args[0]
+
+    name = args[1]
+    point_string = args[2]
+
+    msg = 'Trigger task(s) %s at %s in %s' % (name, point_string, suite)
+    prompt(msg, options.force)
+
+    cmd_client = SuiteCommandClient(
+        suite, options.owner, options.host, options.pyro_timeout,
+        options.port, options.db, my_uuid=options.set_uuid,
+        print_uuid=options.print_uuid)
+
+    if options.edit_run:
+        # Check that TASK is a unique task.
+        task_id = TaskID.get(name, point_string)
+        info_client = SuiteInfoClient(
+            suite, options.owner, options.host, options.pyro_timeout,
+            options.port, options.db, my_uuid=cmd_client.my_uuid)
+        success, msg = info_client.get_info('ping_task', task_id, True)
+        if not success:
+            sys.exit('ERROR: %s' % msg)
+
+        # Get the job filename from the suite daemon - the task cycle point may
+        # need standardising to the suite cycle point format.
+        success, job_dir = info_client.get_info(
+            'get_task_jobfile_path', task_id)
+        if not success:
+            sys.exit('ERROR: %s' % job_dir)
+
+        # Add the 'NN' symlink for latest job file regardless of submit number.
+        jobfile_path = os.path.join(job_dir, 'NN', 'job')
+
+        # Note: localhost time and file system time may be out of sync,
+        #       so the safe way to detect that a new file has been written
+        #       is to check whether its time stamp has changed.
+        #       Comparing the localhost time with the file timestamp is unsafe
+        #       and may cause the "while True" loop that follows to sys.exit
+        #       with an error message after MAX_TRIES.
+        try:
+            old_mtime = os.stat(jobfile_path).st_mtime
+        except OSError:
+            old_mtime = None
 
-    try:
         # Tell the suite daemon to generate the job file.
-        result = cmd_proxy.put('dry run task', name, point_string)
+        cmd_client.put_command('dry_run_task', name, point_string)
+
+        # Wait for the new job file to be written. Use mtime because the same
+        # file could potentially exist already, left from a previous run.
+        count = 0
+        MAX_TRIES = 10
+        while True:
+            count += 1
+            try:
+                mtime = os.stat(jobfile_path).st_mtime
+            except OSError:
+                pass
+            else:
+                if old_mtime is None or mtime > old_mtime:
+                    break
+            if count > MAX_TRIES:
+                sys.exit('ERROR: no job file after %s seconds' % MAX_TRIES)
+            time.sleep(1)
+
+        # Make a pre-edit copy to allow a post-edit diff.
+        jobfile_copy_path = "%s.ORIG" % jobfile_path
+        shutil.copy(jobfile_path, jobfile_copy_path)
+
+        # Edit the new job file.
+        if options.geditor:
+            editor = GLOBAL_CFG.get(['editors', 'gui'])
+        else:
+            editor = GLOBAL_CFG.get(['editors', 'terminal'])
+        # The editor command may have options, e.g. 'emacs -nw'.
+        command_list = re.split(' ', editor)
+        command_list.append(jobfile_path)
+        command = ' '.join(command_list)
+        try:
+            # Block until the editor exits.
+            retcode = subprocess.call(command_list)
+            if retcode != 0:
+                sys.exit(
+                    'ERROR, command failed with %d:\n %s' % (retcode, command))
+        except OSError:
+            sys.exit('ERROR, unable to execute:\n %s' % command)
+
+        # Get confirmation after editing is done.
+        # Don't allow force-no-prompt in this case.
+        if options.geditor:
+            # Alert stdout of the dialog window, in case it's missed.
+            print "Editing done. I'm popping up a confirmation dialog now."
+
+        # Save a diff to record the changes made.
+        log_dir = os.path.dirname(jobfile_path)
+        with open("%s-edit.diff" % jobfile_path, 'wb') as diff_file:
+            for line in difflib.unified_diff(
+                    open(jobfile_copy_path).readlines(),
+                    open(jobfile_path).readlines(),
+                    fromfile="original",
+                    tofile="edited"):
+                diff_file.write(line)
+        os.unlink(jobfile_copy_path)
+
+        msg = "Trigger edited task %s?" % task_id
+        if not prompt(msg, gui=options.geditor, no_force=True, no_abort=True):
+            # Generate placeholder log files for the aborted run.
+            for log in ["job.out", "job.err"]:
+                lf = os.path.join(log_dir, log)
+                with open(lf, 'wb') as log_file:
+                    log_file.write("This edit run was aborted\n")
+            print "Run aborted."
+            sys.exit(0)
+
+    # Trigger the task proxy(s).
+    cmd_client.put_command('trigger_task', name, point_string,
+                           options.is_family)
+
+
+if __name__ == "__main__":
+    try:
+        main()
     except Exception as exc:
         if cylc.flags.debug:
             raise
         sys.exit(exc)
-    if not result[0]:
-        sys.exit(result[1])
-
-    # Wait for the new job file to be written.
-    count = 0
-    MAX_TRIES = 10
-    while True:
-        count += 1
-        try:
-            if os.stat(jobfile_path).st_mtime > jobfile_mtime:
-                break
-        except:
-            pass
-        if count > MAX_TRIES:
-            sys.exit('ERROR: no job file after %s seconds' % MAX_TRIES)
-        time.sleep(1)
-
-    # Make a pre-edit copy to allow a post-edit diff.
-    jobfile_copy_path = "%s.ORIG" % jobfile_path
-    shutil.copy(jobfile_path, jobfile_copy_path)
-
-    # Edit the new job file.
-    if options.geditor:
-        editor = GLOBAL_CFG.get(['editors', 'gui'])
-    else:
-        editor = GLOBAL_CFG.get(['editors', 'terminal'])
-    # The editor command may have options, e.g. 'emacs -nw'.
-    command_list = re.split(' ', editor)
-    command_list.append(jobfile_path)
-    command = ' '.join(command_list)
-    try:
-        # Block until the editor exits.
-        retcode = subprocess.call(command_list)
-        if retcode != 0:
-            sys.exit(
-                'ERROR, command failed with %d:\n %s' % (retcode, command))
-    except OSError:
-        sys.exit('ERROR, unable to execute:\n %s' % command)
-
-    # Save a diff to record the changes made.
-    log_dir = os.path.dirname(jobfile_path)
-    with open("%s-edit.diff" % jobfile_path, 'wb') as diff_file:
-        for line in difflib.unified_diff(
-                open(jobfile_copy_path).readlines(),
-                open(jobfile_path).readlines(),
-                fromfile="original",
-                tofile="edited"):
-            diff_file.write(line)
-    os.unlink(jobfile_copy_path)
-
-# Trigger the task proxy(s).
-try:
-    result = cmd_proxy.put(
-        'trigger task', name, point_string, options.is_family)
-except Exception as exc:
-    if cylc.flags.debug:
-        raise
-    sys.exit(exc)
-if not result[0]:
-    sys.exit(result[1])
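
The edit-run logic above detects that the suite daemon has written a fresh job
file by comparing file modification times rather than wall-clock time. A
minimal standalone sketch of that wait loop, with a hypothetical path argument
and the same one-second poll and ten-try limit:

    import os
    import sys
    import time

    def wait_for_new_file(path, old_mtime, max_tries=10):
        # Return once 'path' has an mtime newer than 'old_mtime' (or simply
        # exists, if old_mtime is None); give up after max_tries attempts.
        for _ in range(max_tries):
            try:
                mtime = os.stat(path).st_mtime
            except OSError:
                pass  # not written yet
            else:
                if old_mtime is None or mtime > old_mtime:
                    return mtime
            time.sleep(1)
        sys.exit('ERROR: no job file after %s seconds' % max_tries)
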
diff --git a/bin/cylc-unregister b/bin/cylc-unregister
index 2c646c3..cbe59dc 100755
--- a/bin/cylc-unregister
+++ b/bin/cylc-unregister
@@ -16,6 +16,15 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [db] unregister [OPTIONS] ARGS
+
+Remove one or more suites from your suite database. The REGEX pattern
+must match whole suite names to avoid accidental de-registration of
+partial matches (e.g. 'bar.baz' will not match 'foo.bar.baz').
+
+Associated suite definition directories will not be deleted unless the
+'-d,--delete' option is used."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
@@ -27,74 +36,75 @@ from cylc.CylcOptionParsers import cop
 from cylc.registration import localdb
 import cylc.flags
 
-parser = cop( usage = """cylc [db] unregister [OPTIONS] ARGS
-
-Remove one or more suites from your suite database. The REGEX pattern
-must match whole suite names to avoid accidental de-registration of
-partial matches (e.g. 'bar.baz' will not match 'foo.bar.baz').
 
-Associated suite definition directories will not be deleted unless the
-'-d,--delete' option is used.""",
-    argdoc=[('REGEX', 'Regular expression to match suite names.')] )
+def main():
+    parser = cop(
+        __doc__,
+        argdoc=[('REGEX', 'Regular expression to match suite names.')])
 
-parser.add_option( "-d", "--delete",
+    parser.add_option(
+        "-d", "--delete",
         help="Delete the suite definition directory too (!DANGEROUS!).",
-        action="store_true", default=False, dest="obliterate" )
+        action="store_true", default=False, dest="obliterate")
 
-parser.add_option( "-f", "--force",
+    parser.add_option(
+        "-f", "--force",
         help="Don't ask for confirmation before deleting suite definitions.",
-        action="store_true", default=False, dest="force" )
+        action="store_true", default=False, dest="force")
 
-( options, args ) = parser.parse_args()
+    (options, args) = parser.parse_args()
 
-arg = args[0]
+    db = localdb(file=options.db)
 
-db = localdb( file=options.db )
+    dirs = db.unregister(args[0])
 
-try:
-    dirs = db.unregister( arg )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
-
-n = len(dirs)
-if n == 0:
-    print 'No suites unregistered.'
-    sys.exit(0)
+    n = len(dirs)
+    if n == 0:
+        print 'No suites unregistered.'
+        sys.exit(0)
 
-print len(dirs), 'suites unregistered.'
+    print len(dirs), 'suites unregistered.'
 
-if options.obliterate and len(dirs) > 0:
-    for dir in dirs:
-        print 'DELETE ', dir
+    if options.obliterate and len(dirs) > 0:
+        for dir in dirs:
+            print 'DELETE ', dir
 
-    really_obliterate = False
-    if options.force:
-        really_obliterate = True
-    else:
-        if len(dirs) == 1:
-            words = "THIS SUITE DEFINITION"
-        else:
-            words = "THESE SUITE DEFINITIONS"
-        response = raw_input( "DO YOU REALLY WANT TO DELETE " + words + "? (y/n) " )
-        if response == 'y':
+        really_obliterate = False
+        if options.force:
             really_obliterate = True
-    if really_obliterate and len(dirs)>0:
-        for dir in dirs:
-            try:
-                rmtree(dir)
-            except OSError, x:
-                print >> sys.stderr, "ERROR, could not remove directory: " + dir
-                print >> sys.stderr, x
-                continue
-            # recursively remove empty superdirs
-            tmp = dir
-            while True:
-                tmp = os.path.split(tmp)[0]
+        else:
+            if len(dirs) == 1:
+                words = "THIS SUITE DEFINITION"
+            else:
+                words = "THESE SUITE DEFINITIONS"
+            response = raw_input(
+                "DO YOU REALLY WANT TO DELETE " + words + "? (y/n) ")
+            if response == 'y':
+                really_obliterate = True
+        if really_obliterate and len(dirs) > 0:
+            for dir in dirs:
                 try:
-                    os.rmdir(tmp)
-                except OSError:
-                    break
-                else:
-                    print 'Removed empty directory:', tmp
+                    rmtree(dir)
+                except OSError as exc:
+                    print >> sys.stderr, (
+                        "ERROR, could not remove directory: " + dir)
+                    print >> sys.stderr, str(exc)
+                    continue
+                # recursively remove empty superdirs
+                tmp = dir
+                while True:
+                    tmp = os.path.split(tmp)[0]
+                    try:
+                        os.rmdir(tmp)
+                    except OSError:
+                        break
+                    else:
+                        print 'Removed empty directory:', tmp
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-upgrade-db b/bin/cylc-upgrade-db
index cc5caf4..725ef3c 100755
--- a/bin/cylc-upgrade-db
+++ b/bin/cylc-upgrade-db
@@ -16,15 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys, os
-from optparse import OptionParser
-import pickle
-from cylc.registration import localdb, RegistrationError
-
-oldpath = os.path.join( os.environ['HOME'], '.cylc', 'DB' )
-newpath = os.path.join( os.environ['HOME'], '.cylc', 'REGDB' )
-
-parser = OptionParser( usage = """cylc upgrade-db
+"""cylc upgrade-db
 
 Upgrade a pre-cylc-5.4 suite name database to the new cylc-5.4+ format.
 This will create a new-format DB if necessary, or if one already exists
@@ -37,28 +29,54 @@ $HOME/.cylc/REGDB/ containing one file per registered suite. The
 filenames are the suite names, and the file contains key=value pairs:
   shell$ cat $HOME/.cylc/REGDB/my.suite
   title=my suite title
-  path=/path/to/my/suite/""")
+  path=/path/to/my/suite/"""
+
+import sys
+import os
+from optparse import OptionParser
+import pickle
+
+import cylc.flags
+from cylc.registration import localdb, RegistrationError
+
+
+def main():
+
+    oldpath = os.path.join(os.environ['HOME'], '.cylc', 'DB')
+    newpath = os.path.join(os.environ['HOME'], '.cylc', 'REGDB')
+
+    parser = OptionParser(__doc__)
+
+    parser.add_option(
+        "--from",
+        help="Path to pre-cylc-5.4 db; default:" + oldpath,
+        metavar="PATH", action="store", default=oldpath)
+
+    parser.add_option(
+        "--to", help="Path to new cylc-5.4+ db; default:" + newpath,
+        metavar="PATH", action="store", default=newpath)
 
-parser.add_option( "--from", help="Path to pre-cylc-5.4 db; "
-        "default:" + oldpath,
-        metavar="PATH", action="store", default=oldpath )
+    (options, args) = parser.parse_args()
 
-parser.add_option( "--to", help="Path to new cylc-5.4+ db; "
-        "default:" + newpath,
-        metavar="PATH", action="store", default=newpath )
+    if not os.path.isfile(oldpath):
+        sys.exit("ERROR, old DB not found: " + oldpath)
 
-( options, args ) = parser.parse_args()
+    # load old DB
+    olditems = pickle.load(open(oldpath, 'r'))
 
-if not os.path.isfile( oldpath ):
-    sys.exit( "ERROR, old DB not found: " + oldpath )
+    # new db
+    db = localdb(file=newpath)
+    for suite, (dir, title) in olditems.items():
+        try:
+            db.register(suite, dir)
+        except RegistrationError as exc:
+            print >> sys.stderr, str(exc)
 
-# load old DB
-olditems = pickle.load( open( oldpath, 'r' ))
 
-# new db
-db = localdb( file=newpath )
-for suite, (dir,title) in olditems.items():
+if __name__ == "__main__":
     try:
-        db.register( suite, dir )
-    except RegistrationError, x:
-        print >> sys.stderr, x
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-upgrade-run-dir b/bin/cylc-upgrade-run-dir
index 5e9f33e..0abe866 100755
--- a/bin/cylc-upgrade-run-dir
+++ b/bin/cylc-upgrade-run-dir
@@ -16,6 +16,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [admin] upgrade-run-dir SUITE
+
+For one-off conversion of a suite run directory to cylc-6 format.
+
+Arguments:
+     SUITE    suite name or run directory path"""
+
 import re
 import os
 import sys
@@ -25,31 +32,31 @@ from collections import defaultdict
 
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.mkdir_p import mkdir_p
+import cylc.flags
+
+OLD_LOGFILE_RE = re.compile(
+    """^
+    ([\w]+)  # task name
+    \.
+    ([^.]+)  # any cycle time format
+    \.
+    (\d+)    # submit number
+    (\.
+      (.*)
+    )?       # optional extension
+    $""",
+    re.VERBOSE
+)
+
+OLD_WORKDIR_RE = re.compile(
+    """^
+    ([\w]+)  # task name
+    \.
+    ([^.]+)  # any cycle time format
+    $""",
+    re.VERBOSE
+)
 
-OLD_LOGFILE_RE = re.compile("""
-            ^
-            ([\w]+)  # task name
-            \.
-            ([^.]+)  # any cycle time format
-            \.
-            (\d+)    # submit number
-            (\.
-              (.*)
-            )?       # optional extension
-            $
-            """,
-            re.VERBOSE
-        )
-
-OLD_WORKDIR_RE = re.compile("""
-            ^
-            ([\w]+)  # task name
-            \.
-            ([^.]+)  # any cycle time format
-            $
-            """,
-            re.VERBOSE
-        )
 
 def upgrade_logdir(jobdir):
     """Upgrade a pre cylc-6 suite job log directory."""
@@ -58,13 +65,12 @@ def upgrade_logdir(jobdir):
         return
     os.chdir(jobdir)
     print "Upgrading %s" % jobdir
-    max_subnums = defaultdict(lambda:defaultdict(int))
+    max_subnums = defaultdict(lambda: defaultdict(int))
     for old_jobfile in os.listdir("."):
         m = re.match(OLD_LOGFILE_RE, old_jobfile)
         if not m:
             print >> sys.stderr, (
-                    "WARNING: skipping non-standard log file: %s" % old_jobfile
-                    )
+                "WARNING: skipping non-standard log file: %s" % old_jobfile)
             continue
         sys.stdout.write(".")
         task_name, cycle_point, subnum, dot, extn = m.groups()
@@ -100,6 +106,7 @@ def upgrade_logdir(jobdir):
                 raise exc
     sys.stdout.write("\n")
 
+
 def upgrade_workdir(workdir):
     """Upgrade a pre cylc-6 suite work directory."""
 
@@ -111,8 +118,7 @@ def upgrade_workdir(workdir):
         m = re.match(OLD_WORKDIR_RE, old_workdir)
         if not m:
             print >> sys.stderr, (
-                    "WARNING: skipping non-standard workdir %s" % old_workdir
-                    )
+                "WARNING: skipping non-standard workdir %s" % old_workdir)
             continue
         sys.stdout.write(".")
         task_name, cycle_point = m.groups()
@@ -122,28 +128,33 @@ def upgrade_workdir(workdir):
     sys.stdout.write("\n")
 
 
-parser = OptionParser(
-    usage = """cylc [admin] upgrade-run-dir SUITE
+def main():
 
-For one-off conversion of a suite run directory to cylc-6 format.
+    parser = OptionParser(__doc__)
 
-Arguments:
-     SUITE    suite name or run directory path""")
+    (options, args) = parser.parse_args()
+
+    arg0 = args[0]
+    if os.path.isdir(arg0):
+        rundir = arg0
+    else:
+        rundir = GLOBAL_CFG.get_derived_host_item(arg0, "suite run directory")
+    if not os.path.isdir(rundir):
+        sys.exit("ERROR: Directory not found: %s" % rundir)
 
-(options, args) = parser.parse_args()
+    logdir = os.path.join(rundir, "log", "job")
+    upgrade_logdir(logdir)
 
-arg0 = args[0]
-if os.path.isdir(arg0):
-    rundir = arg0
-else:
-    rundir = GLOBAL_CFG.get_derived_host_item(arg0, "suite run directory")
-if not os.path.isdir(rundir):
-    sys.exit("ERROR: Directory not found: %s" % rundir)
+    workdir = os.path.join(rundir, "work")
+    upgrade_workdir(workdir)
 
-logdir = os.path.join( rundir, "log", "job" )
-upgrade_logdir(logdir)
+    print "Done"
 
-workdir = os.path.join( rundir, "work" )
-upgrade_workdir(workdir)
 
-print "Done"
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
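
The OLD_LOGFILE_RE pattern above splits pre cylc-6 job log names into task
name, cycle point, submit number and an optional extension. A quick
illustrative check using a compact equivalent of that pattern and a
hypothetical old-format filename:

    import re

    OLD_LOGFILE_RE = re.compile(r"^([\w]+)\.([^.]+)\.(\d+)(\.(.*))?$")

    match = OLD_LOGFILE_RE.match("mytask.2013010100.1.out")
    task_name, cycle_point, subnum, dot, extn = match.groups()
    print task_name, cycle_point, subnum, extn   # mytask 2013010100 1 out
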
diff --git a/bin/cylc-validate b/bin/cylc-validate
index c71922e..7d71d68 100755
--- a/bin/cylc-validate
+++ b/bin/cylc-validate
@@ -16,59 +16,114 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+"""cylc [prep] validate [OPTIONS] ARGS
+
+Validate a suite definition against the official specification
+files held in $CYLC_DIR/conf/suiterc/.
+
+If the suite definition uses include-files, reported line numbers
+will correspond to the inlined version seen by the parser; use
+'cylc view -i,--inline SUITE' for comparison."""
+
 import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
+import cylc.flags
 from cylc.CylcOptionParsers import cop
 from cylc.version import CYLC_VERSION
-from cylc.config import config
-import cylc.flags
+from cylc.config import SuiteConfig, SuiteConfigError
+from cylc.prerequisite import TriggerExpressionError
+from cylc.task_proxy import TaskProxy
 
-parser = cop( """cylc [prep] validate [OPTIONS] ARGS
 
-Validate a suite definition against the official specification
-files held in $CYLC_DIR/conf/suiterc/.
-
-If the suite definition uses include-files reported line numbers
-will correspond to the inlined version seen by the parser; use
-'cylc view -i,--inline SUITE' for comparison.""",
-jset=True, prep=True )
+def main():
 
+    parser = cop(__doc__, jset=True, prep=True)
 
-parser.add_option( "--ict",
+    parser.add_option(
+        "--ict",
         help="Set an initial cycle time to validate against. This "
-        "may be required if the suite does not supply one.")
+             "may be required if the suite does not supply one.")
 
-parser.add_option( "--strict",
+    parser.add_option(
+        "--strict",
         help="Fail any use of unsafe or experimental features. "
-        "Currently this just means naked dummy tasks (tasks with no "
-        "corresponding runtime section) as these may result from "
-        "unintentional typographic errors in task names.",
-        action="store_true", default=False, dest="strict" )
+             "Currently this just means naked dummy tasks (tasks with no "
+             "corresponding runtime section) as these may result from "
+             "unintentional typographic errors in task names.",
+        action="store_true", default=False, dest="strict")
 
-parser.add_option( "--no-write", 
+    parser.add_option(
+        "--no-write",
         help="Don't write out the processed suite definition.",
-        action="store_true", default=False, dest="nowrite" )
+        action="store_true", default=False, dest="nowrite")
+
+    (options, args) = parser.parse_args()
+
+    suite, suiterc, junk = parser.get_suite()
+
+    cfg = SuiteConfig.get_inst(
+        suite, suiterc,
+        cli_initial_point_string=options.ict,
+        template_vars=options.templatevars,
+        template_vars_file=options.templatevars_file,
+        validation=True, strict=options.strict,
+        write_proc=not options.nowrite)
+
+    # Instantiate tasks and force evaluation of trigger expressions.
+    # (Taken from config.py to avoid circular import problems.)
+    # TODO - This is not exhaustive, it only uses the initial cycle point.
+    if cylc.flags.verbose:
+        print "Instantiating tasks to check trigger expressions"
+    for name in cfg.taskdefs.keys():
+        try:
+            itask = TaskProxy(
+                cfg.taskdefs[name],
+                cfg.start_point,
+                'waiting',
+                is_startup=True,
+                validate_mode=True)
+        except Exception as exc:
+            print >> sys.stderr, str(exc)
+            raise SuiteConfigError(
+                'ERROR, failed to instantiate task %s' % name)
+        if itask.point is None:
+            if cylc.flags.verbose:
+                print >> sys.stderr, (
+                    " + Task out of bounds for " + str(cfg.start_point) +
+                    ": " + itask.name)
+            continue
 
-(options, args) = parser.parse_args()
+        # Warn for purely-implicit-cycling tasks (these are deprecated).
+        if itask.tdef.sequences == itask.tdef.implicit_sequences:
+            print >> sys.stderr, (
+                "WARNING, " + name + ": not explicitly defined in " +
+                "dependency graphs (deprecated)"
+            )
 
-suite, suiterc, junk = parser.get_suite()
+        # force trigger evaluation now
+        try:
+            itask.prerequisites_eval_all()
+        except TriggerExpressionError as exc:
+            print >> sys.stderr, str(exc)
+            raise SuiteConfigError(
+                "ERROR, " + name + ": invalid trigger expression.")
+        except Exception as exc:
+            print >> sys.stderr, str(exc)
+            raise SuiteConfigError(
+                'ERROR, ' + name + ': failed to evaluate triggers.')
+        if cylc.flags.verbose:
+            print "  + " + itask.identity + " ok"
 
-if cylc.flags.verbose:
-    print "Parsing suite " + suite + ":", suiterc
+    print "Valid for cylc-" + CYLC_VERSION
 
-try:
-    config( suite, suiterc,
-            cli_initial_point_string=options.ict,
-            template_vars=options.templatevars,
-            template_vars_file=options.templatevars_file,
-            validation=True, strict=options.strict,
-            write_proc=not options.nowrite )
-except Exception,x:
-    if cylc.flags.debug:
-        raise
-    raise SystemExit(x)
 
-print "Valid for cylc-" + CYLC_VERSION
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-version b/bin/cylc-version
index 402e946..f6116b7 100755
--- a/bin/cylc-version
+++ b/bin/cylc-version
@@ -16,30 +16,38 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-"""Print the cylc release or git repository version number."""
+"""cylc [info] version
+
+Print the cylc version invoked at the command line.
+
+Note that "cylc -v,--version" just prints the version string from the main
+command interface, whereas this is a proper cylc command that can take the
+standard --host and --user options, etc.
+
+For the cylc version of a running suite daemon, see
+  "cylc get-suite-version"."""
 
 import sys
 from cylc.remote import remrun
 if remrun().execute():
     sys.exit(0)
 
+import cylc.flags
 from cylc.CylcOptionParsers import cop
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.version import CYLC_VERSION
-import cylc.flags
-
 
-parser = cop("""cylc [info] version
 
-Print the cylc version invoked at the command line.
-
-Note that "cylc -v,--version" just prints the version string from the main
-command interface, whereas this is a proper cylc command that can take the
-standard --host and --user options, etc.
-
-For the cylc version of running a suite daemon see
-  "cylc get-suite-version".""")
+def main():
+    parser = cop(__doc__, argdoc=[])
+    (options, args) = parser.parse_args()
+    print CYLC_VERSION
 
-(options, args) = parser.parse_args()
 
-print CYLC_VERSION
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/cylc-view b/bin/cylc-view
index b178234..1ba2ccc 100755
--- a/bin/cylc-view
+++ b/bin/cylc-view
@@ -16,20 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys
-from cylc.remote import remrun
-if remrun().execute():
-    sys.exit(0)
-
-import os, re
-import tempfile
-import subprocess
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.CylcOptionParsers import cop
-from parsec.fileparse import read_and_proc
-import cylc.flags
-
-parser = cop( """cylc [prep] view [OPTIONS] ARGS 
+"""cylc [prep] view [OPTIONS] ARGS
 
 View a read-only temporary copy of suite NAME's suite.rc file, in your
 editor, after optional include-file inlining and Jinja2 preprocessing.
@@ -42,130 +29,157 @@ Where <editor> is defined in the cylc site and user config files
 For remote host or owner, the suite will be printed to stdout unless
 the '-g,--gui' flag is used to spawn a remote GUI edit session.
 
-See also 'cylc [prep] edit'.""", jset=True, prep=True )
+See also 'cylc [prep] edit'."""
 
-parser.add_option( "--inline", "-i",
-        help="Inline include-files.",
-        action="store_true", default=False, dest="inline" )
+import sys
+from cylc.remote import remrun
+if remrun().execute():
+    sys.exit(0)
 
-parser.add_option( "--jinja2", "-j",
+import os
+import re
+import tempfile
+import subprocess
+
+import cylc.flags
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.CylcOptionParsers import cop
+from parsec.fileparse import read_and_proc
+
+
+def main():
+    parser = cop(__doc__, jset=True, prep=True)
+
+    parser.add_option(
+        "--inline", "-i", help="Inline include-files.", action="store_true",
+        default=False, dest="inline")
+
+    parser.add_option(
+        "--jinja2", "-j",
         help="View after Jinja2 template processing "
-        "(implies '-i/--inline' as well).",
-        action="store_true", default=False, dest="jinja2" )
+             "(implies '-i/--inline' as well).",
+        action="store_true", default=False, dest="jinja2")
 
-parser.add_option( "-p", "--process", 
+    parser.add_option(
+        "-p", "--process",
         help="View after all processing (Jinja2, inlining, "
-        "line-continuation joining).",
-        action="store_true", default=False, dest="process" )
+             "line-continuation joining).",
+        action="store_true", default=False, dest="process")
 
-parser.add_option( "--mark", "-m",
+    parser.add_option(
+        "--mark", "-m",
         help="(With '-i') Mark inclusions in the left margin.",
-        action="store_true", default=False, dest="mark" )
+        action="store_true", default=False, dest="mark")
 
-parser.add_option( "--label", "-l",
-        help="(With '-i') Label file inclusions with the file name. "
-        "Line numbers will not correspond to those reported by the parser.",
-        action="store_true", default=False, dest="label" )
+    parser.add_option(
+        "--label", "-l",
+        help="(With '-i') Label file inclusions with the file name. Line "
+             "numbers will not correspond to those reported by the parser.",
+        action="store_true", default=False, dest="label")
 
-parser.add_option( "--single",
+    parser.add_option(
+        "--single",
         help="(With '-i') Inline only the first instances of any "
-        "multiply-included files. Line numbers will not correspond to "
-        "those reported by the parser.",
-        action="store_true", default=False, dest="single" )
+             "multiply-included files. Line numbers will not correspond to "
+             "those reported by the parser.",
+        action="store_true", default=False, dest="single")
 
-parser.add_option( "--cat", "-c",
+    parser.add_option(
+        "--cat", "-c",
         help="Concatenate continuation lines (line numbers will "
-        "not correspond to those reported by the parser).",
-        action="store_true", default=False, dest="cat" )
+             "not correspond to those reported by the parser).",
+             action="store_true", default=False, dest="cat")
 
-parser.add_option( "--gui", "-g",
-        help="Force use of the configured GUI editor.",
-        action="store_true", default=False, dest="geditor" )
+    parser.add_option(
+        "--gui", "-g", help="Force use of the configured GUI editor.",
+        action="store_true", default=False, dest="geditor")
 
-parser.add_option( "--stdout",
-        help="Print the suite definition to stdout.",
-        action="store_true", default=False, dest="stdout" )
+    parser.add_option(
+        "--stdout", help="Print the suite definition to stdout.",
+        action="store_true", default=False, dest="stdout")
 
-parser.add_option( "--mark-for-edit",
+    parser.add_option(
+        "--mark-for-edit",
         help="(With '-i') View file inclusion markers as "
-        "for 'cylc edit --inline'.",
-        action="store_true", default=False, dest="asedit" )
-
-( options, args ) = parser.parse_args()
-
-suite, suiterc, junk = parser.get_suite()
-
-cylc_tmpdir = GLOBAL_CFG.get_tmpdir()
-if options.geditor:
-    editor = GLOBAL_CFG.get( ['editors','gui'] )
-else:
-    editor = GLOBAL_CFG.get( ['editors','terminal'] )
-
-global suitedir
-suitedir = os.path.dirname(suiterc)
-
-# read in the suite.rc file
-viewcfg = {
-        'mark'   : options.mark,
-        'single' : options.single,
-        'label'  : options.label,
-        'jinja2' : options.jinja2 or options.process,
-        'contin' : options.cat or options.process,
-        'inline' : options.inline or options.jinja2 or options.process,
-        }
-try:
-    lines = read_and_proc( suiterc,
-            options.templatevars, options.templatevars_file,
-            viewcfg=viewcfg, asedit=options.asedit )
-except Exception, x:
-    if cylc.flags.debug:
-        raise
-    sys.exit( x )
-
-if options.stdout:
+             "for 'cylc edit --inline'.",
+        action="store_true", default=False, dest="asedit")
+
+    (options, args) = parser.parse_args()
+
+    suite, suiterc, junk = parser.get_suite()
+
+    cylc_tmpdir = GLOBAL_CFG.get_tmpdir()
+    if options.geditor:
+        editor = GLOBAL_CFG.get(['editors', 'gui'])
+    else:
+        editor = GLOBAL_CFG.get(['editors', 'terminal'])
+
+    global suitedir
+    suitedir = os.path.dirname(suiterc)
+
+    # read in the suite.rc file
+    viewcfg = {'mark': options.mark,
+               'single': options.single,
+               'label': options.label,
+               'jinja2': options.jinja2 or options.process,
+               'contin': options.cat or options.process,
+               'inline': options.inline or options.jinja2 or options.process,
+               }
+    lines = read_and_proc(
+        suiterc, options.templatevars, options.templatevars_file,
+        viewcfg=viewcfg, asedit=options.asedit)
+
+    if options.stdout:
+        for line in lines:
+            print line
+        sys.exit(0)
+
+    # write to a temporary file
+    viewfile = tempfile.mktemp(suffix=".suite.rc", prefix=suite + '.',
+                               dir=cylc_tmpdir)
+    h = open(viewfile, 'wb')
     for line in lines:
-        print line
-    sys.exit(0)
+        h.write(line + '\n')
+    h.close()
 
-# write to a temporary file
-viewfile = tempfile.mktemp( suffix = ".suite.rc", prefix = suite + '.', dir = cylc_tmpdir )
-h = open( viewfile, 'wb' )
-for line in lines:
-    h.write( line + '\n' )
-h.close()
-
-# set the file to be read only
-os.chmod( viewfile, 0444 )
-
-# capture the temp file's mod time in case the user edits it
-# and overrides the readonly mode.
-modtime1 = os.stat( viewfile ).st_mtime
-
-# in case editor has options, e.g. 'emacs -nw':
-command_list = re.split( ' ', editor )
-command_list.append( viewfile )
-command = ' '.join( command_list )
-try:
+    # set the file to be read only
+    os.chmod(viewfile, 0444)
+
+    # capture the temp file's mod time in case the user edits it
+    # and overrides the readonly mode.
+    modtime1 = os.stat(viewfile).st_mtime
+
+    # in case editor has options, e.g. 'emacs -nw':
+    command_list = re.split(' ', editor)
+    command_list.append(viewfile)
+    command = ' '.join(command_list)
     # THIS BLOCKS UNTIL THE COMMAND COMPLETES
-    retcode = subprocess.call( command_list )
+    retcode = subprocess.call(command_list)
     if retcode != 0:
         # the command returned non-zero exit status
         print >> sys.stderr, command, 'failed:', retcode
         sys.exit(1)
-except OSError:
-    # the command was not invoked
-    print >> sys.stderr, 'ERROR: unable to execute:', command
-    sys.exit(1)
-
-# !!!VIEWING FINISHED!!!
-
-# Did the user edit the file
-modtime2 = os.stat( viewfile ).st_mtime
-
-if modtime2 > modtime1:
-    print
-    print >> sys.stderr, 'WARNING: YOU HAVE EDITED A TEMPORARY READ-ONLY SUITE COPY:'
-    print >> sys.stderr, viewfile
-    print >> sys.stderr, 'In future use \'cylc [prep] edit\' to edit a suite.'
-    print
-# DONE
+
+    # !!!VIEWING FINISHED!!!
+
+    # Did the user edit the file
+    modtime2 = os.stat(viewfile).st_mtime
+
+    if modtime2 > modtime1:
+        print
+        print >> sys.stderr, (
+            'WARNING: YOU HAVE EDITED A TEMPORARY READ-ONLY SUITE COPY:')
+        print >> sys.stderr, viewfile
+        print >> sys.stderr, (
+            'In future use \'cylc [prep] edit\' to edit a suite.')
+        print
+    # DONE
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
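
The command refactored above, and several more below (cylc-warranty, gcapture), adopt the same entry-point shape: module-level code moves into a main() function and the __main__ guard traps exceptions, re-raising only in debug mode so users normally get a one-line error instead of a traceback. A minimal standalone sketch of that pattern, with DEBUG standing in for cylc.flags.debug and a deliberately failing main() used purely for illustration:

    #!/usr/bin/env python
    """Sketch of the entry-point pattern used by the refactored commands."""

    import sys

    DEBUG = '--debug' in sys.argv  # stand-in for cylc.flags.debug


    def main():
        # A real command would parse options and do its work here.
        raise RuntimeError("something went wrong")


    if __name__ == "__main__":
        try:
            main()
        except Exception as exc:
            if DEBUG:
                raise          # full traceback for developers
            sys.exit(exc)      # one-line error and non-zero exit for users
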
diff --git a/bin/cylc-warranty b/bin/cylc-warranty
index 1e7a22f..e645382 100755
--- a/bin/cylc-warranty
+++ b/bin/cylc-warranty
@@ -16,36 +16,41 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-
-import sys
-
-usage = """
-USAGE: cylc [license] warranty [--help]
+"""Usage: cylc [license] warranty [--help]
    Cylc is released under the GNU General Public License v3.0
 This command prints the GPL v3.0 disclaimer of warranty.
 Options:
-  --help   Print this usage message.
-"""
+  --help   Print this usage message."""
+
+import sys
+import cylc.flags
+
 
-if len(sys.argv) != 1:
-    print usage
-    if sys.argv[1] == '--help':
-        sys.exit(0)
+def main():
+
+    if len(sys.argv) == 1:
+        print """GNU General Public License v3.0, Section 15:
+
+15. Disclaimer of Warranty.
+
+  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION."""
     else:
-        print "ERROR: illegal command line arguments"
-        sys.exit(1)
-
-print ""
-print "GNU General Public License v3.0, Section 15:"
-print ""
-print "15. Disclaimer of Warranty."
-print ""
-print "  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY"
-print "APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT"
-print "HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY"
-print "OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,"
-print "THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR"
-print "PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM"
-print "IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF"
-print "ALL NECESSARY SERVICING, REPAIR OR CORRECTION."
-print ""
+        print __doc__
+        if sys.argv[1] not in ['help', '--help']:
+            sys.exit(1)
+
+
+if __name__ == "__main__":
+    try:
+        main()
+    except Exception as exc:
+        if cylc.flags.debug:
+            raise
+        sys.exit(exc)
diff --git a/bin/gcapture b/bin/gcapture
index a442cb8..ecbbd48 100755
--- a/bin/gcapture
+++ b/bin/gcapture
@@ -16,10 +16,20 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, sys
+"""gcapture [options] COMMAND
+Run a command as a subprocess and capture the resulting stdout and
+stderr to display in a dialog. Examples:
+    $ gcapture "echo foo" &
+
+Arguments:
+   COMMAND    - the command line to run"""
+
+import os
+import sys
 from optparse import OptionParser
 
-sys.path.append(os.path.dirname(os.path.realpath(os.path.abspath(__file__))) + '/../lib')
+sys.path.append(
+    os.path.dirname(os.path.realpath(os.path.abspath(__file__))) + '/../lib')
 
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.gui.gcapture import gcapture, gcapture_tmpfile
@@ -27,30 +37,28 @@ from cylc.gui.gcapture import gcapture, gcapture_tmpfile
 # This is a unit test for $CYLC_DIR/lib/cylc/gui/gcapture.py
 # but it may be more generally useful.
 
-parser = OptionParser( """gcapture [options] COMMAND
-Run a command as a subprocess and capture the resulting stdout and
-stderr to display in a dialog. Examples:
-    $ capture "echo foo" &
-
-Arguments:
-   COMMAND    - the command line to run""")
+parser = OptionParser(__doc__)
 
-parser.add_option( "--width",
+parser.add_option(
+    "--width",
     help="dialog window width in pixels (default 400)",
-    metavar='INT', action="store", default=400, dest="width" )
+    metavar='INT', action="store", default=400, dest="width")
 
-parser.add_option( "--height",
+parser.add_option(
+    "--height",
     help="dialog window height in pixels (default 200)",
-    metavar='INT', action="store", default=200, dest="height" )
+    metavar='INT', action="store", default=200, dest="height")
 
-parser.add_option( "--file",
+parser.add_option(
+    "--file",
     help="(optional) capture stdout and stderr in FILE"
     "(full path, e.g. /path/to/foo/output.txt).",
-    metavar='FILE', action="store", default=None, dest="filep" )
+    metavar='FILE', action="store", default=None, dest="filep")
 
-parser.add_option( "--other",
+parser.add_option(
+    "--other",
     help="(optional) view the output (--prefix) of another gcapture process.",
-    action="store_true", default=False, dest="other" )
+    action="store_true", default=False, dest="other")
 
 (options, args) = parser.parse_args()
 
@@ -61,7 +69,7 @@ import gtk
 import gobject
 
 if options.other and not options.filep:
-    parser.error( '--other requires --prefix' )
+    parser.error('--other requires --prefix')
 
 command = ' '.join(args)
 
@@ -77,12 +85,15 @@ if options.filep:
     try:
         if ignore_command:
             # open existing file
-            stdout = open( options.filep, 'rb' )
+            stdout = open(options.filep, 'rb')
         else:
-            stdout = open( options.filep, 'wb' )
+            stdout = open(options.filep, 'wb')
     except IOError, x:
         raise SystemExit(x)
-    gcapture( command, stdout, width=int(options.width), height=int(options.height), standalone=True, ignore_command=ignore_command ).run()
+    gcapture(command, stdout, width=int(options.width),
+             height=int(options.height), standalone=True,
+             ignore_command=ignore_command).run()
 else:
-    gcapture_tmpfile( command, cylc_tmpdir, width=int(options.width), height=int(options.height), standalone=True ).run()
+    gcapture_tmpfile(command, cylc_tmpdir, width=int(options.width),
+                     height=int(options.height), standalone=True).run()
 gtk.main()
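
The gcapture change above also shows the OptionParser(__doc__) idiom applied across these scripts: the usage text lives once, in the module docstring, and is passed to optparse as the parser's usage string. A small self-contained sketch under the same idiom (the tool name and --verbose option are invented for illustration, not part of cylc):

    #!/usr/bin/env python
    """mytool [options] COMMAND
    Run COMMAND in a shell and exit with its return code."""

    import subprocess
    import sys
    from optparse import OptionParser

    # The module docstring doubles as the usage text printed by --help.
    parser = OptionParser(__doc__)
    parser.add_option(
        "--verbose", help="print the command before running it",
        action="store_true", default=False, dest="verbose")

    (options, args) = parser.parse_args()
    if not args:
        parser.error("COMMAND is required")

    command = ' '.join(args)
    if options.verbose:
        print command
    sys.exit(subprocess.call(command, shell=True))
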
diff --git a/conf/cylc-mode.el b/conf/cylc-mode.el
index f5b5288..e1cee26 100644
--- a/conf/cylc-mode.el
+++ b/conf/cylc-mode.el
@@ -1,10 +1,10 @@
 ;; Simple syntax highlighting for cylc suite definition files.
 ;; Author: Luis Kornblueh, 2012
 ;;
-;; 1. copy this file to $HOME/.emacs.d
+;; 1. copy this file to $HOME/.emacs.d/lisp
 ;; 2. add in $HOME/.emacs the following lines:
 ;;
-;;   (setq load-path (cons (expand-file-name "~/.emacs.d") load-path))
+;;   (add-to-list 'load-path "~/.emacs.d/lisp/")
 ;;   (require 'cylc-mode)
 ;;   (setq auto-mode-alist (append auto-mode-alist 
 ;;			      (list '("\\.rc$" . cylc-mode))))
diff --git a/conf/cylc.lang b/conf/cylc.lang
index ce745b1..787923a 100644
--- a/conf/cylc.lang
+++ b/conf/cylc.lang
@@ -116,6 +116,7 @@
       <keyword>script</keyword>
       <keyword>run time range</keyword>
       <keyword>run-dir</keyword>
+      <keyword>template</keyword>
       <keyword>runahead limit</keyword>
       <keyword>root</keyword>
       <keyword>retry handler</keyword>
diff --git a/conf/cylc.xml b/conf/cylc.xml
index 2802702..9bf7485 100644
--- a/conf/cylc.xml
+++ b/conf/cylc.xml
@@ -42,6 +42,7 @@
         <RegExpr attribute='Keyword' String=' script '/>
         <RegExpr attribute='Keyword' String=' run time range '/>
         <RegExpr attribute='Keyword' String=' run-dir '/>
+        <RegExpr attribute='Keyword' String=' template '/>
         <RegExpr attribute='Keyword' String=' runahead limit '/>
         <RegExpr attribute='Keyword' String=' root '/>
         <RegExpr attribute='Keyword' String=' retry handler '/>
diff --git a/conf/gcylcrc/gcylc.rc.eg b/conf/gcylcrc/gcylc.rc.eg
index fa2114f..e330125 100644
--- a/conf/gcylcrc/gcylc.rc.eg
+++ b/conf/gcylcrc/gcylc.rc.eg
@@ -7,9 +7,10 @@
 #    (b) inheriting from an existing theme and overriding specific
 #     colors (see the "PinkRun" example below).
 
-initial views = text, dot     # set your default views
-ungrouped views = graph       # set your initially ungrouped views
-use theme = PinkRun           # set your default theme
+initial views = text, dot      # set your default views
+ungrouped views = graph        # set your initially ungrouped views
+use theme = PinkRun            # set your default theme
+initial side-by-side views = False # set your default orientation
 
 [themes]
     [[PinkRun]] # override the 'running' color in the 'default' theme
diff --git a/conf/gcylcrc/themes.rc b/conf/gcylcrc/themes.rc
index 0ece0ff..030d2bb 100644
--- a/conf/gcylcrc/themes.rc
+++ b/conf/gcylcrc/themes.rc
@@ -24,6 +24,7 @@
         held      = "color=#fe83ff"
         queued    = "color=#dcd901"
         ready     = "color=#a08f49"
+        expired   = "color=#000000"
         submitted = "color=#a1cf25", "style=filled"
         submit-failed = "color=#ff007e", "style=filled", "fontcolor=white"
         running   = "color=#00c140", "style=filled"
@@ -36,7 +37,8 @@
     [[solid]]
         # a solid version of the default theme
         inherit = "default"
-        defaults  = "color=#bbbbbb", "style=filled", "fontcolor=black"
+        defaults = "color=#bbbbbb", "style=filled", "fontcolor=black"
+        expired  = "color=#000000", "fontcolor=white"
 
     [[high-contrast]]
         inherit = "default"
@@ -45,6 +47,7 @@
         held      = "color=#fc00ff"
         queued    = "color=#e6c522", "fontcolor=black"
         ready     = "color=#9c8320"
+        expired   = "color=#000000", "fontcolor=white"
         submitted = "color=#76a200", "fontcolor=black"
         submit-failed = "color=#ff6f00"
         running   = "color=#008e27"
@@ -64,6 +67,7 @@
         waiting   = "color=#00ccff" # blue
         queued    = "color=#336600" # moss green
         ready     = "color=#d2800a",  "style=unfilled"
+        expired   = "color=#000000", "fontcolor=white"
         submitted = "color=#ff9933" # orange
         submit-failed = "color=#ff3333", "style=unfilled"
         running   = "color=#66ff33" # lime green
diff --git a/dev/ToDo/ToDo.txt b/dev/ToDo/ToDo.txt
index 253a0a2..3e2ab54 100644
--- a/dev/ToDo/ToDo.txt
+++ b/dev/ToDo/ToDo.txt
@@ -77,16 +77,6 @@ messages such as remote error tracebacks to be sent to the handler.
   broker) for satisfaction. We could then eliminate the broker, and
   may significantly increase efficiency for very large suites?
 
-* RECURSIVE PURGE OVERHAUL? see documentation of limitations in the
-  purge command help. Post cylc-3.0 we could easily determine, via the
-  suite dependency graph, if any tasks that should be affected by the
-  purge have not yet reached the purge start time. Also: see TO DO in
-  the in-method comments: ensure that ALL tasks whose prerequisites get
-  satisfied in the virtual system evolution that occurs in the purge
-  algorithm, get unsatisfied again at the end of it.
- (consider also: clock-triggered tasks downstream of the purge root -
- they won't trigger in the purge simulation if their time is not up yet?)
-
 * a 'nudge' that just updates task state info, without going through
   the full task processing loop?
 
diff --git a/dev/bin/n-suites-start.sh b/dev/bin/n-suites-start.sh
new file mode 100755
index 0000000..7da0eff
--- /dev/null
+++ b/dev/bin/n-suites-start.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Usage: $0 PREFIX N
+#   Create, register, and start N suites registered as ${PREFIX}_$n
+# The suites are started with --hold to minimize system load.
+# See also n-suites-stop.sh.
+
+set -eu
+
+PREFIX=$1
+N=$2
+
+TOP_DIR=$TMPDIR/$$
+
+mkdir -p $TOP_DIR/${PREFIX}_1
+cat >> $TOP_DIR/${PREFIX}_1/suite.rc << __END__
+[cylc]
+    cycle point format = %Y-%m
+[scheduling]
+    initial cycle point = 2015-08
+    [[dependencies]]
+        [[[P1M]]]
+            graph = "foo => bar & baz & qux"
+__END__
+
+cylc reg ${PREFIX}_1 $TOP_DIR/${PREFIX}_1
+cylc val ${PREFIX}_1
+
+for I in $(seq 2 $N); do
+    cylc cp ${PREFIX}_1 ${PREFIX}_$I $TOP_DIR
+done
+
+for I in $(seq 1 $N); do
+    cylc run --hold ${PREFIX}_$I
+done
+
+cylc scan
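
As a usage illustration (the prefix and count here are arbitrary, and the script is assumed to be invoked by path or from $PATH): "n-suites-start.sh load 3" registers and hold-starts suites load_1, load_2 and load_3, and "n-suites-stop.sh load" (added below) stops, unregisters and deletes them again.
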
diff --git a/dev/bin/n-suites-stop.sh b/dev/bin/n-suites-stop.sh
new file mode 100755
index 0000000..caf72b7
--- /dev/null
+++ b/dev/bin/n-suites-stop.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# Usage: $0 PREFIX
+#   Stop, unregister, and delete suites registered as ${PREFIX}_$n
+# Companion of n-suites-start.sh.
+
+set -eu
+
+PREFIX=$1
+
+echo
+for SUITE in $(cylc scan | egrep "^${PREFIX}_" | awk '{print $1}'); do
+    echo $SUITE
+    cylc stop --max-polls=30 --interval=2 $SUITE &
+done
+wait
+
+cylc db unreg -d "^${PREFIX}_.*"
diff --git a/dev/filewalk.py b/dev/filewalk.py
deleted file mode 100644
index 6bdbf2a..0000000
--- a/dev/filewalk.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-
-# How to traverse a directory tree using Python
-
-import os, re
-
-pwd = os.getcwd()
-match = os.path.join( 'CRAP', 'JUNK', 'foo', 'YYYYMMDDHH', 'foo', 'munge.*' )
-print match
-cpts = re.split( '/', match )
-
-pre = ''
-for cpt in cpts:
-    print '*', cpt
-    if re.search( 'YYYY', cpt ):
-        break
-    else:
-        pre = os.path.join( pre, cpt )
-
-print pre
-
-for root, dirs, files in os.walk( pre ):
-    print ':::', root, dirs, files
-
-    index = len( re.split('/', root ))
-    print index, cpts[index]
-
-    mfoo = re.sub( 'YYYYMMDDHH', '(\d{10})', cpts[index] )
-
-    ddirs = dirs
-    for dir in ddirs:
-        if not re.match( mfoo, dir ):
-            print 'rejecting', dir
-            dirs.remove( dir )
diff --git a/dev/suites/busy/suite.rc b/dev/suites/busy/suite.rc
index 6db7d8c..66e6af0 100644
--- a/dev/suites/busy/suite.rc
+++ b/dev/suites/busy/suite.rc
@@ -26,7 +26,7 @@ description = A suite of 1000 tasks per cycle
             """
 [runtime]
     [[root]]
-        script = sleep 1
+        command scripting = sleep 1
         [[[job submission]]]
             method = at
         [[[event hooks]]]
diff --git a/doc/changes-old.txt b/doc/changes-old.txt
index ce83f57..6d27034 100644
--- a/doc/changes-old.txt
+++ b/doc/changes-old.txt
@@ -472,7 +472,7 @@ by the runtime namespace hierarchy.
 + Asynchronous (non-cycling) suites can now be stopped and restarted
 mid-run.
 
-+ Clock-triggered tasks can now be triggered manually before their
++ Clock-trigger tasks can now be triggered manually before their
 normal trigger time is up.
 
 + Improved graph string validation: dangling arrows are now caught;
@@ -785,7 +785,7 @@ altered while the GUI was still running.
 ________________________________________________________________________
 4.5.1
 
-+ Task retry delays now work with clock-triggered tasks.
++ Task retry delays now work with clock-trigger tasks.
 
 + Cleaned up use of Pyro connection timeouts. Prior to 4.5.0 there was
 no timeout. At 4.5.0 we set a 1 seconds default, but depending on
diff --git a/doc/changes.html b/doc/changes.html
index 4cab9d8..6eb37a6 100644
--- a/doc/changes.html
+++ b/doc/changes.html
@@ -16,113 +16,420 @@
 <div class="rbox">
 <h3 style="margin:10px">versions</h3>
 <ul>
-    <li><a href="#">TOP</a></li>
-    <li><a href="#6.4.1">6.4.1</a></li>
-    <li><a href="#6.4.0">6.4.0</a></li>
-    <li><a href="#6.3.1">6.3.1</a></li>
-    <li><a href="#6.3.0">6.3.0</a></li>
-    <li><a href="#6.2.0">6.2.0</a></li>
-    <li><a href="#6.1.2">6.1.2</a></li>
-    <li><a href="#6.1.1">6.1.1</a></li>
-    <li><a href="#6.1.0">6.1.0</a></li>
-    <li><a href="#6.0.2">6.0.2</a></li>
-    <li><a href="#6.0.1">6.0.1</a></li>
-    <li><a href="#6.0.0">6.0.0</a></li>
-    <li><a href="#5.4.14">5.4.14</a></li>
-    <li><a href="#5.4.13">5.4.13</a></li>
-    <li><a href="#5.4.12">5.4.12</a></li>
-    <li><a href="#5.4.11">5.4.11</a></li>
-    <li><a href="#5.4.10">5.4.10</a></li>
-    <li><a href="#5.4.9">5.4.9</a></li>
-    <li><a href="#5.4.8">5.4.8</a></li>
-    <li><a href="#5.4.7">5.4.7</a></li>
-    <li><a href="#5.4.6">5.4.6</a></li>
-    <li><a href="#5.4.5">5.4.5</a></li>
-    <li><a href="#5.4.4">5.4.4</a></li>
-    <li><a href="#5.4.3">5.4.3</a></li>
-    <li><a href="#5.4.2">5.4.2</a></li>
-    <li><a href="#5.4.1">5.4.1</a></li>
-    <li><a href="#5.4.0">5.4.0</a></li>
-    <li><a href="#5.3.0">5.3.0</a></li>
-    <li><a href="#5.2.0">5.2.0</a></li>
-    <li><a href="#5.1.1">5.1.1</a></li>
-    <li><a href="#5.1.0">5.1.0</a></li>
-    <li><a href="#5.0.3">5.0.3</a></li>
-    <li><a href="#5.0.2">5.0.2</a></li>
-    <li><a href="#5.0.1">5.0.1</a></li>
-    <li><a href="#5.0.0">5.0.0</a></li>
-    <li><a href="#old">old</a></li>
+    <li><a href="#">TOP</a>
+    <li><a href="#6.7.4">6.7.4</a>
+    <li><a href="#6.7.3">6.7.3</a>
+    <li><a href="#6.7.2">6.7.2</a>
+    <li><a href="#6.7.1">6.7.1</a>
+    <li><a href="#6.7.0">6.7.0</a>
+    <li><a href="#6.6.1">6.6.1</a>
+    <li><a href="#6.6.0">6.6.0</a>
+    <li><a href="#6.5.0">6.5.0</a>
+    <li><a href="#6.4.1">6.4.1</a>
+    <li><a href="#6.4.0">6.4.0</a>
+    <li><a href="#6.3.1">6.3.1</a>
+    <li><a href="#6.3.0">6.3.0</a>
+    <li><a href="#6.2.0">6.2.0</a>
+    <li><a href="#6.1.2">6.1.2</a>
+    <li><a href="#6.1.1">6.1.1</a>
+    <li><a href="#6.1.0">6.1.0</a>
+    <li><a href="#6.0.2">6.0.2</a>
+    <li><a href="#6.0.1">6.0.1</a>
+    <li><a href="#6.0.0">6.0.0</a>
+    <li><a href="#5.4.14">5.4.14</a>
+    <li><a href="#5.4.13">5.4.13</a>
+    <li><a href="#5.4.12">5.4.12</a>
+    <li><a href="#5.4.11">5.4.11</a>
+    <li><a href="#5.4.10">5.4.10</a>
+    <li><a href="#5.4.9">5.4.9</a>
+    <li><a href="#5.4.8">5.4.8</a>
+    <li><a href="#5.4.7">5.4.7</a>
+    <li><a href="#5.4.6">5.4.6</a>
+    <li><a href="#5.4.5">5.4.5</a>
+    <li><a href="#5.4.4">5.4.4</a>
+    <li><a href="#5.4.3">5.4.3</a>
+    <li><a href="#5.4.2">5.4.2</a>
+    <li><a href="#5.4.1">5.4.1</a>
+    <li><a href="#5.4.0">5.4.0</a>
+    <li><a href="#5.3.0">5.3.0</a>
+    <li><a href="#5.2.0">5.2.0</a>
+    <li><a href="#5.1.1">5.1.1</a>
+    <li><a href="#5.1.0">5.1.0</a>
+    <li><a href="#5.0.3">5.0.3</a>
+    <li><a href="#5.0.2">5.0.2</a>
+    <li><a href="#5.0.1">5.0.1</a>
+    <li><a href="#5.0.0">5.0.0</a>
+    <li><a href="#old">old</a>
 </ul>
 </div>
 
-<p>Some minor enhancements and bug fixes may be omitted from this page.
-For the definitive record see the cylc git repository change log.</p>
+<p>Selected enhancements and bug fixes.  Some changes may be omitted from this
+page; for the definitive record see the cylc repository change log.</p>
 
-<a name="6.4.1"/>
-    <h2>6.4.1</h2>
+<a name="6.7.4"/>
+    <h2>6.7.4</h2>
 
-		<h3>6.4.1 enhancements</h3>
+    <h3>6.7.4 enhancements</h3>
+    <ul>
+        <li> <code>cylc monitor</code> - sort tasks by definition order or
+        alphanumeric order.  The default can be set in global config, and
+        overridden by a command line option.
+        <li> <code>gcylc</code> - friendlier message dialog for remote suite
+        port file not found, which normally just indicates that the suite is
+        not running: "Port file not found - suite not running?".
+        <li> configurable retries for registration of job logs in suite runtime
+        databases, to handle batch systems that don't immediately write job
+        logs to their final locations.
+        <li> configurable job log retrieval command. The default is
+            <code>rsync -a</code>.
+    </ul>
 
-		<li> Suite and task URLs, browseable from <code>gcylc</code> and <code>cylc
-				doc</code>.
+    <h3>6.7.4 fixes</h3>
+    <ul>
+        <li> <code>gcylc</code> - restored job poll and kill result
+        notification in the text view message column.
+        <li> suite visualization - don't plot graph nodes beyond a suite final
+        cycle point.
+        <li> strip any leading whitespace from <code>cylc broadcast</code>
+        settings, which could lead to a broadcast setting not being recognized
+        as the same setting as its target item.
+        <li> Safer write to <code>NN/job-activity.log</code>. It was previously
+        possible, though unlikely, on very slow filesystems for the suite
+        daemon to attempt to write to the job activity log while the "NN" link
+        (to the log directory for the latest job submit number) was still being
+        created by the job submission process.
+    </ul>
 
-		<li> Suite "scripting" items can be shortened: <code>command
-				scripting</code> to <code>script</code>; <code>post-command
-				scripting</code> to <code>post-script</code>; etc.
 
-		<li> Filtering of graph nodes by <code>cylc graph --filter</code>.
+<a name="6.7.3"/>
+    <h2>6.7.3</h2>
 
-		<li> <code>cylc graph</code> and <code>gcylc</code> graph view: nodes that
-		are filtered out are now grouped and replaced by dotted "scissor nodes"
-		rather than breaking the graph into separate chunks.
+    <h3>6.7.3 enhancements</h3>
+    <ul>
+	    <li> Better diagnostics for remote job submission problems.
+	  <div style="background:yellow">Note that any
+	    output printed from non-interactive shell login scripts such as
+	    <code>.bashrc</code> and <code>.tcshrc</code> will break remote job
+	    submission - this is due to a known deficiency of
+	    <code>scp</code>.</div>
+    </ul>
+
+    <h3>6.7.3 fixes</h3>
+    <ul>
+	    <li> Restored the right-click group/ungroup menu in the <code>cylc
+		    graph</code> viewer (broken at 6.7.2).
+	    <li> Fixed delayed 
+	    <code>gcylc</code> text view update after changing task filter
+	    settings (broken at 6.7.0).
+    </ul>
+
+	
+<a name="6.7.2"/>
+    <h2>6.7.2</h2>
 
-		<li> <code>gcylc</code> text tree view,
+    <h3>6.7.2 enhancements</h3>
 		<ul>
-				<li> display job submit number, retry number, and retry delay interval,
-				in the "latest message" column.
-				<li> display [user@]host and batch system name even after job submission
-				failure.
-				<li> report failure of poll and kill commands (e.g. due to task host down) in
-				the "latest message" column.
+				<li> You can now live "tail-follow" local and remote suite and task job
+				logs in the terminal with <code>cylc cat-log --tail</code>.
+				<li> <code>cylc suite-state</code> can now take a custom cycle point
+				format if the target suite uses a different format.  Plus a
+				corresponding option for automatic suite-polling tasks.
+				<li> Configurable job log tailer command templates, for task hosts with
+				non-GNU-compatible 'tail' and 'ps' commands.
 		</ul>
+	
+    <h3>6.7.2 fixes</h3>
+		<ul>
+				<li> Restored validation on hosts with graphing disabled.
+				<li> Restored backward compatibility with pre-6.5.0 suite clients
+				(principally for any old versions of <code>cylc scan</code> still in
+				use).
+				<li> Enabled host-specific task event handler retry delays (global
+				config).
+				<li> A sensible validation error for a missing suite graph.
+				<li> (AIX hosts) Fixed polling of loadleveler jobs (broken since 6.7.0)
+				<li> (AIX hosts) Tail-following task job logs can now be made to work
+				on AIX hosts via a custom job log tailer command template (documented).  
+		</ul>
+	
+<a name="6.7.1"/>
+    <h2>6.7.1</h2>
 
-		<li> Improved documentation of suite share and task work directory location
-		and use.
+    <h3>6.7.1 fixes</h3>
+		<ul>
+				<li> Avoid remote "tail" processes left behind by the GUI remote job
+				log viewer (since 6.7.0).
+				<li> gcylc GUI - fixed a bug that under heavy load conditions could
+				lead to warning dialogs (non-fatal) associated with stopping the
+				progress bar.
+				<li> Use remote job log retrieval retries as configured.
+				<li> Fixed a problem (since 6.7.0) with updating CYLC_DIR for remote
+				hosts.
+				<li> Fixed a problem that could affect the ignoring of pre-initial
+				triggers (i.e. of dependence on tasks prior to the initial cycle point)
+				in conditional trigger expressions.
+				<li> Better error messages, for bad suite visualization config and bad
+				use of the "cylc cat-log" command.
+    </ul>
+ 
+<a name="6.7.0"/>
+    <h2>6.7.0</h2>
 
-		<li> Better error message for job submission failure due to choice of a
-		batch system that is not available on the task host.
+    <h3>6.7.0 enhancements</h3>
+    <ul>
+        <li> Suite identity requests from <code>cylc scan</code> and <code>cylc
+        gscan</code> are now only logged in debug mode, or in aggregate if
+        exceeding 1 connection per second over an hour.
+        <li> Suite database interaction further optimized to speed up elapsed
+        times for fast-running suites.
+        <li> Built-in task event handlers are now grouped where possible - e.g.
+        to generate a single email for multiple task failures; and to group
+        rsync commands by host for remote job log retrieval.
+        <li> Job submit, job poll, and job kill actions are now grouped by
+        user@host.
+        <li> 'background' job submission now detaches with <code>nohup</code> -
+                this should prevent creation of zombie processes (but it may
+                prevent use of 'background' jobs on remote hosts that don't
+                permit non terminal-attached processes on login nodes - submit
+                jobs to 'at' or your batch scheduler instead). 
+        <li> Polling a task now returns custom "message outputs" as well as job
+        status.
+        <li> <code>gcylc</code> can now tail-follow remote job log files.
+        <li> If your batch scheduler - e.g. PBS - hides the final job logs
+        until completion, <code>gcylc</code> and <code>cylc cat-log</code> can
+        now be configured to view job out and err files using special commands
+        while the job is running.
+        <li> Allow suite-state polling even if the target suite database does
+        not exist yet.
+        <li> Improved reporting of Jinja2 errors.
+        <li> The <code>cylc gscan</code> GUI (a.k.a. <code>cylc
+            gsummary</code>) can now display most of the information
+        available from <code>cylc scan</code> (and more to come).
+        <li> Prompt for confirmation before actioning a job kill.
+        <li> New gcylc.rc configuration option to start <code>gcylc</code> with
+        side-by-side suite views.
+    </ul>
+ 
+    <h3>6.7.0 fixes</h3>
+    <ul>
+        <li> Fixed a bug that could cause a KeyError in the gcylc text tree
+        view.
+        <li> Corrected emacs cylc-mode installation instructions.
+        <li> Prevented a gcylc abort caused by clicking on the task state
+        filtering icon before any suite is connected.
+        <li> Corrected our (new-ish) support for <b>moab</b> job submission.
+        <li> Prevent occasional job submissions after a suite shutdown is
+        ordered.
+    </ul>
+ 
+<a name="6.6.1"/>
+    <h2>6.6.1</h2>
+
+    <h3>6.6.1 fixes</h3>
+    <ul>
+        <li> At sites using ssh task messaging, 
+        prevent fall-through to Pyro RPC messaging after successful ssh
+        messaging (this would cause error messages in task output since
+        cylc-6.5.0, if using ssh messaging)
+        <li> Clearer error messages for invalid <code>cylc broadcast</code>
+        commands.
+        <li> Allow suite and global config files to have different
+        date-time syntax (pre and post ISO-8601) for time intervals.
+    </ul>
+ 
+<a name="6.6.0"/>
+    <h2>6.6.0</h2>
+
+    <h3>6.6.0 enhancements</h3>
+    <ul>
+        <li> <b>Finer-grained authentication to suite daemons:</b>
+        <ul>
+            <li> All suites now reveal identity (name and owner) publicly.
+            <li> Public access can also be granted, via suite or global config,
+            to suite titles and descriptions, state total summaries, full
+            read-only information, and shutdown privileges.
+            <li> Possession of the suite passphrase is still required for full
+            control access (in the future, suite passphrases will be replaced
+            with proper user accounts in some form).
+        </ul>
+        <li> <b>Improved <code>cylc scan</code></b>: for all 6.6.0+ suites on
+        scanned hosts, scanning is faster, and suite identity can be printed
+        along with any other information revealed publicly, including state total
+        summaries.
+        <li> <b>Edit run (<code>cylc trigger --edit</code>) now prompts
+        for confirmation</b> before triggering the edited task.
+        <li> <b>Reduced memory use</b> by up to a factor of 2: default task runtime
+        settings are now shared instead of duplicated across tasks.
+        <li> <code>cylc gsummary</code> columns are now resizable again.
+        <li> <b>Bad global config files are no longer silently ignored</b>: a
+        bad site file (which users can't change) is ignored with a warning, and
+        a bad user config file causes cylc to abort.
+        <li> Message outputs are no longer automatically completed when a task
+        finishes, so <b>different message outputs can now be used to trigger
+            different workflows</b>.
+        <li> <b>Task event hook defaults</b> can now be set in global (site,
+        user) config files (as for suite event hooks in 6.5.0).
+        <li> Added a new example to the User Guide "Advanced Cycling" section,
+        on how to run R1 tasks in the final cycle point.
+    </ul>
+
+    <h3>6.6.0 fixes</h3>
+    <ul>
+        <li> Fail validation if the suite cycle point format is not
+        sufficiently fine-grained for the suite's cycling sequences (e.g.
+        if you try to do daily cycling with a cycle point format truncated
+        to the month, %Y-%m).
+        <li> Fixed a rare bug that could cause a suite daemon to crash
+        if a new broadcast command was actioned at the same time as the suite 
+        was writing broadcast settings to the state dump.
+        <li> Fixed insertion of a task family into a suite, broken at 6.5.0.
+    </ul>
 
-		<li> Documented use of global (site and user) job-init scripts.
 
+<a name="6.5.0"/>
+    <h2>6.5.0</h2>
 
-		<h3>6.4.1 fixes</h3>
+    <h3>6.5.0 enhancements</h3>
+    <ul>
+        <li> Detect and fail cyclic graphs.
+
+        <li> Set the default process pool size to 4, rather than number of
+        cores on the suite host (global config).
+
+        <li> General external event triggers: tasks can be triggered by
+        external events as well as by other tasks and the wall clock. The
+        external triggering system only needs to know the suite and host name.
+
+        <li> Log the origin of all suite commands: username, host, client
+        program name and UUID.
+
+        <li> Expiring tasks - tasks that become ready to run too far behind the
+        wall clock (this is optional and configurable) can be made to enter the
+        'expired' state and not submit their job.  Other tasks can trigger off
+        the expired state.
+
+        <li> General task event handlers: specify handlers and a list of events
+        to call them on. This is in addition to current event-specific handler
+        config. Built-in:
+        <ul>
+            <li> Remote task job logs retrieval.
+            <li> Task event email.
+            <li> Task event handlers can now be script templates.
+        </ul>
 
-		<li> Validate final cycle point after command line override (if supplied), not before.
+        <li> Event handlers can now retry on failure.
 
-		<li> Prevent <code>cylc gsummary</code> crashing if it can't get state
-		info from a suite because (for e.g.) the suite run directory has been
-		deleted.
+        <li> Allow setting of suite event hooks (under <code>[cylc]</code>) in
+        global (site/user) config.
+        <li> gcylc
+        <ul>
+            <li> active progress bars during suite initialisation, reload, and shutdown.
+            <li> tree view - task completion progress bars based on mean run times.
+        </ul>
+        <li> Configurable suite hold time.
+        <li> More information logged during suite reloads.
+        <li> Lock-breaking recovery mechanism for a public suite database
+        locked by an external process.
+        <li>  <code>cylc suite-state</code> - allow use of
+        <code>$CYLC_TASK_CYCLE_POINT</code> and cycle point offsets.
+        <li> gcylc graph view - suite daemon caches graph edges for better
+        performance.
+        <li> <code>cylc stop --kill</code>
+        <ul>
+            <li> if a job kill fails, poll the job to see if it was
+            hard-killed externally
+            <li> shut down the suite even if the job still exists but cannot be
+            killed.
+        </ul>
+    </ul>
 
-		<li> Don't let <code>cylc broadcast</code> to an invalid cycle point bring
-		down the suite.
 
-		<li> Clock-triggering syntax: allow the time offset to be omitted
-		completely if no offset is required.
+    <h3>6.5.0 fixes</h3>
+    <ul>
+        <li> Allow custom Jinja2 filter module names ending in 'y' or 'p'.
+        <li> Fixed "Stopping at None" gcylc status bar message for suites
+        restarted with no final cycle point.
+        <li> Disallow attempted use of both runahead limiting mechanisms at once
+        (the older <code>runahead limit</code> and newer <code>max active cycle
+            points</code>).
+        <li> Fixed errors in gcylc and monitor due to attempted connection during
+        suite initialization.
+				<li> Fixed a bug in the cylc-6.4.1 job kill mechanism: it reported "job
+				killed failed" even on a successful kill. A killed 'running' task would
+				still enter the 'failed' state due to task runtime error trapping, but
+				a killed 'submitted' task would stay in the 'submitted' state.
+    </ul>
 
-		<li> Better error message for a probable missing R1 graph section, if an
-		initial cycle time is given for a non-cycling graph.
 
-		<li> Prevent a DatabaseIntegrityError warning when an old task is
-		re-inserted into a suite.
+<a name="6.4.1"/>
+    <h2>6.4.1</h2>
 
-		<li> Corrected some command documentation for the poll, kill, hold,
-		release, and reset commands. 
+    <h3>6.4.1 enhancements</h3>
+    <ul>
 
-		<li> <code>gcylc</code> graph view: fixed filtering of family base nodes.
+    <li> Suite and task URLs, browseable from <code>gcylc</code> and <code>cylc
+        doc</code>.
 
-		<li> Fixed broadcast of float values (e.g. timeout settings).
+    <li> Suite "scripting" items can be shortened: <code>command
+        scripting</code> to <code>script</code>; <code>post-command
+        scripting</code> to <code>post-script</code>; etc.
 
-		<li> Fail validation of mismatched parentheses in the graph.
+    <li> Filtering of graph nodes by <code>cylc graph --filter</code>.
+
+    <li> <code>cylc graph</code> and <code>gcylc</code> graph view: nodes that
+    are filtered out are now grouped and replaced by dotted "scissor nodes"
+    rather than breaking the graph into separate chunks.
+
+    <li> <code>gcylc</code> text tree view,
+    <ul>
+        <li> display job submit number, retry number, and retry delay interval,
+        in the "latest message" column.
+        <li> display [user@]host and batch system name even after job submission
+        failure.
+        <li> report failure of poll and kill commands (e.g. due to task host down) in
+        the "latest message" column.
+    </ul>
+
+    <li> Improved documentation of suite share and task work directory location
+    and use.
+
+    <li> Better error message for job submission failure due to choice of a
+    batch system that is not available on the task host.
+
+    <li> Documented use of global (site and user) job-init scripts.
+    </ul>
+
+
+    <h3>6.4.1 fixes</h3>
+    <ul>
+
+    <li> Validate final cycle point after command line override (if supplied), not before.
+
+    <li> Prevent <code>cylc gsummary</code> crashing if it can't get state
+    info from a suite because (e.g.) the suite run directory has been
+    deleted.
+
+    <li> Don't let <code>cylc broadcast</code> to an invalid cycle point bring
+    down the suite.
+
+    <li> Clock-triggering syntax: allow the time offset to be omitted
+    completely if no offset is required.
+
+    <li> Better error message for a probable missing R1 graph section, if an
+    initial cycle time is given for a non-cycling graph.
+
+    <li> Prevent a DatabaseIntegrityError warning when an old task is
+    re-inserted into a suite.
+
+    <li> Corrected some command documentation for the poll, kill, hold,
+    release, and reset commands.
+
+    <li> <code>gcylc</code> graph view: fixed filtering of family base nodes.
+
+    <li> Fixed broadcast of float values (e.g. timeout settings).
+
+    <li> Fail validation of mismatched parentheses in the graph.
+    </ul>
 
 
 <a name="6.4.0"/>
@@ -163,10 +470,10 @@ For the definitive record see the cylc git repository change log.</p>
         <li> If gcylc is connected to a remote suite, allow it to view the logs
         of tasks on the (remote) suite host.
     </ul>
- 
+
 <a name="6.3.1"/>
     <h2>6.3.1</h2>
-  		<ul>
+      <ul>
             <li> Support use of conditional dependencies involving the initial
             cycle point.
             <li> Create missing parts of the suite run directory tree if
@@ -184,85 +491,85 @@ For the definitive record see the cylc git repository change log.</p>
 <a name="6.3.0"/>
     <h2>6.3.0</h2>
 
-		<h3>6.3.0 enhancements</h3>
-  		<ul>
-					<li> Support <em>edit run</em> functionality:
-					<code>cylc trigger --edit</code> or
-					right-click on a task in the GUI to edit the task job script
-					just before manual triggering.
-					<li> Support "hold after cycle point" via the run, restart, and hold
-					commands.
-					<li> New utility to compare suite graphs <code>cylc
-							graph-diff</code>.
-					<li> If a suite run database lock is encountered and does not
-					resolve in the allowed time (unlikely but possible) cylc now writes
-					any un-executed db operations to disk before shutdown and loads them
-					on restart to prevent an incomplete database.
-			</ul>
-	
-		<h3>6.3.0 fixes</h3>
-  		<ul>
-					<li> Fixed blank graph views that could result from rerunning a
-					suite from an existing GUI instance.
-					<li> Hold tasks that are killed manually, to prevent an immediate
-					retry (if retries are configured).
-					<li> Ensure that polling a SLURM job returns the right result even
-					if the job is still in SLURM's active job memory.
-					<li> Sort the GUI treeview execution time (dT) column, which is
-					displayed in ISO 8601 format, by time-value rather than
-					alphanumerically.
-			</ul>
-	
+    <h3>6.3.0 enhancements</h3>
+       <ul>
+          <li> Support <em>edit run</em> functionality:
+          <code>cylc trigger --edit</code> or
+          right-click on a task in the GUI to edit the task job script
+          just before manual triggering.
+          <li> Support "hold after cycle point" via the run, restart, and hold
+          commands.
+          <li> New utility to compare suite graphs <code>cylc
+              graph-diff</code>.
+          <li> If a suite run database lock is encountered and does not
+          resolve in the allowed time (unlikely but possible) cylc now writes
+          any un-executed db operations to disk before shutdown and loads them
+          on restart to prevent an incomplete database.
+      </ul>
+
+    <h3>6.3.0 fixes</h3>
+      <ul>
+          <li> Fixed blank graph views that could result from rerunning a
+          suite from an existing GUI instance.
+          <li> Hold tasks that are killed manually, to prevent an immediate
+          retry (if retries are configured).
+          <li> Ensure that polling a SLURM job returns the right result even
+          if the job is still in SLURM's active job memory.
+          <li> Sort the GUI treeview execution time (dT) column, which is
+          displayed in ISO 8601 format, by time-value rather than
+          alphanumerically.
+      </ul>
+
 <a name="6.2.0"/>
     <h2>6.2.0</h2>
 
-		<h3>6.2.0 enhancements</h3>
-  		<ul>
-					<li> Task state and name filtering now applies to all
-					<code>gcylc</code> views (previously it was just the text tree view).
-					<li> Optimized <code>gcylc</code> tree view updating - it is now
-					faster and smoother for very large active suites.
-					<li> Sort <code>gcylc</code> tree view date-time columns by date-time
-					value rather than as character strings.
-					<li> Added an option to <b>poll all active tasks</b> at once (via
-					the <code>gcylc</code> menu and the <code>cylc poll</code> command).
-					<li> Write parsed suite definitions, date-stamped, to the suite log
-					directory, on run, reload, and restart.
-					<li> A display-once option for the <code>cylc monitor</code>
-					in-terminal suite state display.
-					<li> Cleaned up the GUI suite shutdown dialog window.
-					<li> Improved error trapping for SLURM jobs.
-					<li> Added documentation of special/restricted/cold-start-only
-					initial cycle point behaviour, to the user guide.
-					<li> Accept ISO 8601 syntax for global configuration of task
-					messaging retry intervals.
-					<li> Fail validation of tasks with no explicit cycling defined (since
-					cylc-6.0.0, cycle point offsets in the graph no longer define 
-					cycling sequences for a task).
-					<li> New <em>extra large</em> dot size option for <code>gcylc</code>
-					task state icons (configured via <code>$HOME/.cylc/gcylc.rc</code>).
-					<li> Detect and fail circular runtime inheritance.
-					<li> Fail validation if a suite's initial cycle point is greater than
-					the final.
-					<li> Generate "graph movie" frames via a gcylc graph view menu option.
-			</ul>
-  	
-		<h3>6.2.0 fixes</h3>
-  		<ul>
-					<li> Fixed a bug (since 6.0.0) where jobs submitted to a batch
-					scheduler would have no output, error, or name directives, if no
-					directives were specified in the suite definition. 
-					<li> In simulation mode, properly stop the simulated submission of
-					jobs at suite shutdown.
-					<li> Fixed hold and release of retrying tasks.
-					<li> Fixed <code>cylc gpanel</code> start-up (broken in 6.1.2).
-			</ul>
-  
+    <h3>6.2.0 enhancements</h3>
+      <ul>
+          <li> Task state and name filtering now applies to all
+          <code>gcylc</code> views (previously it was just the text tree view).
+          <li> Optimized <code>gcylc</code> tree view updating - it is now
+          faster and smoother for very large active suites.
+          <li> Sort <code>gcylc</code> tree view date-time columns by date-time
+          value rather than as character strings.
+          <li> Added an option to <b>poll all active tasks</b> at once (via
+          the <code>gcylc</code> menu and the <code>cylc poll</code> command).
+          <li> Write parsed suite definitions, date-stamped, to the suite log
+          directory, on run, reload, and restart.
+          <li> A display-once option for the <code>cylc monitor</code>
+          in-terminal suite state display.
+          <li> Cleaned up the GUI suite shutdown dialog window.
+          <li> Improved error trapping for SLURM jobs.
+          <li> Added documentation of special/restricted/cold-start-only
+          initial cycle point behaviour, to the user guide.
+          <li> Accept ISO 8601 syntax for global configuration of task
+          messaging retry intervals.
+          <li> Fail validation of tasks with no explicit cycling defined (since
+          cylc-6.0.0, cycle point offsets in the graph no longer define
+          cycling sequences for a task).
+          <li> New <em>extra large</em> dot size option for <code>gcylc</code>
+          task state icons (configured via <code>$HOME/.cylc/gcylc.rc</code>).
+          <li> Detect and fail circular runtime inheritance.
+          <li> Fail validation if a suite's initial cycle point is greater than
+          the final.
+          <li> Generate "graph movie" frames via a gcylc graph view menu option.
+      </ul>
+
+    <h3>6.2.0 fixes</h3>
+      <ul>
+          <li> Fixed a bug (since 6.0.0) where jobs submitted to a batch
+          scheduler would have no output, error, or name directives, if no
+          directives were specified in the suite definition.
+          <li> In simulation mode, properly stop the simulated submission of
+          jobs at suite shutdown.
+          <li> Fixed hold and release of retrying tasks.
+          <li> Fixed <code>cylc gpanel</code> start-up (broken in 6.1.2).
+      </ul>
+
 <a name="6.1.2"/>
     <h2>6.1.2</h2>
 
-		<h3>6.1.2 enhancements</h3>
-  		<ul>
+    <h3>6.1.2 enhancements</h3>
+      <ul>
                 <li> In the <code>cylc gsummary</code> GUI, suite state
                 summaries can now be expanded to show the summary state for
                 each active cycle point.
@@ -271,9 +578,9 @@ For the definitive record see the cylc git repository change log.</p>
                 daemon.
                 <li> Updated some example suites to cylc-6 format.
         </ul>
-    
+
         <h3>6.1.2 fixes</h3>
-  		<ul>
+      <ul>
             <li> Fixed the <code>suite host self-identification = address</code>
             global config setting (it causes the suite host to be identified
             by IP address rather than host name in task job scripts).
@@ -286,109 +593,109 @@ For the definitive record see the cylc git repository change log.</p>
 <a name="6.1.1"/>
     <h2>6.1.1</h2>
 
-		<h3>6.1.1 enhancements</h3>
-  		<ul>
-				<li> new restricted monitoring mode for very large suites (by command
-				line option) for <code>cylc gui</code> and <code>cylc monitor</code>
-				<li> display clock triggers with finer resolution than the suite cycle
-				point format, if necessary
-		</ul>
-    
-		<h3>6.1.1 fixes</h3>
-  		<ul>
-				<li> if reloading a held suite, don't set finished tasks to the held
-				state.
-				<li> fixed the <code>cylc list</code> command option for listing 
-				specific task instances between given cycle points
-				<li> prevent "database integrity" warnings when reloading tasks in the
-				'waiting' state
-				<li> renamed a potentially conflicting internal-use variable called
-				<code>$DOMAIN</code> in task job scripts
-				<li> other minor fixes
-		</ul>
-  	
+    <h3>6.1.1 enhancements</h3>
+      <ul>
+        <li> new restricted monitoring mode for very large suites (by command
+        line option) for <code>cylc gui</code> and <code>cylc monitor</code>
+        <li> display clock triggers with finer resolution than the suite cycle
+        point format, if necessary
+    </ul>
+
+    <h3>6.1.1 fixes</h3>
+      <ul>
+        <li> if reloading a held suite, don't set finished tasks to the held
+        state.
+        <li> fixed the <code>cylc list</code> command option for listing
+        specific task instances between given cycle points
+        <li> prevent "database integrity" warnings when reloading tasks in the
+        'waiting' state
+        <li> renamed a potentially conflicting internal-use variable called
+        <code>$DOMAIN</code> in task job scripts
+        <li> other minor fixes
+    </ul>
+
 <a name="6.1.0"/>
     <h2>6.1.0</h2>
-    
-		<h3>6.1.0 enhancements</h3>
+
+    <h3>6.1.0 enhancements</h3>
     <ul>
-				<li> Suite daemon memory footprint reduced by 7-8%.
-				<li> <code>- + % @</code> characters are now allowed in task names.
-				<li> Print more information about Jinja2 errors.
-				<li> String templates for overriding batch system specific commands
-				have changed.
-				<li> Updated syntax highlighting for the Kate and gedit editors.
-				<li> Show job batch system name in gcylc text view.
-				<li> If using the legacy cycle point format <code>%Y%m%d%H</code>,
-				allow <code>initial/final cycle point</code> in the same format. 
-				<li> The <code>cylc scan</code> command now scans multiple hosts, like
-				<code>cylc gsummary</code> as configured in site/user config files.
-				<li> There are now two suite-run databases: the original is for
-				read-only use by external processes; a new one under the suite state
-				directory is for private use by the suite daemon.
-				<li> Job submission improvements:
-				<ul>
-						<li> new site/user config item <code>cylc executable</code> allows
-						you to explicitly configure the location of cylc on remote hosts.
-						<li> job files now site/user configurable via
-						<code>$HOME/.cylc/job-init-env.sh</code>, 
-						<code>$CYLC_DIR/conf/job-init-env.sh</code>, 
-						<code>$CYLC_DIR/conf/job-init-env-default.sh</code>
-						<li> (i.e. we no longer rely on sourcing <code>/etc/profile</code>
-						and <code>$HOME/.profile</code> - but still done for back compat)
-						<li> job submission is now handled by a cylc command on the task
-						host rather than an elaborate shell command generated on the suite
-						host
-						<li> new site/user config item <code>remote copy template</code>
-						for configuration the <code>scp</code> command for copying files to
-						remote hosts
-						<li> the site/user config items <code>remote shell template</code>
-						and <code>use login shell</code> are now used for job submission.
-						<li> new site/user config item <code>suite to job
-								environment</code>: a list of environment variables to be
-						passed from a suite to its jobs.
-				</ul>
-		</ul>
-	
-		<h3>6.1.0 fixes</h3>
+        <li> Suite daemon memory footprint reduced by 7-8%.
+        <li> <code>- + % @</code> characters are now allowed in task names.
+        <li> Print more information about Jinja2 errors.
+        <li> String templates for overriding batch system specific commands
+        have changed.
+        <li> Updated syntax highlighting for the Kate and gedit editors.
+        <li> Show job batch system name in gcylc text view.
+        <li> If using the legacy cycle point format <code>%Y%m%d%H</code>,
+        allow <code>initial/final cycle point</code> in the same format.
+        <li> The <code>cylc scan</code> command now scans multiple hosts, like
+        <code>cylc gsummary</code> as configured in site/user config files.
+        <li> There are now two suite-run databases: the original is for
+        read-only use by external processes; a new one under the suite state
+        directory is for private use by the suite daemon.
+        <li> Job submission improvements:
+        <ul>
+            <li> new site/user config item <code>cylc executable</code> allows
+            you to explicitly configure the location of cylc on remote hosts.
+            <li> job files now site/user configurable via
+            <code>$HOME/.cylc/job-init-env.sh</code>,
+            <code>$CYLC_DIR/conf/job-init-env.sh</code>,
+            <code>$CYLC_DIR/conf/job-init-env-default.sh</code>
+            <li> (i.e. we no longer rely on sourcing <code>/etc/profile</code>
+            and <code>$HOME/.profile</code> - but still done for back compat)
+            <li> job submission is now handled by a cylc command on the task
+            host rather than an elaborate shell command generated on the suite
+            host
+            <li> new site/user config item <code>remote copy template</code>
+            for configuring the <code>scp</code> command for copying files to
+            remote hosts
+            <li> the site/user config items <code>remote shell template</code>
+            and <code>use login shell</code> are now used for job submission.
+            <li> new site/user config item <code>suite to job
+                environment</code>: a list of environment variables to be
+            passed from a suite to its jobs.
+        </ul>
+    </ul>
+
+    <h3>6.1.0 fixes</h3>
     <ul>
-				<li> Fixed crash after a reload that removes tasks from the suite
-				definition (required a restart to recover).
-				<li> Fixed display of new tasks in the gcylc dot and text views
-				after a suite reload that adds tasks to the suite definition.
-				<li> Fixed automatic upgrade by cylc-6 of cylc-5
-				<code>startup-task:finish</code> triggers.
-				<li> Fixed a bug introduced to gcylc at 6.0.2: the graph view
-				displayed nothing in grouped mode if all nodes were families.
-				<li> Fixed backward compatibility for cylc-5 <code>gcylc.rc</code>
-				files (was broken by the old cylc-5 'runahead' task state).
-		</ul>
-	
+        <li> Fixed crash after a reload that removes tasks from the suite
+        definition (required a restart to recover).
+        <li> Fixed display of new tasks in the gcylc dot and text views
+        after a suite reload that adds tasks to the suite definition.
+        <li> Fixed automatic upgrade by cylc-6 of cylc-5
+        <code>startup-task:finish</code> triggers.
+        <li> Fixed a bug introduced to gcylc at 6.0.2: the graph view
+        displayed nothing in grouped mode if all nodes were families.
+        <li> Fixed backward compatibility for cylc-5 <code>gcylc.rc</code>
+        files (was broken by the old cylc-5 'runahead' task state).
+    </ul>
+
 <a name="6.0.2"/>
     <h2>6.0.2</h2>
-    
+
     <p>Minor bug fixes and improvements since cylc-6.0.1</p>
     <ul>
-				<li> handle tasks in the cylc-5 <code>runahead</code> state properly,
-				when restarting from a cylc-5 suite, 
-				<li> fixed polling of tasks whose job submission failed
-				<li> fixed reporting of task log file locations after a suite reload
-				<li> fixed use of a family name regex in the <code>cylc release</code>
-				command.
-				<li> gcylc graph view: crop entire cycle points from the graph if they
-				contain only base nodes (i.e. nodes with no corresponding task proxy),
-				even if earlier cycle points are not empty (typically R1 tasks rendered
-				immortal by ongoing dependence on them)
-				<li> fixed job polling under the SGE batch scheduler
-				<li> stopped newly spawned tasks from running if the suite is held
+        <li> handle tasks in the cylc-5 <code>runahead</code> state properly,
+        when restarting from a cylc-5 suite
+        <li> fixed polling of tasks whose job submission failed
+        <li> fixed reporting of task log file locations after a suite reload
+        <li> fixed use of a family name regex in the <code>cylc release</code>
+        command.
+        <li> gcylc graph view: crop entire cycle points from the graph if they
+        contain only base nodes (i.e. nodes with no corresponding task proxy),
+        even if earlier cycle points are not empty (typically R1 tasks rendered
+        immortal by ongoing dependence on them)
+        <li> fixed job polling under the SGE batch scheduler
+        <li> stopped newly spawned tasks from running if the suite is held
     </ul>
 
 <a name="6.0.1"/>
     <h2>6.0.1</h2>
-    
+
     <p>Minor bug fixes and improvements since cylc-6.</p>
     <ul>
-        <li><b>standardise cycle point formats</b> entered on the CLI for 
+        <li><b>standardise cycle point formats</b> entered on the CLI for
         commands such as <code>cylc kill</code>, so that the exact same format
         as the running suite is not required.
         <li><b>provide a workaround for
@@ -418,7 +725,7 @@ For the definitive record see the cylc git repository change log.</p>
 
 <a name="6.0.0"/>
     <h2>6.0.0</h2>
-    
+
     <p>6.0.0 saw the <b>ISO 8601 date-time cycling syntax refactor</b>, which
     introduced ISO 8601-derived replacement syntax for date-times, dependency
     sections, graph offsets, and timeouts, amongst other things. This allowed:
@@ -426,59 +733,59 @@ For the definitive record see the cylc git repository change log.</p>
     <ul>
         <li><b>specific task dependencies at any cycle point</b> - special
         behaviour at the last cycle point, the second cycle point, midway
-        through...</li>
+        through...
         <li><b>replacement of start-up and mixed-async tasks</b> with tasks
-        that run only at initial/near-initial cycle points</li>
+        that run only at initial/near-initial cycle points
         <li><b>extremely flexible cycling</b> - cycle <b>sub-hourly</b>, every
-        1 day 1 second, every Christmas...</li>
+        1 day 1 second, every Christmas...
         <li><b>360-day, 365-day, 366-day calendar support</b> for date-time
-        cycling</li>
-        <li>using prehistoric or futuristic date-times</li>
-        <li>formal time zone awareness</li>
+        cycling
+        <li>using prehistoric or futuristic date-times
+        <li>formal time zone awareness
         <li>specify a final cycle point relative to the initial cycle point,
-        e.g. 3 months later</li>
+        e.g. 3 months later (see the sketch after this list)
         <li>specify dependence on tasks near the initial cycle point from
-        other cycle points</li>
+        other cycle points
         <li>backwards compatibility with the cylc 5 syntax (now deprecated).
-        </li>
-        <li><b>cylc 5to6</b> command to help convert cylc 5 suites to cylc 6.</li>
+        
+        <li><b>cylc 5to6</b> command to help convert cylc 5 suites to cylc 6.
     </ul>
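 
     <p>A minimal sketch of the new ISO 8601 cycling syntax; the task names,
     the date-time, and the relative <code>+P3M</code> final cycle point form
     are illustrative assumptions, not taken from a real suite:</p>
     <pre>
 [scheduling]
     initial cycle point = 20160101T00Z
     # a final cycle point relative to the initial one (3 months later)
     final cycle point = +P3M
     [[dependencies]]
         # run once, at the initial cycle point
         [[[R1]]]
             graph = "prep => model"
         # then repeat daily
         [[[P1D]]]
             graph = "model[-P1D] => model => post"
     </pre>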
     <p>Other changes:</p>
     <ul>
         <li><b>work/ and log/job restructure</b> - now based around CYCLE
-        subdirectories</li>
+        subdirectories
         <li><b>new <samp>log/job/CYCLE/TASK_NAME/SUBMIT_NUM/job-activity.log
         </samp> file</b> to store a job's submission, poll, kill, and event
-        hook output</li>
+        hook output
         <li><b>rename site.rc and user.rc configs to <samp>global.rc</samp>
-        </b></li>
+        </b>
         <li><b>runahead task pool</b> - tasks that used to be in the
         <em>runahead</em> state are now in a separate internal pool and
-        aren't visible in the GUI</li>
-        <li><b>use processes instead of threads</b></li>
+        aren't visible in the GUI
+        <li><b>use processes instead of threads</b>
         <li><b>use ~30% less CPU time</b> for busy suites compared to 5.4.14
-        </li>
+        
         <li><b>integer cycling</b> - cycle over integers, not just date-times
         <li><b>cycle point subgraphs</b> - support grouping by cycle point in
-        cylc graph and the cylc gui Graph View</li>
-        <li><b>cylc gui family state icon</b></li>
-        <li><b>cylc gui text view layout improvement</b></li>
+        cylc graph and the cylc gui Graph View
+        <li><b>cylc gui family state icon</b>
+        <li><b>cylc gui text view layout improvement</b>
         <li><b>remove async repeating tasks</b> in favour of integer cycling
-        solutions</li>
+        solutions
         <li>export <samp>CYLC_TASK_SUBMIT_NUMBER</samp> for use in task
-        scripts</li>
-        <li>(developer) automated Travis testing support</li>
-        <li>implicit cycling is no longer supported</li>
+        scripts (see the sketch after this list)
+        <li>(developer) automated Travis testing support
+        <li>implicit cycling is no longer supported
         <li>more flexible runahead limiting algorithm, based around a maximum
-        number of active cycle points</li>
+        number of active cycle points
         <li>deprecated <samp>--tag</samp> options to some cylc commands in
-        favour of <samp>--point</samp></li>
-        <li>deprecated cylc cycletime in favour of cylc cycle-point</li>
-        <li>improved vim syntax file</li>
-        <li>fix passphrase creation on reregister</li>
-        <li>remove lockserver</li>
-        <li>cylc scan: default timeout</li>
-        <li>fix handling of missing nested include-files</li>
+        favour of <samp>--point</samp>
+        <li>deprecated cylc cycletime in favour of cylc cycle-point
+        <li>improved vim syntax file
+        <li>fix passphrase creation on reregister
+        <li>remove lockserver
+        <li>cylc scan: default timeout
+        <li>fix handling of missing nested include-files
     </ul>
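 
     <p>A hypothetical use of the newly exported
     <samp>CYLC_TASK_SUBMIT_NUMBER</samp> variable in task scripting; the
     task name and command are illustrative assumptions, using the cylc-6
     era <code>command scripting</code> item:</p>
     <pre>
 [runtime]
     [[archive]]
         command scripting = """
 # keep output from each submission (retry or re-trigger) separate
 cp output.nc output.${CYLC_TASK_SUBMIT_NUMBER}.nc
 """
     </pre>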
 
 <a name="5.4.14"/>
@@ -493,612 +800,612 @@ For the definitive record see the cylc git repository change log.</p>
         cold-start OR construct is not used with the inter-cycle
         triggers, restarting the suite would wipe out the inter-cycle
         triggers
-        <li> bug fix: task job scripts could infrequently be corrupted 
+        <li> bug fix: task job scripts could infrequently be corrupted
         if suite and task host shared a common filesystem
         <li> bug fix: manually retriggering a task now resets its
         outputs to not completed, so that a dependent downstream task,
-        if reset to 'waiting', will wait on it again 
+        if reset to 'waiting', will wait on it again
         <li> maintain Jinja2 validity during inlined edit sessions (when
         include-files are temporarily inlined in the live suite
         definition)
     </ul>
 
-	
+
 <a name="5.4.13"/>
-		<h2>5.4.13</h2>
+    <h2>5.4.13</h2>
 
-		<ul>
-				<li>bug fix: reloading the suite definition after changes was
-				causing tasks in the 'retrying' state at reload time not to
-				resubmit.</li>
-				<li>If an OSError or IOError occurs when attempting to write a
-				state dump file, cylc will retry up to 5 times before shutting
-				down the suite.</li> 
-				<li>full support - including job poll and kill - for the SLURM
-				resource manager.</li>
-				<li>timeout after 10 sec if network problems prevent cylc from
-				coping the suite environment file to task hosts on suite
-				restart.</li>
-		</ul>
+    <ul>
+        <li>bug fix: reloading the suite definition after changes was
+        causing tasks in the 'retrying' state at reload time not to
+        resubmit.
+        <li>If an OSError or IOError occurs when attempting to write a
+        state dump file, cylc will retry up to 5 times before shutting
+        down the suite.
+        <li>full support - including job poll and kill - for the SLURM
+        resource manager.
+        <li>timeout after 10 sec if network problems prevent cylc from
+        copying the suite environment file to task hosts on suite
+        restart.
+    </ul>
 
 <a name="5.4.12"/>
-		<h2>5.4.12</h2>
+    <h2>5.4.12</h2>
 
-		<ul>
-				<li>A choice of three sizes for the gcylc task state dot icon:
-				<em>small</em> (original), <em>medium</em> (new default),
-				and <em>large</em>.</li>
+    <ul>
+        <li>A choice of three sizes for the gcylc task state dot icon:
+        <em>small</em> (original), <em>medium</em> (new default),
+        and <em>large</em>.
 
-				<li>Added documentation of <code>gcylc.rc</code> configuration
-				items to the user guide.</li>
+        <li>Added documentation of <code>gcylc.rc</code> configuration
+        items to the user guide.
 
-				<li>The default ordering of tasks and families in the gcylc text
-				and dot views is now the order in which they are defined in the
-				<code>suite.rc</code> file. </li>
+        <li>The default ordering of tasks and families in the gcylc text
+        and dot views is now the order in which they are defined in the
+        <code>suite.rc</code> file. 
 
-				<li>Fixed a bug which led to a <code>state</code> file in the
-				current working directory being loaded for a suite restart
-				instead of the correct state dump under the suite run directory.</li>
+        <li>Fixed a bug which led to a <code>state</code> file in the
+        current working directory being loaded for a suite restart
+        instead of the correct state dump under the suite run directory.
 
-				<li>Fixed a bug (since 5.4.10) in which the
-				<code>-j/--jinja2</code> option to <code>cylc view</code> did
-				not automatically imply the <code>-i/--inline</code></li>
-				option.
-		</ul>
+        <li>Fixed a bug (since 5.4.10) in which the
+        <code>-j/--jinja2</code> option to <code>cylc view</code> did
+        not automatically imply the <code>-i/--inline</code>
+        option.
+    </ul>
 
 <a name="5.4.11"/>
-		<h2>5.4.11</h2>
+    <h2>5.4.11</h2>
 
-		<ul>
-				<li>Fixed a recent bug (affecting 5.4.8 - 5.4.10) that prevents
-				successful <code>cylc restart SUITE</code> after using
-						<code>cylc reload SUITE</code>.</li>
-
-				<li>Fixed a Python traceback caused by attempting to
-				transpose an empty gcylc dot view prior to starting a
-				suite.</li>
-
-				<li>Minor simulation mode changes:</li>
-				<ul>
-						<li>simulation mode now runs on the real time clock</li>
-						<li>made the suite summary reflect task state changes to
-						'running' even if nothing else is happening at the time.</li>
-				</ul>
-		</ul>
+    <ul>
+        <li>Fixed a recent bug (affecting 5.4.8 - 5.4.10) that prevents
+        successful <code>cylc restart SUITE</code> after using
+            <code>cylc reload SUITE</code>.
+
+        <li>Fixed a Python traceback caused by attempting to
+        transpose an empty gcylc dot view prior to starting a
+        suite.
+
+        <li>Minor simulation mode changes:
+        <ul>
+            <li>simulation mode now runs on the real time clock
+            <li>made the suite summary reflect task state changes to
+            'running' even if nothing else is happening at the time.
+        </ul>
+    </ul>
 
 <a name="5.4.10"/>
-		<h2>5.4.10</h2>
+    <h2>5.4.10</h2>
 
-		<ul>
+    <ul>
 
-		<li>Allow multiple event handlers per event hook.</li>
+    <li>Allow multiple event handlers per event hook.
 
-		<li>Fixed a task polling bug affecting 5.4.8 - 5.4.9: poll results
-		for a hard killed task were logged correctly but did not
-		automatically set the task proxy to the 'failed' state.</li>
+    <li>Fixed a task polling bug affecting 5.4.8 - 5.4.9: poll results
+    for a hard killed task were logged correctly but did not
+    automatically set the task proxy to the 'failed' state.
 
-		<li>Fixed a <code>cylc submit</code> bug affecting 5.4.5 - 5.4.9: 
-		If used <em>with the suite running at the same time</em> (not
-		recommended) the submit command would wipe out the port number in
-		the <code>cylc-suite-env</code> file, resulting in tasks losing
-		contact with their suite daemon.
-		</li>
+    <li>Fixed a <code>cylc submit</code> bug affecting 5.4.5 - 5.4.9:
+    If used <em>with the suite running at the same time</em> (not
+    recommended) the submit command would wipe out the port number in
+    the <code>cylc-suite-env</code> file, resulting in tasks losing
+    contact with their suite daemon.
+    
 
-		<li>Better handling of OSError on job submission commands.</li>
+    <li>Better handling of OSError on job submission commands.
 
-		<li>gcylc - correct log file sorting above 10 retries.</li>
+    <li>gcylc - correct log file sorting above 10 retries.
 
-		</ul>
+    </ul>
 
 <a name="5.4.9"/>
-		<h2>5.4.9</h2>
+    <h2>5.4.9</h2>
 
-		<ul>
-				<li>Fixed a bug affecting <b>5.4.5</b> through <b>5.4.8</b>: use
-				of <code>cylc submit</code> would cause task messaging failures if
-				the suite involved was running at the time.</li>
-		</ul>
+    <ul>
+        <li>Fixed a bug affecting <b>5.4.5</b> through <b>5.4.8</b>: use
+        of <code>cylc submit</code> would cause task messaging failures if
+        the suite involved was running at the time.
+    </ul>
 
 <a name="5.4.8"/>
-		<h2>5.4.8</h2>
+    <h2>5.4.8</h2>
 
-		<ul>
-				<li>Support job vacation in loadleveler (tasks killed by SIGUSR1 and 
-				then restarted later)</li>
-				<li>Reimplemented the <code>sequential</code> special task type
-				using automatically-generated inter-cycle triggers rather than
-				delayed spawning - they now behave "sequentially" under
-				all circumstances.</li>
-				<li>Delete the port file if suite start-up aborts early on</li>
-				<li>A new command, "cylc check-versions SUITE" checks that
-				the version of cylc invoked on each of SUITE's task hosts is the
-				same as *this* version, or else reports the remote versions.</li>
-				<li>Added task and family titles and descriptions to task
-				mouse-hover popups in the GUI.</li>
-				<li>Abort immediately if the suite database fails</li>
-				<li>Dropped redundant pre-action state dumps</li>
-				<li>Allow overlapping inheritance in a broadcast command</li>
-				<li>Fixed a bug that could (rarely) cause a wrong poll result on
-				running tasks at restart.</li>
-				<li>Restored support for user-defined job submission methods</li>
-				<li>Stopped ignoring stop cycle in tasks inserted via the GUI </li>
-				<li>Discard, rather than hold, inserted tasks that have passed
-				their private stop cycle (if they have one)</li>
-				<li>Ignore poll results if tasks have moved on since the
-				poll was initiated.</li>
-				<li>Better detect if a task host is the suite host</li>
-				<li>Fixed a bug that could cause proliferation of GUI error dialogs</li>
-				<li>Fix validation of hyphenated family-triggers where the
-				pre-hyphen part of the name matches another name.</li>
-				<li>Allow auto suite-polling tasks to explicitly unset inherited
-				command scripting. </li>
-		</ul>
+    <ul>
+        <li>Support job vacation in loadleveler (tasks killed by SIGUSR1 and
+        then restarted later)
+        <li>Reimplemented the <code>sequential</code> special task type
+        using automatically-generated inter-cycle triggers rather than
+        delayed spawning - they now behave "sequentially" under
+        all circumstances.
+        <li>Delete the port file if suite start-up aborts early on
+        <li>A new command, "cylc check-versions SUITE" checks that
+        the version of cylc invoked on each of SUITE's task hosts is the
+        same as <em>this</em> version, or else reports the remote versions.
+        <li>Added task and family titles and descriptions to task
+        mouse-hover popups in the GUI.
+        <li>Abort immediately if the suite database fails
+        <li>Dropped redundant pre-action state dumps
+        <li>Allow overlapping inheritance in a broadcast command
+        <li>Fixed a bug that could (rarely) cause a wrong poll result on
+        running tasks at restart.
+        <li>Restored support for user-defined job submission methods
+        <li>Stopped ignoring stop cycle in tasks inserted via the GUI 
+        <li>Discard, rather than hold, inserted tasks that have passed
+        their private stop cycle (if they have one)
+        <li>Ignore poll results if tasks have moved on since the
+        poll was initiated.
+        <li>Better detect if a task host is the suite host
+        <li>Fixed a bug that could cause proliferation of GUI error dialogs
+        <li>Fix validation of hyphenated family-triggers where the
+        pre-hyphen part of the name matches another name.
+        <li>Allow auto suite-polling tasks to explicitly unset inherited
+        command scripting. 
+    </ul>
 
 
 <a name="5.4.7"/>
-		<h2>5.4.7</h2>
+    <h2>5.4.7</h2>
 
-		<ul>
-				<li>Fixed a bug introduced in 5.4.5 that prevents releasing a
-				held suite with asynchronous (non-cycling) tasks that have not
-				run yet (e.g. with use of <code>cylc run --hold</code>)</li>
+    <ul>
+        <li>Fixed a bug introduced in 5.4.5 that prevents releasing a
+        held suite with asynchronous (non-cycling) tasks that have not
+        run yet (e.g. with use of <code>cylc run --hold</code>)
 
-				<li>Task messaging does not bother retrying if the corresponding
-				task proxy has been removed from the suite.</li>
+        <li>Task messaging does not bother retrying if the corresponding
+        task proxy has been removed from the suite.
 
-				<li>Suicide triggering is now logged like other triggering
-				events.</li>
+        <li>Suicide triggering is now logged like other triggering
+        events.
 
-				<li>A warning is logged if an active task suicides.</li>
+        <li>A warning is logged if an active task suicides.
 
-				<li>Fixed an error in the suicide trigger tutorial suite.</li>
+        <li>Fixed an error in the suicide trigger tutorial suite.
+
+        <li>Pre-initial-cycle triggers are now ignored for
+        message-output triggers.
+    </ul>
 
-				<li>Pre-initial-cycle triggers are now ignored for
-				message-output triggers.</li>
-		</ul>
-	
 
 <a name="5.4.6"/>
-		<h2>5.4.6</h2>
+    <h2>5.4.6</h2>
 
-		<ul>
-				<li>Improved suite state-dumping with atomic updates - avoids
-				state file corruption even in the event of a power failure.</li>
+    <ul>
+        <li>Improved suite state-dumping with atomic updates - avoids
+        state file corruption even in the event of a power failure.
 
-				<li>Run <code>background</code> and <code>at</code> jobs in 
-				their own process groups so that task job kill can get even
-				their sub-processes.</li>
+        <li>Run <code>background</code> and <code>at</code> jobs in
+        their own process groups so that task job kill can get even
+        their sub-processes.
 
-				<li>Dependency graphs: fixed an abort caused by use of
-				unnecessary brackets around pre-initial-cycle triggers.</li>
+        <li>Dependency graphs: fixed an abort caused by use of
+        unnecessary brackets around pre-initial-cycle triggers.
 
-				<li>Catch <code>inherit = None</code> in the runtime inheritance
-				hierarchy (this is illegal without one or more secondary
-				parents).</li>
+        <li>Catch <code>inherit = None</code> in the runtime inheritance
+        hierarchy (this is illegal without one or more secondary
+        parents).
+
+        <li>Handle unknown task states in the GUI (provides backward
+        compatibility for pre-5.4.5 suite daemons).
+    </ul>
 
-				<li>Handle unknown task states in the GUI (provides backward
-				compatibity for pre-5.4.5 suite daemons).</li>
-		</ul>
-	
 <a name="5.4.5"/>
-		<h2>5.4.5</h2>
+    <h2>5.4.5</h2>
 
-		<ul>
-				<li>Simplified polling for tasks submitted to <em>loadleveler</em>,
-				<em>pbs</em>, <em>sge</em>, <em>background</em>, or <em>at</em>: </li>
-				<ul>
-						<li> <em>submitted</em> - job in the queueing system, but has no
-						task status file. </li>
-						<li> <em>submission failed</em> - job not in the queueing system, and has no
-						task status file. </li>
-						<li> <em>running</em> - job in the queueing system, and has a 
-						task status file. </li>
-						<li> <em>succeeded</em> or <em>failed</em> - job not in the queueing system but has a 
-						task status file. </li>
-				</ul>
-				
-				<p/>
-				Additionally, an old task status file, if present, will be deleted at submission time.
+    <ul>
+        <li>Simplified polling for tasks submitted to <em>loadleveler</em>,
+        <em>pbs</em>, <em>sge</em>, <em>background</em>, or <em>at</em>: 
+        <ul>
+            <li> <em>submitted</em> - job in the queueing system, but has no
+            task status file. 
+            <li> <em>submission failed</em> - job not in the queueing system, and has no
+            task status file. 
+            <li> <em>running</em> - job in the queueing system, and has a
+            task status file. 
+            <li> <em>succeeded</em> or <em>failed</em> - job not in the queueing system but has a
+            task status file. 
+        </ul>
 
-				<li>More cylc daemon performance enhancements for large suites.</li>
+        <p/>
+        Additionally, an old task status file, if present, will be deleted at submission time.
 
-				<li>The <code>cylc cat-log</code> command can now print remote task logs.</li>
+        <li>More cylc daemon performance enhancements for large suites.
 
-				<li>Changed the <em>submitting</em> task state name to
-				<em>ready</em> - meaning the task is ready to run and has been
-				passed to the job submission thread in the cylc daemon.</li>
+        <li>The <code>cylc cat-log</code> command can now print remote task logs.
 
-				<li>New User Guide sections under <em>Suite Design Principles:</em></li>
-				<ul>
-						<li> <em>Factor Out Common Configuration</em></li>
-						<li> <em>Use the Graph for Scheduling</em></li>
-						<li> <em>Use Suite Visualization</em> </li>
-				</ul>
+        <li>Changed the <em>submitting</em> task state name to
+        <em>ready</em> - meaning the task is ready to run and has been
+        passed to the job submission thread in the cylc daemon.
 
-				<li>New User Guide section - <em>Style Guide</em> </li>
+        <li>New User Guide sections under <em>Suite Design Principles:</em>
+        <ul>
+            <li> <em>Factor Out Common Configuration</em>
+            <li> <em>Use the Graph for Scheduling</em>
+            <li> <em>Use Suite Visualization</em> 
+        </ul>
 
-				<li>Added a command wrapper script, and documentation, to
-				facilate use of multiple versions of cylc at once - so you can
-				run new suites at new versions of cylc without having to upgrade
-				existing long-running suites.</li>
+        <li>New User Guide section - <em>Style Guide</em> 
 
-				<li>Handle a known problem with the ext4 filesystem that could wipe
-				out cylc state dump files in the event of a power failure. </li>
+        <li>Added a command wrapper script, and documentation, to
+        facilitate use of multiple versions of cylc at once - so you can
+        run new suites at new versions of cylc without having to upgrade
+        existing long-running suites.
 
-				<li>Graceful shutdown on loading a corrupted restart state dump file.</li>
+        <li>Handle a known problem with the ext4 filesystem that could wipe
+        out cylc state dump files in the event of a power failure. 
 
-				<li>Use the normal suite run directory for tasks executed by the 
-				<code>cylc submit</code> command </li>
+        <li>Graceful shutdown on loading a corrupted restart state dump file.
 
+        <li>Use the normal suite run directory for tasks executed by the
+        <code>cylc submit</code> command 
 
-				<li>Moved some common log messages to debug level.</li>
 
-		</ul>
-	
+        <li>Moved some common log messages to debug level.
+
+    </ul>
+
 <a name="5.4.4"/>
-		<h2>5.4.4</h2>
+    <h2>5.4.4</h2>
 
-		<ul>
-				<li>New <code>[runtime][NAME][environment filter]</code> allows
-				inherited variables to be explicitly included or excluded from
-				task environments.</li>
+    <ul>
+        <li>New <code>[runtime][NAME][environment filter]</code> allows
+        inherited variables to be explicitly included or excluded from
+        task environments (see the sketch after this list).
 
-				<li><code>cylc stop</code> command help improved.</li>
+        <li><code>cylc stop</code> command help improved.
 
-				<li>Shut down cleanly if access to the suite run directory is
-				lost.</li>
+        <li>Shut down cleanly if access to the suite run directory is
+        lost.
+
+        <li>Fixed graphing of suite start up when several subsequent
+        cycles depend on an initial asynchronous task.
+    </ul>
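+
+    <p>A minimal sketch of the new environment filter; the namespace and
+    variable names are illustrative assumptions, and the
+    <code>include</code> item is assumed to take a comma-separated list:</p>
+    <pre>
+[runtime]
+    [[mytask]]
+        [[[environment filter]]]
+            # only these inherited variables reach the task environment
+            include = MODEL_DIR, ANALYSIS_TIME
+    </pre>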
 
-				<li>Fixed graphing of suite start up when several subsequent
-				cycles depend on an initial asynchronous task.</li>
-		</ul>
-	
 <a name="5.4.3"/>
-		<h2>5.4.3</h2>
+    <h2>5.4.3</h2>
+
+    <ul>
+        <li>The <code>cylc get-config</code> and <code>cylc
+            get-global-config</code> commands now print blank values
+        instead of the Pythonic <code>None</code> for unset items. This
+        means newly generated user config files are usable immediately
+        without modification.
+
+        <li>Sensible suite-dependent defaults are now computed for the
+        <code>[visualization]</code> initial and final cycle times:
+        from the suite's initial cycle time out to its default runahead
+        limit.
+
+        <li>For inlined editing of suites with include-files, the
+        file inclusion markers are now comments so that the suite
+        definition can still be used (validated etc.) during the
+        edit session.
+
+        <li>Fixed a recent bug that breaks polling of tasks with
+        an explicit 'owner' configured (as opposed to defaulting
+        to the suite owner's username, or using <code>.ssh/config</code>
+        to translate usernames for remote tasks). This bug would
+        break suite restart if any such tasks were present in the
+        submitted or running states.
+    </ul>
 
-		<ul>
-				<li>The <code>cylc get-config</code> and <code>cylc
-						get-global-config</code> commands now print blank values 
-				instead of the Pythonic <code>None</code> for unset items. This
-				means newly generated user config files are usable immediately
-				without modification.</li>
-
-				<li>Sensible suite-dependent defaults are now computed for the 
-				<code>[visualization]</code> initial and final cycle times:
-				from the suite's initial cycle time out to its default runahead
-				limit</li>
-
-				<li>For inlined editing of suites with include-files, the 
-				file inclusion markers are now comments so that the suite
-				definition can still be used (validated etc.) during the 
-				edit session.</li>
-
-				<li>Fixed a recent bug that breaks polling of tasks with  
-				an explicit 'owner' configured (as opposed to defaulting 
-				to the suite owner's username, or using <code>.ssh/config</code>
-				to translate usernames for remote tasks). This bug would 
-				break suite restart if any such tasks were present in the
-				submitted or running states.</li>
-		</ul>
 
-	
 <a name="5.4.2"/>
-		<h2>5.4.2</h2>
+    <h2>5.4.2</h2>
 
-		<ul>
-				<li>Fixes a bug that could cause an error in gcylc if it tried
-				to interrogate a very large suite before it had finished
-				initializing.</li>
+    <ul>
+        <li>Fixes a bug that could cause an error in gcylc if it tried
+        to interrogate a very large suite before it had finished
+        initializing.
 
-				<li>Allow the suite name delimiter '.' (dot) character in 
-				the names of the new automatic suite-polling tasks.</li>
+        <li>Allow the suite name delimiter '.' (dot) character in
+        the names of the new automatic suite-polling tasks.
 
-				<li>Fixed a recently introduced bug that caused the <code>cylc
-						db refresh</code> command to crash if a suite's name had
-						been changed.</li>
+        <li>Fixed a recently introduced bug that caused the <code>cylc
+            db refresh</code> command to crash if a suite's name had
+            been changed.
 
-				<li>Fixed gcylc graph view right-click family Group/Ungroup 
-				behaviour.</li>
+        <li>Fixed gcylc graph view right-click family Group/Ungroup
+        behaviour.
 
-				<li>Documented automatic suite-polling tasks (triggering off 
-				tasks in other suites) in the user guide.</li>
+        <li>Documented automatic suite-polling tasks (triggering off
+        tasks in other suites) in the user guide.
 
-				<li>Updated suite definition syntax documentation in the 
-				user guide.</li>
+        <li>Updated suite definition syntax documentation in the
+        user guide.
 
-				<li>Further expansion of the automated test battery.</li>
+        <li>Further expansion of the automated test battery.
+
+    </ul>
 
-		</ul>
-	
 
 <a name="5.4.1"/>
-		<h2>5.4.1</h2>
+    <h2>5.4.1</h2>
 
-			<ul>
-				<li>Fixes a bug introduced just before the 5.4.0 release: cylc 
-				would abort if a task in a suite with no final cycle time hit 
-				the runahead limit.
-		   </ul>	
+    <ul>
+        <li>Fixes a bug introduced just before the 5.4.0 release: cylc
+        would abort if a task in a suite with no final cycle time hit
+        the runahead limit.
+    </ul>
 
 <a name="5.4.0"/>
-		<h2>5.4.0</h2>
+    <h2>5.4.0</h2>
 
-		<ul>
-				<li>dependence on tasks before the initial cycle time is now
-				ignored, so cold-start tasks are no longer strictly required
-				in suites with inter-cycle dependence</li>
+    <ul>
+        <li>dependence on tasks before the initial cycle time is now
+        ignored, so cold-start tasks are no longer strictly required
+        in suites with inter-cycle dependence
 
-				<li>graph notation support for triggering off tasks in other suites</li>
+        <li>graph notation support for triggering off tasks in other suites
 
-				<li>a comprehensive new User Guide <em>Tutorial</em> section replaces
-				the old <em>Quick Start Guide</em> </li>
+        <li>a comprehensive new User Guide <em>Tutorial</em> section replaces
+        the old <em>Quick Start Guide</em> 
 
-				<li>a multi-suite summary gui, <code>cylc gsummary</code></li>
+        <li>a multi-suite summary gui, <code>cylc gsummary</code>
 
-				<li>a new <code>submit-retry</code> task state (the
-				<code>retry</code> state is now exclusively for execution retry)</li>
+        <li>a new <code>submit-retry</code> task state (the
+        <code>retry</code> state is now exclusively for execution retry)
 
-				<li>processed suite definitions are now written out to the suite directory</li>
+        <li>processed suite definitions are now written out to the suite directory
 
-				<li>a simpler and more robust suite name registration database 
-				(to upgrade run <code>cylc upgrade-db</code>)</li>
+        <li>a simpler and more robust suite name registration database
+        (to upgrade, run <code>cylc upgrade-db</code>)
 
-				<li>many performance enhancements for large suites</li>
+        <li>many performance enhancements for large suites
 
-				<li>gcylc</li>
-				<ul>
-						<li> single updater thread for multiple suite views</li>
-						<li> updates only if the suite state has changed (lower network traffic)</li>
-						<li> optional transposed dot view</li>
-						<li> clickable suite error notification in the task bar</li>
-						<li> in the graph view, runahead tasks and tasks held beyond
-						the suite final cycle time - and potentially many base graph
-						nodes to join the graph - are now cropped by default.</li>
-				</ul>
+        <li>gcylc
+        <ul>
+            <li> single updater thread for multiple suite views
+            <li> updates only if the suite state has changed (lower network traffic)
+            <li> optional transposed dot view
+            <li> clickable suite error notification in the task bar
+            <li> in the graph view, runahead tasks and tasks held beyond
+            the suite final cycle time - and the potentially many base graph
+            nodes needed to join up the graph - are now cropped by default.
+        </ul>
 
-				<li>extract specific site/user config items with <code>cylc
-				get-global-config</code></li>
+        <li>extract specific site/user config items with <code>cylc
+        get-global-config</code>
 
-				<li>replaced ConfigObj with a faster custom config file parser and
-				validator, "parsec"</li>
+        <li>replaced ConfigObj with a faster custom config file parser and
+        validator, "parsec"
 
-				<li>repeated sections and items are now allowed in suite definitions</li>
+        <li>repeated sections and items are now allowed in suite definitions
 
-				<li>task poll and kill for Sun Grid Engine (other job submission
-				methods got this at 5.3.0)</li>
+        <li>task poll and kill for Sun Grid Engine (other job submission
+        methods got this at 5.3.0)
 
-				<li><code>cylc cat-log</code> now prints task as well as suite logs</li>
+        <li><code>cylc cat-log</code> now prints task as well as suite logs
 
-				<li>a greatly expanded automated test battery using a new Perl
-				<code>prove</code>-based framework</li>
+        <li>a greatly expanded automated test battery using a new Perl
+        <code>prove</code>-based framework
 
-				<li>support added for the Jinja2 <code>do</code> extension
-				(allows manipulation of data structures without having to
-				<code>set</code> a variable)</li>
+        <li>support added for the Jinja2 <code>do</code> extension
+        (allows manipulation of data structures without having to
+        <code>set</code> a variable; see the sketch after this list)
 
-				<li><code>cylc graph</code> now has a "save image" GUI button (previously a
-				command line option had to be used to generated image files)</li>
+        <li><code>cylc graph</code> now has a "save image" GUI button (previously a
+        command line option had to be used to generate image files)
 
-				<li>improved <code>cylc trigger</code> semantics: manual
-				triggering now results in immediate job submission if the target
-				task does not belong to a limited queue or if it is already
-				queued, otherwise it will queue the task immediately.</li>
+        <li>improved <code>cylc trigger</code> semantics: manual
+        triggering now results in immediate job submission if the target
+        task does not belong to a limited queue or if it is already
+        queued; otherwise it queues the task immediately.
 
-				<li>stop queued job submissions immediately if a shutdown is
-				requested</li>
+        <li>stop queued job submissions immediately if a shutdown is
+        requested
 
-				<li>deprecated the <code>--owner</code> command option to
-				<code>--user</code> and documented more clearly that use of 
-				this and <code>--host</code> results in command re-invocation on
-				the remote account</li>
+        <li>deprecated the <code>--owner</code> command option in favour of
+        <code>--user</code>, and documented more clearly that use of
+        this and <code>--host</code> results in command re-invocation on
+        the remote account
 
-				<li>if an owner is not specified for a task <code>$USER</code>
-				is no longer assumed - this allows remote username translation
-				to be done via <code>$HOME/.ssh/config</code> 
+        <li>if an owner is not specified for a task, <code>$USER</code>
+        is no longer assumed - this allows remote username translation
+        to be done via <code>$HOME/.ssh/config</code>
 
-				<li>many many bug fixes, including:</li>
-				<ul>
-						<li> the order of cycling graph sections of different kinds (e.g.
-						Daily and Monthly in the same suite) no longer matters </li>
+        <li>many, many bug fixes, including:
+        <ul>
+            <li> the order of cycling graph sections of different kinds (e.g.
+            Daily and Monthly in the same suite) no longer matters 
 
-						<li>Monthly cycling now works properly even if the initial
-						cycle time is off-sequence</li>
+            <li>Monthly cycling now works properly even if the initial
+            cycle time is off-sequence
 
-						<li>respect the <code>[visualization]collapsed families</code>
-						suite config item for the initial graph view</li>
+            <li>respect the <code>[visualization]collapsed families</code>
+            suite config item for the initial graph view
 
-				</ul>
+        </ul>
 
 
-		</ul>
-	
+    </ul>
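+
+    <p>A minimal sketch of the Jinja2 <code>do</code> extension in a suite
+    definition; the variable names are illustrative assumptions:</p>
+    <pre>
+#!jinja2
+{% set members = [] %}
+{% for n in range(3) %}
+    {# append without having to re-assign the list with "set" #}
+    {% do members.append('mem' ~ n) %}
+{% endfor %}
+    </pre>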
+
 
 <a name="5.3.0"/>
-		<h2>5.3.0</h2>
+    <h2>5.3.0</h2>
 
-		<ul>
-				<li>New <code>submit</code> and <code>submit-fail</code> task
-				triggers.</li>
+    <ul>
+        <li>New <code>submit</code> and <code>submit-fail</code> task
+        triggers.
 
-				<li>The EVENT argument passed to event handler scripts now
-				matches the suite.rc item names exactly (no replacement of
-				spaces with underscores, e.g. in "submission timeout".</li>
+        <li>The EVENT argument passed to event handler scripts now
+        matches the suite.rc item names exactly (no replacement of
+        spaces with underscores, e.g. in "submission timeout".
 
-				<li>Automatic polling as a task communication mechanism for task
-				hosts that do not allow return messaging by network socket
-				(Pyro) or ssh.</li>
+        <li>Automatic polling as a task communication mechanism for task
+        hosts that do not allow return messaging by network socket
+        (Pyro) or ssh.
 
-				<li>On-demand job poll and kill by CLI and GUI.</li>
+        <li>On-demand job poll and kill by CLI and GUI.
 
-				<li>Automatic job poll on task job submission and execution
-				timeouts.</li>
+        <li>Automatic job poll on task job submission and execution
+        timeouts.
 
-				<li>Automatic job polling on restarting a suite, to find out
-				what happened to any orphaned tasks while the suite was
-				down.</li> 
+        <li>Automatic job polling on restarting a suite, to find out
+        what happened to any orphaned tasks while the suite was
+        down.
 
-				<li>Fixed ksh compatibility in task job scripts.</li>
+        <li>Fixed ksh compatibility in task job scripts.
 
-				<li>All gcylc views start in family-grouped mode.</li>
+        <li>All gcylc views start in family-grouped mode.
 
-				<li>Optionally ignore suicide triggers in suite graphs and 
-				the gcylc graph view.</li>
+        <li>Optionally ignore suicide triggers in suite graphs and
+        the gcylc graph view.
 
-				<li>Delay between job submission batches set to zero by default.</li>
+        <li>Delay between job submission batches set to zero by default.
 
-				<li>Option to kill active tasks before shutdown.</li>
+        <li>Option to kill active tasks before shutdown.
 
-				<li>Support for "future triggers": <code>foo[T+24] => bar</code></li>
+        <li>Support for "future triggers": <code>foo[T+24] => bar</code>
 
-				<li>Better spent task proxy clean-up algorithm.</li>
+        <li>Better spent task proxy clean-up algorithm.
 
-				<li>Allow manual triggering of tasks that are retrying.</li>
+        <li>Allow manual triggering of tasks that are retrying.
 
-		</ul>
+    </ul>
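+
+    <p>A minimal sketch of a "future trigger" in a cylc-5 style cycling
+    graph; the cycling hours and task names are illustrative assumptions:</p>
+    <pre>
+[scheduling]
+    [[dependencies]]
+        [[[0,12]]]
+            # bar at cycle T triggers off foo at the later cycle T+24
+            graph = "foo[T+24] => bar"
+    </pre>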
 
 <a name="5.2.0"/>
-		<h2>5.2.0</h2>
+    <h2>5.2.0</h2>
 
-		<ul>
-				<li><b>gcylc can now switch between suites</b> - the 
-				former <code>cylc dbviewer</code> GUI has been converted to a
-				"File Open" dialog for gcylc.</li>
+    <ul>
+        <li><b>gcylc can now switch between suites</b> - the
+        former <code>cylc dbviewer</code> GUI has been converted to a
+        "File Open" dialog for gcylc.
 
-				<li><b>Preparation commands</b> (validate, graph, etc.) can now take
-						a suite definition path instead of a registered suite name.</li>
+        <li><b>Preparation commands</b> (validate, graph, etc.) can now take
+            a suite definition path instead of a registered suite name.
 
-    		<li><b>Control commands</b> can now operate on multiple tasks at once,
-				selecting by family name or regular expression.</li>
+        <li><b>Control commands</b> can now operate on multiple tasks at once,
+        selecting by family name or regular expression.
 
-				<li><b>A new task state</b> to distinguish submission failure from 
-						execution failure.</li>
+        <li><b>A new task state</b> to distinguish submission failure from
+            execution failure.
 
-				<li> Configurable <b>job submission retry</b>.</li>
+        <li> Configurable <b>job submission retry</b>.
 
-				<li>A poll-and-wait option for the <b>stop command</b>.</li>
+        <li>A poll-and-wait option for the <b>stop command</b>.
 
-				<li><b>Orphaned running tasks</b> can now reconnect to a
-				restarted suite, even if it starts on a different port.</li>
+        <li><b>Orphaned running tasks</b> can now reconnect to a
+        restarted suite, even if it starts on a different port.
 
-				<li>Automatic suite <b>run-directory housekeeping</b>.</li>
+        <li>Automatic suite <b>run-directory housekeeping</b>.
 
-				<li>The final path component of a <b>task work directory</b> can
-				now be set in the suite definition. This restores the pre cylc-5
-				ability to allow groups of interdependent tasks that all read
-				and write from their current working directories to be given a
-				common file share space.</li>
+        <li>The final path component of a <b>task work directory</b> can
+        now be set in the suite definition. This restores the pre-cylc-5
+        ability for groups of interdependent tasks that read and write
+        in their current working directories to share a common workspace.
 
-				<li>The ssh connection now remains open while <b>remote background
-						tasks</b> run, for sites that automatically kill detached
-				processes on login nodes.</li>
+        <li>The ssh connection now remains open while <b>remote background
+            tasks</b> run, for sites that automatically kill detached
+        processes on login nodes.
 
-				<li>Restored <b>dynamic host selection by environment variable</b>.</li>
+        <li>Restored <b>dynamic host selection by environment variable</b>.
 
-				<li><b>Suicide triggers</b> can now be applied to task families.</li>
+        <li><b>Suicide triggers</b> can now be applied to task families.
 
-				<li>Interactive <b>command prompts</b> have been disabled by default.</li>
+        <li>Interactive <b>command prompts</b> have been disabled by default.
 
-				<li>Dropped support for <b>Python 2.4.</b></li>
+        <li>Dropped support for <b>Python 2.4.</b>
 
-				<li><b>UTC mode</b> now sets <code>$TZ</code> in task environments.</li>
+        <li><b>UTC mode</b> now sets <code>$TZ</code> in task environments.
 
-				<li>The <b>test battery</b> now cleans up temporary files after
-				itself, and it has been parallelized.</li>
+        <li>The <b>test battery</b> now cleans up temporary files after
+        itself, and it has been parallelized.
 
-				<li> <b>syntax hilighting</b> for the <b>kate</b> editor.</li>
+        <li> <b>syntax highlighting</b> for the <b>Kate</b> editor.
 
-				<li>Access to the new <b>suite run-db</b> is now retried several
-				times in case of an unlikely access conflict.</li>
+        <li>Access to the new <b>suite run-db</b> is now retried several
+        times in case of an unlikely access conflict.
 
-				<li>A first-parent namespace can now be demoted to prevent its
-				use as the visualization family (e.g. 
-				<code>inherit = None, myparent</code> will keep the namespace 
-				under root for visualization purposes).</li>
+        <li>A first-parent namespace can now be demoted to prevent its
+        use as the visualization family (e.g.
+        <code>inherit = None, myparent</code> will keep the namespace
+        under root for visualization purposes).
 
-				<li><code>cylc list</code> command output has been updated
-				for multiple inheritance, and the option to print namespace
-				(task and family) title information has been restored.</li>
+        <li><code>cylc list</code> command output has been updated
+        for multiple inheritance, and the option to print namespace
+        (task and family) title information has been restored.
 
-				<li>Cylc can now be invoked via symlinked directories in
-				<code>$PATH</code>.</li>
+        <li>Cylc can now be invoked via symlinked directories in
+        <code>$PATH</code>.
 
-		</ul>
+    </ul>
 
 <a name="5.1.1"/>
-		<h2>5.1.1</h2>
+    <h2>5.1.1</h2>
 
-		<ul>
-				<li>Restored use of visualization settings, broken by
-				the task ID delimiter change in 5.0.0.</li>
+    <ul>
+        <li>Restored use of visualization settings, broken by
+        the task ID delimiter change in 5.0.0.
 
-				<li>Restored <b>image file output</b> capability to the gcylc
-				suite graph dialog.</li>
-		</ul>
+        <li>Restored <b>image file output</b> capability to the gcylc
+        suite graph dialog.
+    </ul>
 
 <a name="5.1.0"/>
-		<h2>5.1.0</h2>
+    <h2>5.1.0</h2>
 
 <ul>
 
-		<li>Added <b>site/user config file documentation</b> to the User
-		Guide.</li>
+    <li>Added <b>site/user config file documentation</b> to the User
+    Guide.
 
-		<li>The <b>task ID delimiter has changed</b> from '%' to '.'
-		(foo%2013080800 becomes foo.2013080800) because the '%' character
-		causes trouble for web-based suite output viewers. This affects task
-		IDs on the command line, and task log filenames, but it does not affect
-		 suite definitions.</li>
+    <li>The <b>task ID delimiter has changed</b> from '%' to '.'
+    (foo%2013080800 becomes foo.2013080800) because the '%' character
+    causes trouble for web-based suite output viewers. This affects task
+    IDs on the command line, and task log filenames, but it does not affect
+     suite definitions.
 
-		<li> Updated several example suites that were not validiting after recent changes.</li>
+    <li> Updated several example suites that were not validating after recent changes.
 
-		<li><b>Multiple inheritance</b> for the runtime namespace hierarchy.</li>
+    <li><b>Multiple inheritance</b> for the runtime namespace hierarchy.
 
-		<li><b>A new command</b> <code>cylc [util] suite-state</code> queries the
-		new suite run databases for task state, and in a polling mode can be
-		used by tasks to wait on tasks in other suites.</li>
+    <li><b>A new command</b> <code>cylc [util] suite-state</code> queries the
+    new suite run databases for task state, and in a polling mode can be
+    used by tasks to wait on tasks in other suites.
 
-		<li>New <b>task job status files</b> in the task job log directory
-		are updated by running tasks. In the future this will allow cylc to
-		determine, on restarting, what happened to running tasks that were
-		orphaned when the suite was down.</li>
+    <li>New <b>task job status files</b> in the task job log directory
+    are updated by running tasks. In the future this will allow cylc to
+    determine, on restarting, what happened to running tasks that were
+    orphaned when the suite was down.
 </ul>
 
 
 <a name="5.0.3"/>
-		<h2>5.0.3</h2>
+    <h2>5.0.3</h2>
 
 <ul>
-		<li><b>Create many fewer temporary directories.</b></li>
+    <li><b>Create many fewer temporary directories.</b>
 </ul>
 
 <a name="5.0.2"/>
-		<h2>5.0.2</h2>
+    <h2>5.0.2</h2>
 
 <ul>
-		<li> <b>Bug fix for warm starts</b> - a function <code>plog()</code>
-		that no longer exists was called on warm starting.</li>
+    <li> <b>Bug fix for warm starts</b> - a function <code>plog()</code>
+    that no longer exists was called on warm starting.
 
-		<li> <b>Bug fixes for doc/Makefile:</b> - "make clean" was not
-		working, and a fatal error occured if htlatex was not installed.</li>
+    <li> <b>Bug fixes for doc/Makefile:</b> "make clean" was not
+    working, and a fatal error occurred if htlatex was not installed.
 
 </ul>
 
 
 <a name="5.0.1"/>
-		<h2>5.0.1</h2>
+    <h2>5.0.1</h2>
 
 <ul>
-		<li> <b>Bug fix for 5.0.0 batched job submission:</b> a job
-		submission failure would cause cylc to abort.</li>
+    <li> <b>Bug fix for 5.0.0 batched job submission:</b> a job
+    submission failure would cause cylc to abort.
 
-		<li> <b>Changed "log root" filenames</b> (i.e. task job script plus
-		stdout and stderr logs, and <code>$CYLC_TASK_LOG_ROOT</code>
-		in task environments): replaced seconds-since-epoch with a simple
-		<i>submit number</i> that increments on retries and manual
-		triggering.</li>
+    <li> <b>Changed "log root" filenames</b> (i.e. task job script plus
+    stdout and stderr logs, and <code>$CYLC_TASK_LOG_ROOT</code>
+    in task environments): replaced seconds-since-epoch with a simple
+    <i>submit number</i> that increments on retries and manual
+    triggering.
 
-		<li> Added a suite definition <b>syntax file</b> for the
-		<b>gedit</b> editor and other gtksourceview programs:
-		<code>conf/cylc.lang</code>.</li>
+    <li> Added a suite definition <b>syntax file</b> for the
+    <b>gedit</b> editor and other gtksourceview programs:
+    <code>conf/cylc.lang</code>.
 
 </ul>
 
@@ -1106,153 +1413,153 @@ For the definitive record see the cylc git repository change log.</p>
 <h2>5.0.0</h2>
 
 <ul>
-		<li> <b>Multi-threading</b>: cylc now does continous request
-		handling, batched task job submission, and batched event handler
-		execution, in background threads. This has big performance
-		benefits for large, busy, suites.</li>
+    <li> <b>Multi-threading</b>: cylc now does continuous request
+    handling, batched task job submission, and batched event handler
+    execution, in background threads. This has big performance
+    benefits for large, busy, suites.
 
-		<li> Cylc now runs in <b>daemon mode</b> - no need to use nohup
-		anymore.</li>
+    <li> Cylc now runs in <b>daemon mode</b> - no need to use nohup
+    anymore.
 
-		<li> <b>Major change in suite restart behavior</b>: tasks recorded
-		in the submitted, running, or failed states are no longer triggered
-		automatically on restarting, because that is not always desirable.</li>
+    <li> <b>Major change in suite restart behavior</b>: tasks recorded
+    in the submitted, running, or failed states are no longer triggered
+    automatically on restarting, because that is not always desirable.
 
-		<li> A new command can <b>broadcast runtime settings to tasks in
-		a running suite</b>, overriding their configured settings. One way to
-		use this: <b>tasks can communicate environment variables to other
-				tasks downstream of them</b>.  Broadcast settings persist across
-		restarts.</li> 
+    <li> A new command can <b>broadcast runtime settings to tasks in
+    a running suite</b>, overriding their configured settings. One way to
+    use this: <b>tasks can communicate environment variables to other
+        tasks downstream of them</b>.  Broadcast settings persist across
+    restarts.
 
-		<li> <b>Reloading the suite definition at run time</b> is now
-		supported. Even task definitions can be added and removed without
-		stopping and restarting the suite.</li>
+    <li> <b>Reloading the suite definition at run time</b> is now
+    supported. Even task definitions can be added and removed without
+    stopping and restarting the suite.
 
-		<li> <b>Restarting after deleting task definitions</b> now works 
-		without modifying the state dump.</li>
+    <li> <b>Restarting after deleting task definitions</b> now works
+    without modifying the state dump.
 
-		<li> <b>Dynamic host selection</b> is officially supported.</li>
+    <li> <b>Dynamic host selection</b> is officially supported.
 
-		<li> New <b>family trigger syntax</b> allows any family finish
-		semantics, with use of conditional operators.</li>
+    <li> New <b>family trigger syntax</b> allows any family finish
+    semantics, with use of conditional operators.
 
-		<li> Higher level <b>family triggers</b> (families of
-		families) can now be used in the graph.</li>
+    <li> Higher level <b>family triggers</b> (families of
+    families) can now be used in the graph.
 
-		<li> New <b>site and user config files</b> some former suite
-		definition items, and other global settings, can now be configured 
-		globally.</li> 
+    <li> New <b>site and user config files</b>: some former suite
+    definition items, and other global settings, can now be configured
+    globally.
 
-		<li> The <b>runahead limit</b> now automatically defaults to the
-		twice the smallest cycling interval in a suite, and setting it too
-		low can no longer stall a suite.</li>
+    <li> The <b>runahead limit</b> now automatically defaults to
+    twice the smallest cycling interval in a suite, and setting it too
+    low can no longer stall a suite.
 
-		<li> <b>Self-diagnosing test suites</b> can now be created to test
-		that cylc upgrades will not break your systems; see
-		"Reference Tests" in the User Guide.</li> 
+    <li> <b>Self-diagnosing test suites</b> can now be created to test
+    that cylc upgrades will not break your systems; see
+    "Reference Tests" in the User Guide.
 
-		<li> The <b>suite control GUI "gcontrol" has been renamed
-				"gcylc"</b> as it now has suite preparation capability too
-		(editing, graphing, validating, etc.); the former "gcylc" GUI is
-		still available as <code>cylc dbviewer</code>.</li>
+    <li> The <b>suite control GUI "gcontrol" has been renamed
+        "gcylc"</b> as it now has suite preparation capability too
+    (editing, graphing, validating, etc.); the former "gcylc" GUI is
+    still available as <code>cylc dbviewer</code>.
 
-		<li> Configurable <b>gcylc task state color themes</b> common to all
-		views.</li>
+    <li> Configurable <b>gcylc task state color themes</b> common to all
+    views.
 
-		<li> The <b>gcylc dot view</b> now has hover-over task state
-		information, and right-click menus.</li>
+    <li> The <b>gcylc dot view</b> now has hover-over task state
+    information, and right-click menus.
 
-		<li> The <b>gcylc graph view</b> now has:
-		<ul>
-				<li> <b>family node</b> state coloring and mouse-hover member
-				state information</li>
-				<li> <b>landscape mode</b></li>
-				<li> <b>reduced redraw</b> (i.e. the graph jumps around less)</li>
-				<li> <b>"cycle time focus"</b> now works (broken between 4.2.2 and 4.5.1)</li>
-		</ul>
-		</li>
+    <li> The <b>gcylc graph view</b> now has:
+    <ul>
+        <li> <b>family node</b> state coloring and mouse-hover member
+        state information
+        <li> <b>landscape mode</b>
+        <li> <b>reduced redraw</b> (i.e. the graph jumps around less)
+        <li> <b>"cycle time focus"</b> now works (broken between 4.2.2 and 4.5.1)
+    </ul>
+    
+
+    <li> <b>gcylc: less frequent polling</b> for stopped suites, to
+    reduce network traffic. A polling reset button can be used to
+    reconnect to a restarted suite immediately.
 
-		<li> <b>gcylc: less frequent polling</b> for stopped suites, to
-		reduce network traffic. A polling reset button can be used to
-		reconnect to a restarted suite immediately.</li>
+    <li> <b>Undefined Jinja2 variables</b> now cause an abort.
 
-		<li> <b>Undefined Jinja2 variables</b> now cause an abort.</li>
+    <li> <b>Set Jinja2 variables on the run command line</b>
+    (in-line or by referencing an external file).
 
-		<li> <b>Set Jinja2 variables on the run command line</b>
-		(in-line or by referencing an external file).</li>
+    <li> <b>Removed pseudo backward compatibility</b>
+    (<code>#!cylc-x.y.z</code>).  We will endeavor
+    to provide proper backward compatibility from now on.
 
-		<li> <b>Removed pseudo backward compatibility</b>
-		(<code>#!cylc-x.y.z</code>).  We will endeavor
-		to provide proper backward compatibility from now on.</li>
+    <li> Configurable <b>task messaging retry</b> for resilience to
+    network outages etc.
 
-		<li> Configurable <b>task messaging retry</b> for resilience to
-		network outages etc.</li>
-		
-		<li> <b>Task messaging failure</b> no longer causes tasks to abort.</li>
+    <li> <b>Task messaging failure</b> no longer causes tasks to abort.
 
-		<li> <b>ssh task messaging</b>: the path to cylc on the suite host
-		is now transferred via the task job script (no need to rely on 
-		login scripts).</li>
+    <li> <b>ssh task messaging</b>: the path to cylc on the suite host
+    is now transferred via the task job script (no need to rely on
+    login scripts).
 
-		<li> A new <b>task retry event hook</b> is triggered if a task fails
-		but has a retry lined up. The <b>task failed event hook</b> is only
-		triggered on a final definitive failure.</li>
+    <li> A new <b>task retry event hook</b> is triggered if a task fails
+    but has a retry lined up. The <b>task failed event hook</b> is only
+    triggered on a final definitive failure.
 
-		<li> Handling <b>HPC job preemption:</b> Tasks can be configured 
-		to revive from the dead if they start running again after reporting
-		failure.</li>
+    <li> Handling <b>HPC job preemption:</b> Tasks can be configured
+    to revive from the dead if they start running again after reporting
+    failure.
 
-		<li> A new config item to <b>abort a suite if any task fails</b>.</li>
+    <li> A new config item to <b>abort a suite if any task fails</b>.
 
-		<li> Removed the requirement to prefix the integer TAG of an
-		asynchronous (non-cycling) task, on the command line, with <b>a:</b>.</li>
+    <li> Removed the requirement to prefix the integer TAG of an
+    asynchronous (non-cycling) task, on the command line, with <b>a:</b>.
 
-		<li> Added <b>suite event hooks</b> for suite <b>startup</b> and
-		<b>shutdown</b>.</li>
+    <li> Added <b>suite event hooks</b> for suite <b>startup</b> and
+    <b>shutdown</b>.
 
-		<li> Replaced the original <b>simulation mode</b> which submitted
-		real dummy tasks to run locally with a proper <b>simulation mode</b>
-		that does not even submit dummy tasks, and a <b>dummy mode</b>
-		that simply dummies out command scripting but leaves job
-		submission, hosting, etc. intact.</li>
+    <li> Replaced the original <b>simulation mode</b> which submitted
+    real dummy tasks to run locally with a proper <b>simulation mode</b>
+    that does not even submit dummy tasks, and a <b>dummy mode</b>
+    that simply dummies out command scripting but leaves job
+    submission, hosting, etc. intact.
 
-		<li> Tasks can check <b>$CYLC_TASK_IS_COLDSTART</b> to determine
-		whether or not they are cold-start tasks.</li>
+    <li> Tasks can check <b>$CYLC_TASK_IS_COLDSTART</b> to determine
+    whether or not they are cold-start tasks.
 
-		<li> A new <b>strict validation</b> option protects against
-		inadvertent creation of naked dummy tasks (tasks with no explicit
-		runtime config) by misspelling task names.</li>
+    <li> A new <b>strict validation</b> option protects against
+    inadvertent creation of naked dummy tasks (tasks with no explicit
+    runtime config) by misspelling task names.
 
-		<li> Optional <b>suite host identification by IP address</b> instead
-		of host name.</li>
+    <li> Optional <b>suite host identification by IP address</b> instead
+    of host name.
 
-		<li> A suite-specific <b>sqlite database</b> is populated by
-		cylc with task event and status information. Planned future uses
-		include retrieval of information about long-finished tasks, and
-		replacing the current primitive state dump files.</li>
+    <li> A suite-specific <b>sqlite database</b> is populated by
+    cylc with task event and status information. Planned future uses
+    include retrieval of information about long-finished tasks, and
+    replacing the current primitive state dump files.
 
-		<li> Allow <b>(number-of-retries)*(delay)</b> notation in task
-		<b>task retry</b> configuration.</li>
+    <li> Allow <b>(number-of-retries)*(delay)</b> notation in
+    <b>task retry</b> configuration.
 
-		<li> The <b>cylc doc</b> command now loads a documentation index in
-		your browser.</li>
+    <li> The <b>cylc doc</b> command now loads a documentation index in
+    your browser.
 
-		<li> Suite <b>block/unblock</b> functionality has been removed.</li>
+    <li> Suite <b>block/unblock</b> functionality has been removed.
 
-		<li> Optionally <b>disable use of a login shell</b> in
-		passwordless ssh remote command invocation (e.g. to submit
-		remote tasks). </li>
+    <li> Optionally <b>disable use of a login shell</b> in
+    passwordless ssh remote command invocation (e.g. to submit
+    remote tasks). 
 
-		<li> The task execution environment variables
-		<b>$CYLC_SUITE_INITIAL_CYCLE_TIME</b> and
-		<b>$CYLC_SUITE_FINAL_CYCLE_TIME</b> now persist across restarts.</li>
+    <li> The task execution environment variables
+    <b>$CYLC_SUITE_INITIAL_CYCLE_TIME</b> and
+    <b>$CYLC_SUITE_FINAL_CYCLE_TIME</b> now persist across restarts.
 
 </ul>
 
 <a name="old"/>
 <h2>Old Change Log (4.5.1 and earlier)</h2>
 <ul>
-		<li>	<a href="changes-old.txt">(doc/changes-old.txt)</a> </li>
+    <li>  <a href="changes-old.txt">(doc/changes-old.txt)</a> 
 </ul>
 
 
diff --git a/doc/cug.tex b/doc/cug.tex
index 9c38bc9..58b5044 100644
--- a/doc/cug.tex
+++ b/doc/cug.tex
@@ -1,4 +1,3 @@
-
 \lstset{language=transcript}
 
 \section{Introduction: How Cylc Works}
@@ -131,7 +130,7 @@ diagrams.
     \end{center}
     \caption[The only safe multi-cycle-point job schedule?]
     {\scriptsize The best that can be done {\em in general} when
-    inter-cycle-point dependence is ignored.}
+    inter-cycle dependence is ignored.}
     \label{fig-job-no-overlap}
 \end{figure}
 
@@ -150,30 +149,30 @@ dangerous - it results in dependency violations in half of the tasks in
 the example suite. In fact the situation could be even worse than this
 - imagine that task {\em b} in the first cycle point is delayed for some
 reason {\em after} the second cycle point has been launched. Clearly we must
-consider handling inter-cycle-point dependence explicitly or else agree not to
+consider handling inter-cycle dependence explicitly or else agree not to
 start the next cycle point early, as is illustrated in
 Figure~\ref{fig-job-no-overlap}.
 
-\subsubsection{Inter-Cycle-Point Dependence}
+\subsubsection{Inter-Cycle Dependence}
 \label{InterCyclePointDependence}
 
 Forecast models typically depend on their own most recent previous
 forecast for background state or restart files of some kind (this is
-called {\em warm cycling}) but there can also be inter-cycle-point dependence
+called {\em warm cycling}) but there can also be inter-cycle dependence
 between different tasks.  In an atmospheric forecast analysis suite, for
 instance, the weather model may generate background states for observation
 processing and data-assimilation tasks in the next cycle point as well as for
-the next forecast model run. In real time operation inter-cycle-point
+the next forecast model run. In real time operation inter-cycle
 dependence can be ignored because it is automatically satisfied when one cycle
 point finishes before the next begins. If it is not ignored it drastically
 complicates the dependency graph by blurring the clean boundary between
 cycle points. Figure~\ref{fig-dep-multi} illustrates the problem for our
-simple example suite assuming minimal inter-cycle-point dependence: the warm
+simple example suite assuming minimal inter-cycle dependence: the warm
 cycled models ($a$, $b$, and $c$) each depend on their own previous instances.
 
 For this reason, and because we tend to see forecasting suites in terms of
 their real time characteristics, other metaschedulers have ignored
-inter-cycle-point dependence and are thus restricted to running entire cycle
+inter-cycle dependence and are thus restricted to running entire cycle
 points in sequence at all times.  This does not affect normal real time
 operation but it can be a serious impediment when advance availability of
 external driving data makes it possible, in principle, to run some tasks from
@@ -184,7 +183,7 @@ etc.) and to an even greater extent in historical case studies and parallel
 test suites started behind a real time operation. It can be a serious problem
 for suites that have little downtime between forecast cycle points and
 therefore take many cycle points to catch up after a delay. Without taking
-account of inter-cycle-point dependence, the best that can be done, in
+account of inter-cycle dependence, the best that can be done, in
 general, is to reduce the gap between cycle points to zero as shown in
 Figure~\ref{fig-job-no-overlap}. A limited crude overlap of the single cycle
 point job schedule may be possible for specific task sets but the allowable
@@ -200,7 +199,7 @@ contention or task failures) won't result in dependency violations.
     \end{center}
     \caption[The complete multi-cycle-point dependency graph]
     {\scriptsize The complete dependency graph for the example suite, assuming
-    the least possible inter-cycle-point dependence: the forecast models ($a$,
+    the least possible inter-cycle dependence: the forecast models ($a$,
     $b$, and $c$) depend on their own previous instances. The dashed arrows
     show connections to previous and subsequent forecast cycle points.}
     \label{fig-dep-multi}
@@ -212,19 +211,19 @@ contention or task failures) won't result in dependency violations.
     \end{center}
     \caption[The optimal two-cycle-point job schedule]
     {\scriptsize The optimal two cycle job schedule when the next cycle's driving data is available in
-    advance, possible in principle when inter-cycle-point dependence is
+    advance, possible in principle when inter-cycle dependence is
     handled explicitly.}
     \label{fig-optimal-two}
 \end{figure}
 
 Figure~\ref{fig-optimal-two} shows, in contrast to
 Figure~\ref{fig-overlap}, the optimal two cycle point job schedule obtained by
-respecting all inter-cycle-point dependence.  This assumes no delays due to
+respecting all inter-cycle dependence.  This assumes no delays due to
 resource contention or otherwise - i.e.\ every task runs
 as soon as it is ready to run. The scheduler running
 this suite must be able to adapt dynamically to external conditions
 that impact on multi-cycle-point scheduling in the presence of
-inter-cycle-point dependence or else, again, risk bringing the system down
+inter-cycle dependence or else, again, risk bringing the system down
 with dependency violations.
 
 \begin{figure}
@@ -233,7 +232,7 @@ with dependency violations.
     \end{center}
     \caption[Comparison of job schedules after a delay]{\scriptsize Job
     schedules for the example suite after a delay of almost one whole
-    forecast cycle point, when inter-cycle-point dependence is
+    forecast cycle point, when inter-cycle dependence is
     taken into account (above the time axis), and when it is not
     (below the time axis). The colored lines indicate the time that
     each cycle point is delayed, and normal ``caught up'' cycle points
@@ -251,16 +250,16 @@ with dependency violations.
     available many cycle points in advance. Above the time axis is the optimal
     schedule obtained when the suite is constrained only by its true
     dependencies, as in Figure \ref{fig-dep-two-linked}, and underneath
-    is the best that can be done, in general, when inter-cycle-point
+    is the best that can be done, in general, when inter-cycle
     dependence is ignored.}
     \label{fig-time-two}
 \end{figure}
 
-To further illustrate the potential benefits of proper inter-cycle-point
+To further illustrate the potential benefits of proper inter-cycle
 dependency handling, Figure~\ref{fig-time-three} shows an operational
 delay of almost one whole cycle point in a suite with little downtime between
 cycle points. Above the time axis is the optimal schedule that is possible in
-principle when inter-cycle-point dependence is taken into account, and below
+principle when inter-cycle dependence is taken into account, and below
 it is the only safe schedule possible {\em in general} when it is ignored.
 In the former case, even the cycle point immediately after the delay is hardly
 affected, and subsequent cycle points are all on time, whilst in the latter
@@ -324,7 +323,7 @@ advance the suite; instead individual task proxies have their own
 private cycle point and spawn their own successors when the time is
 right. Task proxies are self-contained - they know their own
 prerequisites and outputs but are not aware of the wider suite.
-Inter-cycle-point dependence is not treated as special, and the task pool can
+Inter-cycle dependence is not treated as special, and the task pool can
 be populated with tasks with many different cycle points. The task pool
 is illustrated in Figure~\ref{fig-task-pool}. {\em Whenever any task
 changes state due to completion of an output, every task checks to see
@@ -358,10 +357,10 @@ previous section, emerges naturally at run time.
 
 \begin{figure}
     \begin{center}
-        \includegraphics[width=0.5\textwidth]{graphics/png/orig/gsummary.png}
+        \includegraphics[width=0.5\textwidth]{graphics/png/orig/gscan.png}
     \end{center}
-\caption[gsummary multi-suite state summary GUI]{\scriptsize gsummary multi-suite state summary GUI.}
-\label{fig-gsummary}
+\caption[gscan multi-suite state summary GUI]{\scriptsize gscan multi-suite state summary GUI.}
+\label{fig-gscan}
 \end{figure}
 
 
@@ -741,7 +740,7 @@ in the running suite - i.e.\ different tasks may pass through the
 ``current cycle point'' (etc.) at different times as the suite evolves,
 particularly in delayed (catch up) operation.
 
-\section{Site And User Configuration Files}
+\section{Global (Site, User) Configuration Files}
 \label{SiteAndUserConfiguration}
 
 Cylc site and user global configuration files contain settings that affect all
@@ -776,8 +775,8 @@ Settings that do not need to be changed should be deleted or commented
 out of user global config files so that they don't override future changes to
 the site file.
 
-Legal items, values, and system defaults are documented in the
-{\em Site And User Config File Reference} (\ref{SiteRCReference}).
+Legal items, values, and system defaults are documented
+in~\ref{SiteRCReference}.
 
 %\pagebreak
 \section{Tutorial}
@@ -868,10 +867,10 @@ GUI does not affect the suite itself.
 \begin{lstlisting}
 shell$ gcylc & # or:
 shell$ cylc gui & # Use the File menu to switch to specific suite.
-shell$ cylc gsummary & # Summary GUI for multiple running suites.
+shell$ cylc gscan & # Scan GUI for multiple running suites.
 \end{lstlisting}
-Clicking on a suite in the summary GUI, shown in
-Figure~\ref{fig-gsummary}, opens a gcylc instance for it.
+Clicking on a suite in the scan GUI, shown in
+Figure~\ref{fig-gscan}, opens a gcylc instance for it.
 
 \subsection{Suite Definitions}
 
@@ -915,17 +914,19 @@ and~\ref{SuiteStorageEtc}).
 \subsection{Suite Passphrases}
 \label{tutPassphrases}
 
-At registration time a random string of characters is written to a
-file called \lstinline=passphrase= in the suite definition directory.
-At run time any contact from cylc client programs (running tasks, user
-commands, the cylc GUI) must use the same passphrase to authenticate
-with the running suite. This prevents unauthorized users from interfering with
-your suites (network communication between running processes is not
-subject to Unix user account permissions). Local tasks and user commands
-on the suite host automatically use the passphrase in the suite
-definition directory. For remote tasks and commands, however, the
-passphrase must be installed appropriately on the remote
-account - see~\ref{RemoteTasks} below.
+A suite-specific passphrase file is automatically generated in the suite
+definition directory at registration time. It is loaded by the suite daemon at
+start-up and used to authenticate all client connections.  Clients on the suite
+host account automatically load the passphrase from the suite definition
+directory, but on other accounts, including task host accounts, it has to be
+installed manually.
+
+Possession of a suite passphrase gives full control and full read-access to the
+suite.  Without it, client access is determined by the suite's public access
+privilege level.
+
+For more on connection authentication, suite passphrases, and public access,
+see~\ref{ConnectionAuthentication}.
 
 \subsection{Import The Example Suites}
 \label{ImportTheExampleSuites}
@@ -959,10 +960,10 @@ shell$ cylc db pr --tree -x examples.tutorial
 examples
  `-tutorial
    `-cycling
-   | |-four       Inter-cycle-point dependence + a start-up task
+   | |-four       Inter-cycle dependence + a start-up task
    | | ...
-   | |-two        Two cycling tasks with inter-cycle-point dependence
-   | `-three      inter-cycle-point dependence + a cold-start task
+   | |-two        Two cycling tasks with inter-cycle dependence
+   | `-three      inter-cycle dependence + a cold-start task
    `-oneoff
      |-retry      A task with automatic retry on failure
      |-remote     Hello World! on a remote host
@@ -1169,11 +1170,11 @@ shell$ cylc scan
 tut.oneoff.basic oliverh oliverh-34403DL 7766
 
 # GUI summary view of running suites:
-shell$ cylc gsummary &
+shell$ cylc gscan &
 \end{lstlisting}
 
-The summary GUI is shown in Figure~\ref{fig-gsummary}; clicking on a suite in
-it opens gcylc.
+The scan GUI is shown in Figure~\ref{fig-gscan}; clicking on a suite in it
+opens gcylc.
 
 \subsection{Task Identifiers}
 
@@ -1251,9 +1252,9 @@ DONE
 
 Cylc supports a number of different job submission methods. Tasks
 submitted to external batch queuing systems like \lstinline=at=,
-\lstinline=PBS=, \lstinline=SLURM=, or \lstinline=loadleveler=, are
-displayed as {\em submitted} in the cylc GUI until they actually start
-executing.
+\lstinline=PBS=, \lstinline=SLURM=, \lstinline=Moab=, or
+\lstinline=LoadLeveler=, are displayed as {\em submitted} in the cylc GUI until
+they actually start executing.
 
 \begin{myitemize}
 \item For more on task job scripts, see~\ref{JobScripts}.
@@ -1360,6 +1361,10 @@ For remote task hosting to work several requirements must be satisfied:
 \item Passwordless ssh must be enabled from the suite host account to
 the task host account, for task job submission.
 
+\item Your shell initialization (.profile, .bashrc, .cshrc, etc.) on the remote
+host must not produce any standard output, as it may confuse commands such as
+scp. See \url{http://www.openssh.com/faq.html#2.9} for more information.
+
 \item Networking settings must allow communication {\em back} from
 the task host to the suite host, either by network ports (Pyro) or ssh,
 unless the last-resort one way {\em task polling} communication method
@@ -1400,12 +1405,61 @@ ssh -oBatchMode=yes -oConnectTimeout=10 wrh-1.niwa.co.nz\
 ' "/opt/cylc/bin/cylc" '"'"'job-submit'"'"\
 ' --remote-mode "/home/oliverh/cylc-run/tut.oneoff.remote/log/job/1/hello/01/job"'
 \end{lstlisting}
-(Don't be intimated by this - it really quite straightforward and would appear
-much simpler if the job log path was shorter!). Remote task job logs are
+(Don't be intimidated by this - it is really quite straightforward and would
+appear much simpler if the job log path were shorter!). Remote task job logs are
 saved to the suite run directory on the task host, not on the suite host,
-although they can be retrieved by right-clicking on the task in the GUI. Rose
-(section~\ref{Rose}) provides a task event handler to pull logs back to
-the suite host.
+although they can be retrieved by right-clicking on the task in the GUI. If you
+want the job logs pulled back to the suite host automatically, you can set
+"retrieve job logs=True" under the "[[[remote]]]" section:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[hello]]
+        script = "sleep 10; echo Hello World!"
+        [[[remote]]]
+            host = server1.niwa.co.nz
+            retrieve job logs = True
+\end{lstlisting}
+
+The suite will attempt to \lstinline=rsync= the job logs once from the remote
+host each time a task job completes. E.g. if the job file is
+\lstinline=~/cylc-run/tut.oneoff.remote/log/job/1/hello/01/job=, anything under
+\lstinline=~/cylc-run/tut.oneoff.remote/log/job/1/hello/01/= will be retrieved.
+
+Some batch systems have considerable delays between the time when the job
+completes and when the job logs are written to their normal location. If this
+is the case, you can configure an initial delay and retry delays for job log
+retrieval. E.g.:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[hello]]
+        script = "sleep 10; echo Hello World!"
+        [[[remote]]]
+            host = server1.niwa.co.nz
+            retrieve job logs = True
+            # Retry after 10 seconds, 1 minute and 3 minutes
+            retrieve job logs retry delays = PT10S, PT1M, PT3M
+\end{lstlisting}
+
+Finally, if disk space on the suite host is limited, you may want to set
+\lstinline@[[[remote]]]retrieve job logs max size=SIZE@. The value of SIZE can
+be anything that is accepted by the \lstinline@--max-size=SIZE@ option of the
+\lstinline=rsync= command. E.g.:
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[hello]]
+        script = "sleep 10; echo Hello World!"
+        [[[remote]]]
+            host = server1.niwa.co.nz
+            retrieve job logs = True
+            # Don't get anything bigger than 10MB
+            retrieve job logs max size = 10M
+\end{lstlisting}
 
 \begin{myitemize}
 \item For more on remote tasks see~\ref{RunningTasksOnARemoteHost}
@@ -1814,13 +1868,13 @@ to the suite to only allow initial cycle points at 00 or 12 hours e.g.
         see~\ref{RunaheadLimit}.
 \end{myitemize}
 
-\subsubsection{Inter-Cycle-Point Triggers}
+\subsubsection{Inter-Cycle Triggers}
 \label{TutInterCyclePointTriggers}
 
 \hilight{ suite: \lstinline=tut.cycling.two= }
 \vspace{3mm}
 
-The \lstinline=tut.cycling.two= suite adds inter-cycle-point dependence
+The \lstinline=tut.cycling.two= suite adds inter-cycle dependence
 to the previous example:
 \begin{lstlisting}
 [scheduling]
@@ -1833,7 +1887,7 @@ For any given cycle point in the sequence defined by the
 cycling graph section heading, \lstinline=bar= triggers off
 \lstinline=foo= as before, but now \lstinline=foo= triggers off its own
 previous instance \lstinline=foo[-PT12H]=.  Date-time offsets in
-inter-cycle-point triggers are expressed as ISO 8601 intervals (12 hours
+inter-cycle triggers are expressed as ISO 8601 intervals (12 hours
 in this case).  Figure~\ref{fig-tut-two} shows how this connects the cycling
 graph sections together.
 \begin{figure}
@@ -1844,11 +1898,11 @@ graph sections together.
 \label{fig-tut-two}
 \end{figure}
 
-Experiment with this suite to see how inter-cycle-point triggers work.
+Experiment with this suite to see how inter-cycle triggers work.
 Note that the first instance of \lstinline=foo=, at suite start-up, will
-trigger immediately in spite of its inter-cycle-point trigger, because cylc
+trigger immediately in spite of its inter-cycle trigger, because cylc
 ignores dependence on points earlier than the initial cycle point.
-However, the presence of an inter-cycle-point trigger usually implies something
+However, the presence of an inter-cycle trigger usually implies something
 special has to happen at start-up. If a model depends on its own previous
 instance for restart files, for example, then some special process has to
 generate the initial set of restart files when there is no previous cycle point
@@ -2140,7 +2194,7 @@ as the suite runs.
 \end{myitemize}
 
 
-\section{Suite Name Registration And Passphrases}
+\section{Suite Name Registration}
 \label{SuiteRegistration}
 
 Cylc commands target suites via names registered in a {\em suite name
@@ -2202,7 +2256,7 @@ exist yet unless you specify the entire target suite name.
 
 \lstinline=cylc db register --help= shows a number of other examples.
 
-\subsection{Database Operations}
+\subsection{Suite Name Registration Commands}
 
 On the command line, the  `database' (or `db') command category contains
 commands to implement the aforementioned operations.
@@ -2259,33 +2313,6 @@ suite name hierarchy. \lstinline=cylc copy --help= has some explicit examples.
 The same functionality is also available by right-clicking on suites
 or groups in the gcylc ``Open Registered Suite'' dialog.
 
-\subsection{Suite Passphrases}
-
-Any client process that connects to a running suite (this includes task
-messaging and user-invoked interrogation and control commands) must
-authenticate with a secure passphrase that has been loaded by the suite.
-A random passphrase is generated automatically in the suite definition
-directory at registration time if one does not already exist there. For
-the default Pyro-based connection method the passphrase file must be
-distributed to other accounts that host running tasks or from which
-you need monitoring or control access to the running suite.
-
-Alternatively, cylc can be configured to,
-\begin{myenumerate}
-\item use ssh to re-invoke task messaging commands on the suite host; or
-\item use a one-way polling mechanism for tracking task progress.
-\end{myenumerate}
-Neither of these methods require the suite passphrase to be installed
-on the task host. For ssh re-invocation ssh keys must be installed for
-the task-to-suite direction in addition to the suite-to-task setup
-already required for job submission. The automatic polling mechanism can
-be used as a last resort for hosts that do not allow routing back to the
-suite host for pyro or ssh. It can also be used as regular health check
-on submitted tasks under the other communications methods.
-
-See~\ref{RunningSuites} for more detail on cylc client/server
-communications, and how to use it.
-
 
 %\pagebreak
 \section{Suite Definition}
@@ -2462,7 +2489,7 @@ configuration items grouped under several top level section headings:
     \item {\bf [cylc] } - {\em non task-specific suite configuration}
     \item {\bf [scheduling] } - {\em determines when tasks are ready to run}
         \begin{myitemize}
-            \item tasks with special behaviour, e.g. clock-triggered tasks
+            \item tasks with special behaviour, e.g. clock-trigger tasks
             \item the dependency graph, which defines the relationships
                 between tasks
         \end{myitemize}
@@ -2611,7 +2638,7 @@ graph section. For example this graph,
 implies that B triggers off A for cycle points in which the hour matches $00$
 or $12$.
 
-To define inter-cycle-point dependencies, attach an offset indicator to the
+To define inter-cycle dependencies, attach an offset indicator to the
 left side of a pair:
 \lstset{language=suiterc}
 \begin{lstlisting}
@@ -2821,6 +2848,10 @@ For example, all these are valid in cylc:
         [[[ R1/T06 ]]]
             graph = "delayed_cold_corge => corge"
 
+        # Repeat once at the final cycle point
+        [[[ R1/P0Y ]]]
+            graph = "end_garply"
+
         # Repeat 3 times, every day at 08:30 after the initial cycle point
         [[[ R3/T0830 ]]]
             graph = "triple_grault => triple_garply"
@@ -3419,7 +3450,7 @@ that trigger if they're needed but otherwise remove themselves from the
 suite (you can run the {\em AutoRecover.async} example suite to see how
 this works).  The dashed graph edges ending in solid dots indicate
 suicide triggers, and the open arrowheads indicate conditional triggers
-as usual.
+as usual. Suicide triggers are ignored by default in the graph view; they can
+be displayed by toggling {\em View} $\rightarrow$ {\em Options} $\rightarrow$
+{\em Ignore Suicide Triggers}.
 
 \begin{figure}
 \begin{minipage}[b]{0.5\textwidth}
@@ -3570,13 +3601,13 @@ failed):
 \end{lstlisting}
 
 
-\paragraph{Inter-Cycle-Point Triggers}
+\paragraph{Inter-Cycle Triggers}
 \label{InterCyclePointTriggers}
 
 Typically most tasks in a suite will trigger off others in the same
 cycle point, but some may depend on others with other cycle points.
 This notably applies to warm-cycled forecast models, which depend on
-their own previous instances (see below); but other kinds of inter-cycle-point
+their own previous instances (see below); but other kinds of inter-cycle
 dependence are possible too.\footnote{In NWP forecast analysis
 suites parts of the observation processing and data assimilation
 subsystem will typically also depend on model background fields
@@ -3588,18 +3619,18 @@ kind of relationship in cylc:
         # B triggers off A in the previous cycle point
         graph = "A[-PT6H] => B"
 \end{lstlisting}
-inter-cycle-point and trigger type (or message trigger) notation can be
+inter-cycle and trigger type (or message trigger) notation can be
 combined:
 \begin{lstlisting}
     # B triggers if A in the previous cycle point fails:
     graph = "A[-PT6H]:fail => B"
 \end{lstlisting}
 
-At suite start-up inter-cycle-point triggers refer to a previous cycle point
+At suite start-up inter-cycle triggers refer to a previous cycle point
 that does not exist. This does not cause the dependent task to wait
 indefinitely, however, because cylc ignores triggers that reach back
 beyond the initial cycle point. That said, the presence of an
-inter-cycle-point trigger does normally imply that something special has to
+inter-cycle trigger does normally imply that something special has to
 happen at start-up. If a model depends on its own previous instance for
 restart files, for instance, then an initial set of restart files has to be
 generated somehow or the first model task will presumably fail with
@@ -3648,7 +3679,7 @@ satisfied by a previous instance of itself, {\em or} by an initial task with
 
 In effect, the \lstinline=R1= task masquerades as the previous-cycle-point trigger
 of its associated cycling task. At suite start-up initial tasks will
-trigger the first cycling tasks, and thereafter the inter-cycle-point trigger
+trigger the first cycling tasks, and thereafter the inter-cycle trigger
 will take effect.
 
 If a task has a dependency on another task in a different cycle point, the
@@ -3737,7 +3768,7 @@ then successive instances can run in parallel if the opportunity arises.
 However, if such a task would interfere with its own siblings for
 internal reasons (e.g.\ use of a hardwired non cycle dependent
 temporary file or similar) then it can be forced to run sequentially.
-This can be done with explicit inter-cycle-point triggers in the graph:
+This can be done with explicit inter-cycle triggers in the graph:
 \lstset{language=suiterc}
 \begin{lstlisting}
 [scheduling]
@@ -3789,30 +3820,159 @@ is equivalent to this one:
 
 \paragraph{Future Triggers}
 
-Cylc also supports inter-cycle-point triggering off tasks in the future (with
-respect to cycle point date-time, not real time!):
+Cylc also supports inter-cycle triggering off tasks ``in the future'' (with
+respect to cycle point):
 \begin{lstlisting}
 [[dependencies]]
     [[[T00,T06,T12,T18]]]
-        # Run A in this cycle.
-        # B triggers off A in the next cycle.
         graph = """
-            A[+PT6H] => B
+            # A runs in this cycle:
             A
+            # B in this cycle triggers off A in the next cycle.
+            A[PT6H] => B
         """
 \end{lstlisting}
-In contrast to normal inter-cycle-point triggers, future triggers present a
-problem at the suite stop time rather than at start-up - in the final cycle
-point \lstinline=B= wants to to trigger off \lstinline=A= at a future cycle
-point that does not exist. To avoid this problem cylc prevents tasks from
-spawning successors that depend on tasks in a non-existent future cycle point.
+(Recall that \lstinline=A[t+PT6H]= can run before \lstinline=B[t]= because
+tasks in cylc have private cycle points.)  Future triggers present a problem at
+suite shutdown rather than at start-up.  Here, \lstinline=B= at the final cycle
+point wants to trigger off an instance of \lstinline=A= that will never exist
+because it is beyond the suite stop point. Consequently cylc prevents tasks
+from spawning successors that depend on other tasks beyond the stop point.
+
+\paragraph{Clock Triggers}
+\label{ClockTriggerTasks}
+
+In addition to depending on other tasks (and on external events -
+see~\ref{ExternalTriggers}) tasks can depend on the wall clock: specifically,
+they can trigger off a wall clock time expressed as an offset from their own
+cycle point:
+\lstset{language=suiterc}
+\begin{lstlisting}
+[scheduling]
+    [[special tasks]]
+        clock-trigger = foo(PT2H)
+    [[dependencies]]
+        [[[T00]]]
+            graph = foo
+\end{lstlisting} 
+Here, \lstinline=foo[2015-08-23T00]= would trigger (other dependencies allowing)
+when the wall-clock time reaches \lstinline=2015-08-23T02=. Clock-trigger
+offsets are normally positive, to trigger some time {\em after} the wall-clock
+time reaches the task cycle point.
+
+Clock-triggers have no effect on scheduling if the suite is running sufficiently
+far behind the clock (e.g.\ after a delay, or because it is processing archived
+historical data) that the trigger times, which are relative to task cycle
+point, have already passed.
+
+\paragraph{Clock-Expire Triggers}
+\label{ClockExpireTasks}
+
+Tasks can be configured to {\em expire} - i.e.\ to skip job submission and
+enter the {\em expired} state - if they are too far behind the wall clock when
+they become ready to run, and other tasks can trigger off this.  As a possible
+use case, consider a cycling task that copies the latest of a set of files to
+overwrite the previous set: if the task is delayed by more than one cycle there
+may be no point in running it because the freshly copied files will just be
+overwritten immediately by the next task instance as the suite catches back up
+to real time operation.  Clock-expire tasks are configured like clock-trigger
+tasks, with a date-time offset relative to cycle point (\ref{ClockExpireRef}).
+The offset should be positive to make the task expire if the wall-clock time
+has gone beyond the cycle point.  Triggering off an expired task typically
+requires suicide triggers to remove the workflow that runs if the task has not
+expired. Here a task called \lstinline=copy= expires, and its downstream
+workflow is skipped, if it is more than one day behind the wall-clock (see also
+\lstinline=examples/clock-expire=):
+\lstset{language=suiterc}
+\begin{lstlisting}
+[cylc]
+   cycle point format = %Y-%m-%dT%H
+[scheduling]
+    initial cycle point = 2015-08-15T00
+    [[special tasks]]
+        clock-expire = copy(-P1D)
+    [[dependencies]]
+        [[[P1D]]]
+            graph = """
+        model[-P1D] => model => copy => proc
+              copy:expired => !proc"""
+\end{lstlisting}
+
+\paragraph{External Triggers}
+\label{ExternalTriggers}
+
+In addition to depending on other tasks (and on the wall clock -
+see~\ref{ClockTriggerTasks}) tasks can trigger off events reported by an
+external system.  For example, an external process could detect incoming data
+on an ftp server, and then notify a suite containing a task to retrieve the
+new data for processing. This is an alternative to long-running tasks that poll
+for external events.
+
+Note that cylc does not currently support triggering off ``filesystem events''
+(e.g.\ \lstinline=inotify= on Linux). However, external watcher processes can
+use filesystem events to detect triggering conditions, if that is appropriate,
+before notifying a suite with our general external event system.
+
+External triggers are not normally needed in date-time cycling suites driven
+by real time data that comes in at regular intervals.  In these cases a data
+retrieval task can be clock-triggered to submit at the expected data arrival
+time, so little time, if any, is wasted in polling.  But in non-cycling or
+integer cycling suites (which can be used for arbitrarily timed repeating
+workflows such as satellite data processing) external triggers may be
+preferred.
+
+The external triggering process must call \lstinline=cylc ext-trigger= with the
+name of the target suite, the message that identifies this type of event to the
+suite, and an ID that distinguishes this particular event instance from others
+(the name of the target task or its current cycle point is not required).  The
+event ID is just an arbitrary string to cylc, but it typically identifies the
+filename(s) of the latest dataset in some way.  When the suite daemon receives
+the external event notification it will trigger the next instance of any task
+waiting on that trigger (whatever its cycle point) and then broadcast
+(see~\ref{cylc-broadcast}) the event ID to the cycle point of the triggered
+task as \lstinline=$CYLC_EXT_TRIGGER_ID=.  Downstream tasks with the same cycle
+point therefore know the new event ID too and can use it, if they need to, to
+identify the same new dataset. In this way a whole workflow can be associated
+with each new dataset, and multiple datasets can be processed in parallel if
+they happen to arrive in quick succession.  
+
+An externally-triggered task must register the event it waits on in the suite
+scheduling section:
+\lstset{language=suiterc}
+\begin{lstlisting}
+# suite "sat-proc"
+[scheduling]
+    cycling mode = integer
+    initial cycle point = 1
+    [[special tasks]]
+        external-trigger = get-data("new sat X data avail")
+    [[dependencies]]
+        [[[P1]]]
+            graph = get-data => conv-data => products
+\end{lstlisting}
+
+Then, each time a new dataset arrives the external detection system should
+notify the suite like this:
+\lstset{language=transcript}
+\begin{lstlisting}
+shell$ cylc ext-trigger sat-proc "new sat X data avail" passX12334a
+\end{lstlisting}
+where ``sat-proc'' is the suite name and ``passX12334a'' is the ID string for
+the new event.  The suite passphrase must be installed on the triggering
+account.
+
+Note that only one task in a suite can trigger off a particular external
+message. Other tasks can trigger off the externally triggered task as required,
+of course.
+
+\lstinline=$CYLC_DIR/examples/satellite/ext-triggers/suite.rc= is a working
+example of a simulated satellite processing suite.
 
 \subsubsection{Model Restart Dependencies}
 \label{ModelRestartDependencies}
 
 Warm-cycled forecast models generate {\em restart files}, e.g.\ model
 background fields, to initialize the next forecast. This kind of
-dependence requires an inter-cycle-point trigger:
+dependence requires an inter-cycle trigger:
 \lstset{language=suiterc}
 \begin{lstlisting}
 [scheduling]
@@ -3848,11 +4008,6 @@ be finished first:
             graph = "A[-PT24H] | A[-PT18H] | A[-PT12H] | A[-PT6H] => A"
 \end{lstlisting}
 
-If you need to skip one or more cycle points in a suite like this, manually
-remove the tasks that cannot run (or use \lstinline=cylc purge= to
-remove their downstream dependents too) and manually trigger the first
-post-gap task.
-
 \subsection{Runtime - Task Configuration}
 \label{NIORP}
 
@@ -4192,6 +4347,12 @@ For this to work,
 \begin{myitemize}
     \item passwordless ssh must be configured between the suite and task
     host accounts.
+
+    \item Your shell initialization (.profile, .bashrc, .cshrc, etc.) on the
+    remote host must not produce any standard output, as it may confuse
+    commands such as scp. See \url{http://www.openssh.com/faq.html#2.9} for
+    more information.
+
     \item cylc must be installed on task hosts so that remote tasks can
     use cylc messaging and poll or kill commands.
     \begin{myitemize}
@@ -4947,6 +5108,36 @@ These are written to the top of the task job script like this:
 #PBS -l walltime=00:01:00
 \end{lstlisting}
 
+\subsubsection{moab}
+
+Submits tasks to the Moab workload manager by the \lstinline=msub= command.
+Moab directives can be provided in the suite.rc file; the syntax is very
+similar to PBS:
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[my_task]]
+        [[[job submission]]]
+            method = moab
+        [[[directives]]]
+            -V =
+            -q = foo
+            -l nodes = 1
+            -l walltime = 00:01:00
+\end{lstlisting}
+These are written to the top of the task job script like this:
+\lstset{language=bash}
+\begin{lstlisting}
+#!/bin/bash
+# DIRECTIVES
+#PBS -V
+#PBS -q foo
+#PBS -l nodes=1
+#PBS -l walltime=00:01:00
+\end{lstlisting}
+(Moab understands \lstinline=#PBS= directives).
+
+
 \subsubsection{sge}
 
 Submits tasks to Sun/Oracle Grid Engine by the \lstinline=qsub= command.
@@ -5042,6 +5233,28 @@ that submit internal jobs and direct the associated output to other
 files). For less internally complex tasks, however, the files referred
 to here will be complete task job logs.
 
+Some job submission methods, such as \lstinline=pbs=, redirect a job's stdout
+and stderr streams to a separate cache area while the job is running. The
+contents are only copied to the normal locations when the job completes. This
+means that \lstinline=cylc log= or the gcylc GUI will be unable to find the
+job's stdout and stderr streams while the job is running. Some sites with these
+job submission methods are known to provide commands for viewing and/or
+tail-follow a job's stdout and stderr streams that are redirected to these
+cache areas. If this is the case at your site, you can configure cylc to make
+use of the provided commands by adding some settings to the global site/user
+config. E.g.:
+
+\begin{lstlisting}
+[hosts]
+    [[HOST]]  # <= replace this with a real host name
+        [[[batch systems]]]
+            [[[[pbs]]]]
+                err tailer = qcat -f -e \%(job_id)s
+                out tailer = qcat -f -o \%(job_id)s
+                err viewer = qcat -e \%(job_id)s
+                out viewer = qcat -o \%(job_id)s
+\end{lstlisting}
+
 \subsection{Overriding The Job Submission Command}
 \label{CommandTemplate}
 
@@ -5426,45 +5639,59 @@ The gcylc GUI requires direct Pyro connections to its target suite. If
 that is not possible, run gcylc on the suite host.
 
 
-\subsection{Connection Authentication}
+\subsection{Client Authentication and Passphrases}
 \label{ConnectionAuthentication}
 
-All Pyro connections to a running suite (task messaging and
-user-invoked commands) must authenticate with an arbitrary single line of
-text in a file called \lstinline=passphrase=, which will be found and
-used automatically if installed properly - see below.  A secure MD5
-checksum, not the raw passphrase, is passed across the network. A random
-passphrase is generated in the suite definition directory when a suite
-is registered, but you can create your own if you wish.
+Suite daemons listen on dedicated network ports for incoming client requests -
+task messages and user-invoked commands (CLI or GUI). The \lstinline=cylc scan=
+command reveals which suites are running on scanned hosts, and what ports they
+are listening on.
 
-For ssh task messaging and user command re-invocation, on the other
-hand, the suite passphrase is only required on the suite host account
-but ssh keys must be installed for passwordless connections instead.
+Client programs have to authenticate with the target suite daemon before
+issuing commands or requesting information.  Cylc has two authentication
+levels: full read and control via a suite-specific passphrase
+(see~\ref{passphrases}); and configurable free ``public'' access
+(see~\ref{PublicAccess}).
 
-\subsubsection{Suite Pyro Passphrase Locations}
+\subsubsection{Full Control - Suite Passphrases}
 \label{passphrases}
 
-Suite passphrases currently have to be installed manually to all task
-host accounts that use the Pyro communication method (see above); and
-also to accounts used to run commands that interact directly with the
-suite via Pyro.
+A file called \lstinline=passphrase= is generated in the suite definition
+directory at registration time, containing a single line of random text.
+The passphrase is loaded by the suite daemon at start-up and used to
+authenticate client connections. Suite passphrases are used in an encrypted
+challenge-response scheme; they are never sent raw over the network.
 
-Legal passphrase locations, in order of preference, are:
+Clients on the suite host account automatically pick up the passphrase from the
+suite definition directory. On other accounts the passphrase has to be
+installed manually. This includes task host accounts unless the suite uses ssh
+messaging on that host (see~\ref{TaskComms}).  Allowed passphrase locations are:
 \begin{myenumerate}
-    \item \lstinline=$CYLC_SUITE_DEF_PATH/passphrase=
     \item \lstinline=$HOME/.cylc/SUITE_HOST/SUITE_OWNER/SUITE_NAME/passphrase=
     \item \lstinline=$HOME/.cylc/SUITE_HOST/SUITE_NAME/passphrase=
     \item \lstinline=$HOME/.cylc/SUITE_NAME/passphrase=
 \end{myenumerate}
-Remote tasks know the location of the remote suite definition directory
-(if one exists) through their execution environment. Local (suite host)
-user command invocations can find the suite definition directory in the
-suite name database. Remote user command invocations, however,
-cannot interrogate the database on the command host because the suite
-will not be registered there (cylc cannot assume that the command host
-shares a common filesystem with the suite host). Consequently remote
-command host accounts must have the suite passphrase installed in one of
-the secondary locations under \lstinline=$HOME/.cylc/=.
+
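+For example, the passphrase can be installed manually under the last of the
+locations above (a minimal sketch; the suite name \lstinline=my.suite=, the
+account, the host, and the source path are placeholders):
+\lstset{language=transcript}
+\begin{lstlisting}
+# "my.suite", "user" and "other-host" below are placeholders:
+shell$ ssh user@other-host mkdir -p .cylc/my.suite
+shell$ scp /path/to/suite/definition/passphrase user@other-host:.cylc/my.suite/
+\end{lstlisting}
+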
+\subsubsection{Public Access - No Passphrase}
+\label{PublicAccess}
+
+Possession of a suite passphrase gives full read and control access to the
+suite.  Without the passphrase the amount of information revealed by a suite
+daemon is determined by the public access privilege level set in global
+site/user config (\ref{GlobalAuth}) and optionally overridden in suites
+(\ref{SuiteAuth}):
+\begin{myitemize}
+    \item {\em identity} - only suite and owner names revealed
+    \item {\em description} - identity plus suite title and description
+    \item {\em state-totals} - identity, description, and task state totals
+    \item {\em full-read} - full read-only access for monitor and GUI
+    \item {\em shutdown} - full read access plus shutdown, but no other
+        control.
+\end{myitemize}
+The default public access level is {\em state-totals}.
+The \lstinline=cylc scan= command can print descriptions and task state totals
+in addition to basic suite identity, if you have the right passphrases or if
+the requested information is revealed publicly.
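+
+As a sketch, a site could restrict public access to suite identity only via the
+global site/user config file; the section and item name below are assumed -
+see~\ref{GlobalAuth} for the definitive reference:
+\lstset{language=suiterc}
+\begin{lstlisting}
+# global site/user config (item name assumed; see the global config reference):
+[authentication]
+    public = identity
+\end{lstlisting}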
 
 
 \subsection{How Tasks Get Access To Cylc}
@@ -5518,7 +5745,7 @@ As a suite runs its task proxies may pass through the following states:
 
 \begin{myitemize}
     \item {\bf waiting} - prerequisites not satisfied yet
-    (note that clock-triggered tasks also wait on their trigger time).
+    (note that clock-trigger tasks also wait on their trigger time).
 
     \item {\bf queued} - ready to run (prerequisites satisfied) but
     temporarily held back by an {\em internal cylc queue}
@@ -5589,8 +5816,9 @@ remote port file. Then use the \lstinline=--user= and
 \begin{lstlisting}
 shell$ cylc monitor --user=USER --host=HOST SUITE
 \end{lstlisting}
-If you know the port number of the target suite, give it on the command
-line to prevent the port-retrieving ssh connection being attempted:
+
+Alternatively, you can determine suite port numbers using \lstinline=cylc scan=,
+and use them explicitly on the command line:
 \lstset{language=transcript}
 \begin{lstlisting}
 shell$ cylc monitor --user=USER --host=HOST --port=PORT SUITE
@@ -5689,28 +5917,58 @@ suite listed below under~\ref{EventHandling}.
 \subsection{Suite And Task Event Handling}
 \label{EventHandling}
 
-See also~\ref{SuiteEventHandling} and~\ref{TaskEventHandling}
-in the {\em Suite.rc Reference}.
-
-Cylc can call nominated event handlers when certain suite or task events
-occur. This is intended to facilitate centralized alerting and automated
-handling of critical events. Event handlers can send an email or an SMS,
-call a pager, and so on; or intervene in the operation of their own
-suite using cylc commands.  \lstinline=cylc [hook] email-suite= and
-\lstinline=cylc [hook] email-task= are example event handlers packaged
-with cylc.
+See also~\ref{SuiteEventHandling},~\ref{TaskEventHandling}
+and~\ref{TaskEventHandling2} in the {\em Suite.rc Reference}.
 
-Event handlers can be located in the suite bin directory, otherwise
-it is up to you to ensure their location is in \lstinline=$PATH=
-(in the shell in which cylc runs, on the suite host).
+Cylc can call nominated event handlers when certain suite or task events occur.
+This is intended to facilitate centralized alerting and automated handling of
+critical events. Event handlers can be used to send a message, call a pager,
+and so on; or intervene in the operation of their own suite using cylc
+commands.
 
-Task event handlers are passed the following arguments by the suite daemon:
+To send an email, you can use the built-in setting
+\lstinline=[[[events]]]mail events= to specify a list of events for which
+notifications should be sent. E.g. to send an email on (submission) failed and
+retry:
 
+\lstset{language=suiterc}
 \begin{lstlisting}
-<task-event-handler> EVENT SUITE TASKID MESSAGE
+[runtime]
+    [[foo]]
+        retry delays = PT0S, PT30S
+        script = test "${CYLC_TASK_TRY_NUMBER}" -eq 3
+        [[[events]]]
+            mail events = submission failed, submission retry, failed, retry
 \end{lstlisting}
-where EVENT is one of the following:
 
+By default, the emails will be sent to the current user with:
+
+\begin{myitemize}
+    \item \lstinline=to:= set as \lstinline=$USER=
+    \item \lstinline=from:= set as \lstinline=notifications@$(hostname)=
+    \item SMTP server at \lstinline=localhost:25=
+\end{myitemize}
+
+These can be configured using the following settings, as shown in the example
+below:
+\begin{myitemize}
+    \item \lstinline=[[[events]]]mail to= (a list of email addresses)
+    \item \lstinline=[[[events]]]mail from=
+    \item \lstinline=[[[events]]]mail smtp=
+\end{myitemize}
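+
+For example, to send failure and retry notifications to a shared address via a
+site mail server (the addresses and SMTP server below are placeholders):
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[foo]]
+        [[[events]]]
+            # placeholder addresses and SMTP server:
+            mail events = submission failed, failed, retry
+            mail to = ops-team@example.com
+            mail from = notifications@suite-host.example.com
+            mail smtp = smtp.example.com:25
+\end{lstlisting}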
+
+Event handler commands can be located in the suite \lstinline=bin/= directory;
+otherwise it is up to you to ensure their location is in \lstinline=$PATH= (in
+the shell in which cylc runs, on the suite host). The commands should be
+lightweight and should return quickly. (Each event
+handler is invoked by a child process in a finite process pool that is also
+used to submit, poll and kill jobs.  The child process will wait for the event
+handler to complete before moving on to the next item in the queue. If the
+process pool is saturated with long-running event handlers, the suite will
+appear to hang.)
+
+Task event handlers can be specified using the
+\lstinline=[[[event hooks]]]<event> handler= settings, where
+\lstinline=<event>= is one of:
 \begin{myitemize}
     \item `submitted' - the job submit command was successful
     \item `submission failed' - the job submit command failed
@@ -5723,8 +5981,37 @@ where EVENT is one of the following:
     \item `retry' - the task failed but will retry
     \item `execution timeout' - task execution timed out
 \end{myitemize}
-MESSAGE, if provided, describes what has happened, and TASKID identifies
-the task (\lstinline=NAME.CYCLE= for cycling tasks).
+
+The value of each setting should be a list of command lines or command line
+templates (see below).
+
+Alternatively, task event handlers can be specified using the
+\lstinline=[[[events]]]handlers= and the \lstinline=[[[events]]]handler events=
+settings, where the former is a list of command lines or command line templates
+(see below) and the latter is a list of events for which these commands should
+be invoked.
+
+A command line template may have any or all of these patterns, which will be
+substituted with actual values:
+\begin{myitemize}
+    \item \%(event)s: event name
+    \item \%(suite)s: suite name
+    \item \%(point)s: cycle point
+    \item \%(name)s: task name
+    \item \%(submit\_num)s: submit number
+    \item \%(id)s: task ID (i.e. \%(name)s.\%(point)s)
+    \item \%(message)s: event message, if any
+\end{myitemize}
+
+Otherwise, the handler command will be called with the following command line
+arguments:
+\begin{lstlisting}
+<task-event-handler> %(event)s %(suite)s %(id)s %(message)s
+\end{lstlisting}
+
+For an explanation of the substitution syntax, see
+\href{https://docs.python.org/2/library/stdtypes.html#string-formatting}{String Formatting Operations}
+in the Python documentation.
 
 The retry event occurs if a task fails and has any remaining retries
 configured (see~\ref{TaskRetries}).
@@ -5736,34 +6023,34 @@ running tasks} so if you wish to pass them additional information via
 the environment you must use [cylc] $\rightarrow$ [[environment]],
 not task runtime environments.
 
-Here is an example suite that tests the {\em retry} and {\em failed} events.
-The handler in this case simply echoes its command line arguments to
-suite stdout.
+The following two \lstinline=suite.rc= snippets show how to specify
+event handlers using the two alternative methods:
 
 \lstset{language=suiterc}
 \begin{lstlisting}
-[scheduling]
-    initial cycle point = 20100808T00
-    final cycle point = 20100810T00
-    [[dependencies]]
-        [[[T00]]]
-            graph = "foo => bar"
 [runtime]
     [[foo]]
         retry delays = PT0S, PT30S
-        script = """
-echo TRY NUMBER: $CYLC_TASK_TRY_NUMBER
-sleep 10
-# retry twice and succeed on the final try,
-# but fail definitively in the final cycle.
-if (( CYLC_TASK_TRY_NUMBER <= 2 )) || \
-    (( CYLC_TASK_CYCLE_POINT == CYLC_SUITE_FINAL_CYCLE_POINT )); then
-    echo ABORTING
-    /bin/false
-fi"""
+        script = test "${CYLC_TASK_TRY_NUMBER}" -eq 2
         [[[event hooks]]]
-            retry handler = "echo !!!!!EVENT!!!!! "
-            failed handler = "echo !!!!!EVENT!!!!! "
+            retry handler = "echo '!!!!!EVENT!!!!!' "
+            failed handler = "echo '!!!!!EVENT!!!!!' "
+\end{lstlisting}
+
+\lstset{language=suiterc}
+\begin{lstlisting}
+[runtime]
+    [[foo]]
+        retry delays = PT0S, PT30S
+        script = test "${CYLC_TASK_TRY_NUMBER}" -eq 2
+        [[[events]]]
+            handlers = "echo '!!!!!EVENT!!!!!' "
+            handler events = retry, failed
+\end{lstlisting}
+
+Note: The handler command is called like this:
+\begin{lstlisting}
+echo '!!!!!EVENT!!!!!' %(event)s %(suite)s %(id)s %(message)s
 \end{lstlisting}
 
 \subsection{Reloading The Suite Definition At Runtime}
@@ -5840,6 +6127,7 @@ you can edit the generated task job script to make one-off changes before the
 task submits.
 
 \subsection{Runtime Settings Broadcast and Communication Between Tasks}
+\label{cylc-broadcast}
 
 The \lstinline=cylc broadcast= command overrides \lstinline=[runtime]=
 settings in a running suite. This can
@@ -6157,78 +6445,83 @@ you wish:
 \end{lstlisting}
 
 
-\subsection{Triggering Off Tasks In Other Suites}
+\subsection{Inter-Suite Dependence: Triggering Off Tasks In Other Suites}
 \label{SuiteStatePolling}
 
-The \lstinline=cylc suite-state= command, which interrogates suite run
-databases, has a polling mode that waits on a given task achieving a
-given state.  See \lstinline=cylc suite-state --help= for command
-options and defaults.
+The \lstinline=cylc suite-state= command interrogates suite run databases. It
+has a polling mode that waits for a given task in the target suite to achieve a
+given state. This can be used to make task scripting wait for a remote task
+to succeed (for example). The suite graph notation also provides a way to
+define automatic suite-state polling tasks, which use the same polling command
+under the hood.
 
-The suite graph notation also allows you to define local tasks that, in
-effect, represent tasks in other suites by automatically polling for
-them using the \lstinline=cylc suite-state= command. Here's how to
-trigger a task \lstinline=bar= off a task \lstinline=foo= in another
-suite called \lstinline=other.suite=:
+Here's how to trigger a task \lstinline=bar= off a task \lstinline=foo= in
+a remote suite called \lstinline=other.suite=:
 \begin{lstlisting}
 [scheduling]
     [[dependencies]]
-        [[[T00,T12]]]
-            graph = "FOO<other.suite::foo> => bar"
+        [[[T00, T12]]]
+            graph = "my-foo<other.suite::foo> => bar"
 \end{lstlisting}
-Local task \lstinline=FOO= will poll for the success of \lstinline=foo=
-in suite \lstinline=other.suite= at the same cycle point. Other task states
-can be polled like this,
+Local task \lstinline=my-foo= will poll for the success of \lstinline=foo=
+in suite \lstinline=other.suite=, at the same cycle point, and will succeed
+only when (or if) the remote task does. Other task states can also be polled:
 \begin{lstlisting}
-   graph = "FOO<other.suite::foo:fail> => bar"
+   graph = "my-foo<other.suite::foo:fail> => bar"
 \end{lstlisting}
 
-Default polling parameters (the maximum number of polls and the interval
-between them) are printed by \lstinline=cylc suite-state --help=.
-These can be configured if necessary under the local polling task
-runtime section:
+The default polling parameters (e.g.\ maximum number of polls and the interval
+between them) are printed by \lstinline=cylc suite-state --help= and can be
+configured if necessary under the local polling task runtime section:
 \begin{lstlisting}
 [scheduling]
     [[dependencies]]
         [[[T00,T12]]]
-            graph = "FOO<other.suite::foo> => bar"
+            graph = "my-foo<other.suite::foo> => bar"
 [runtime]
-    [[FOO]]
+    [[my-foo]]
         [[[suite state polling]]]
             max-polls = 100
             interval = PT10S
 \end{lstlisting}
 
-The remote suite does not have to be running when polling commences (or
-at all if the remote condition has already been achieved) because the
-command interrogates the suite run database, not the suite server
-process.
-
-For suites owned by others or those with run databases in non-standard
-locations use the \lstinline=--run-dir= option or, in-suite,
+For suites owned by others, or those with run databases in non-standard
+locations, use the \lstinline=--run-dir= option, or in-suite:
 \begin{lstlisting}
 [runtime]
-    [[FOO]]
+    [[my-foo]]
         [[[suite state polling]]]
             run-dir = /path/to/top/level/cylc/run-directory
 \end{lstlisting}
 
-To trigger off remote tasks with different cycle points just arrange for
-the local polling task to be on the same cycling sequence as the remote
-task that it represents. For instance, if local task \lstinline=cat=
-cycles 6-hourly at \lstinline=0,6,12,18= but needs to trigger off a
-remote task \lstinline=dog= with cycle points of \lstinline=3,9,15,21=
-hours,
+If the remote task has a different cycling sequence, just arrange for the
+local polling task to be on the same sequence as the remote task that it
+represents. For instance, if local task \lstinline=cat= cycles 6-hourly at
+\lstinline=0,6,12,18= but needs to trigger off a remote task \lstinline=dog=
+at \lstinline=3,9,15,21=,
 \begin{lstlisting}
 [scheduling]
     [[dependencies]]
+        [[[T03,T09,T15,T21]]]
+            graph = "my-dog<other.suite::dog>"
         [[[T00,T06,T12,T18]]]
-            graph = "DOG<other.suite::dog>[-PT3H] => cat"
+            graph = "my-dog[-PT3H] => cat"
 \end{lstlisting}
-This results in \lstinline=DOG= having cycle points of
-\lstinline=3,9,15,21= - the sames as \lstinline=dog= in
-\lstinline=other.suite=.
 
+For suite-state polling the cycle point of the target task is treated as a
+literal string, so the polling command must be told if the remote suite uses a
+different cycle point format (e.g.\ \lstinline=2010-08-08T00= rather than
+\lstinline=20100808T00=). Use the \lstinline=--template= option for this,
+or in-suite:
+\begin{lstlisting}
+[runtime]
+    [[my-foo]]
+        [[[suite state polling]]]
+            template = %Y-%m-%dT%H
+\end{lstlisting}
+
+Note that the remote suite does not have to be running when polling commences
+because the command interrogates the suite run database, not the suite server
+process.
 
 \section{Other Topics In Brief}
 
@@ -6250,16 +6543,12 @@ The following topics have yet to be documented in detail.
         a previous state of operation. They are mentioned in the Tutorial
         (\ref{Tutorial}).
 
-    \item Recursive purge: this is a powerful intervention but you need
-        to understand how it works before using it. See
-        \lstinline=cylc purge help= for details.
-
     \item Task insertion (add a new task proxy instance to a running suite):
         see \lstinline=cylc insert --help=.
 
     \item Sub-suites: to run another suite inside a task, just invoke the
         sub-suite, with appropriate start and end cycle points (probably a
-        single cycle point), via the host task's \item=script= item:
+        single cycle point), via the host task's \lstinline=script= item:
 
 \lstset{language=suiterc}
 \begin{lstlisting}
@@ -6320,7 +6609,7 @@ advantages to fine-graining:
         that failed.
 
     \item More code reuse: similar tasks may be able to call the same
-        underlying same script or command with differing input parameters.
+        underlying script or command with differing input parameters.
 
 \end{myitemize}
 
@@ -6482,47 +6771,34 @@ require that. One way to do this is to declare the product upload task to be
 {\em sequential}, which is equivalent to making it depend on its own previous
 instance (see~\ref{SequentialTasks}).
 
-\subsection{Use Of Clock-Triggered Tasks}
-\label{ClockTriggeredTasks}
-
-Most tasks submit as soon as their prerequisites (task triggers) are satisfied,
-but {\em clock-triggered} tasks also wait on a wall clock time expressed as an
-offset from their cycle point. For example, task \lstinline=foo= below will
-trigger (other dependencies allowing) 2 hours after the wall clock time passes
-its cycle point (which in this case must be a date-time).
-\lstset{language=suiterc}
-\begin{lstlisting}
-[scheduling]
-    [[special tasks]]
-        clock-triggered = foo(PT2H)
-\end{lstlisting} 
+\subsection{Use Of Clock-Trigger Tasks}
 
 Tasks that wait on external real time data should have a clock-trigger to delay
-submission until roughly the expected time of data availability, otherwise they
-may clutter up batch scheduler queues by submitting hours earlier. Similarly,
-suite polling tasks (for inter-suite dependence in real time operation) should
-use a clock-trigger to delay their submission until the expected time of the 
-remote suite event.
+submission until roughly the expected time of data availability
+(see~\ref{ClockTriggerTasks}), otherwise they could clutter up your batch
+scheduler queue by submitting hours too early. Similarly, suite polling tasks
+(for inter-suite dependence in real time operation) should use a clock-trigger
+to delay their submission until the expected time of the remote suite event.
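+
+For example, assuming the \lstinline=clock-trigger= special task syntax
+(see~\ref{ClockTriggerTasks}), a suite polling task could be held back until
+one hour after its cycle point:
+\begin{lstlisting}
+[scheduling]
+    [[special tasks]]
+        clock-trigger = my-foo(PT1H)
+    [[dependencies]]
+        [[[T00]]]
+            graph = "my-foo<other.suite::foo> => bar"
+\end{lstlisting}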
 
 \subsection{Tasks That Wait On Something}
 
 Some tasks wait on external events and therefore need to repeatedly check and
 wait for the event before reporting eventual success (or perhaps failure after
 a timeout). For example, a task that waits for a file to appear on an ftp
-server. Typically these should be clock-triggered tasks (see above), but once
+server. Typically these should be clock-trigger tasks (see above), but once
 triggered there are two ways to handle the repeated checking: the task itself
 could implement a check-and-wait loop; or you could just configure multiple
-retries for the task, in the suite definition (see~\ref{TaskRetries}).
+retries for the task (see~\ref{TaskRetries}).
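+
+A minimal sketch of the retry approach (the file path and retry schedule are
+illustrative only):
+\begin{lstlisting}
+[runtime]
+    [[get-obs]]
+        # Fail if the file has not arrived yet; cylc will retry later.
+        script = test -f /data/incoming/obs-${CYLC_TASK_CYCLE_POINT}.nc
+        retry delays = PT30M, PT30M, PT30M, PT30M
+\end{lstlisting}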
 
 \subsection{Do Not Treat Real Time Operation As Special}
 
 Cylc suites, without modification, can handle real time and delayed operation
-equally well.  In caught-up real time operation, clock-triggered tasks
+equally well.  In caught-up real time operation, clock-trigger tasks
 constrain the behaviour of the whole suite, or at least of any tasks
 downstream of them in the dependency graph.  In delayed or historical operation
-clock-triggered tasks will not constrain the suite at all, and cylc's cycle
+clock-trigger tasks will not constrain the suite at all, and cylc's cycle
 point interleaving abilities come to the fore, because the clock-trigger times
-have already passed. But if a clock-triggered task catches up to the wall
+have already passed. But if a clock-trigger task catches up to the wall
 clock, it will automatically wait again. In this way cylc suites naturally 
 transition between delayed and real time operation as required.
 
@@ -7241,13 +7517,6 @@ triggering failure rather than create task instances at incorrect cycle points.
 %state-N     # oldest state dump; will be deleted at next update
 %\end{lstlisting}
 %
-%In addition, immediately prior to any system intervention a special
-%uniquely named state dump file is created and logged, e.g.:
-%
-%\begin{lstlisting}
-%2010/03/30 14:54:29 WARNING main - pre-purge state dump: state.2010:3:30:14:54:29
-%\end{lstlisting}
-%
 %If you accidentally intervene wrongly in a suite, just shut it down
 %and restart from the pre-intervention state dump:
 %
@@ -7320,11 +7589,6 @@ triggering failure rather than create task instances at incorrect cycle points.
 %    unable to run. Solution: insert tasks (possibly one-off cold-start
 %    tasks) to get the suite running again.
 %
-%    \ldots This could also happen if you purge enough
-%    cycle points that the difference between the pre- and post-purge tasks
-%    is greater than the runahead limit. Solution: ensure your runahead
-%    limit is large enough to span these gaps.
-%
 %    \item If a failed task has not yet been removed or reset by the
 %    system operator it will eventually stall the suite. Solution:
 %    Fix, or otherwise deal with, failed tasks as quickly as possible.
@@ -7374,30 +7638,6 @@ triggering failure rather than create task instances at incorrect cycle points.
 %        run again, after which it and its downstream dependants will
 %        catch up to the rest of the suite as quickly as possible.
 %
-%    \item {\em An important external task fails, but cannot be fixed.}
-%        In this case, if the task has a lot of downstream dependants,
-%        you will presumably need omit one or more cycle points of the
-%        affected tasks, and cold-start their part of the suite at the
-%        earliest possible subsequent cycle point. To do this, insert the
-%        relevant cold start task, or task group, at the later cycle point,
-%        then purge the failed task and everything that depends on it (and
-%        on them, and so on) down to the cold-start time. Other downstream
-%        forecast models will be able to pick up immediately so long their
-%        most recent previous instance (i.e.\ just before the gap) wrote out
-%        sufficient restart outputs to bridge the gap (otherwise they,
-%        or perhaps the entire suite, will need to be cold-started).
-%
-%    \item {\em HELP, I attempted a drastic intervention in a complex
-%        suite, using the horrifying purge command, and this time I
-%        really screwed the pooch!} Before any operation that alters the
-%        system state, cylc automatically writes out a special state dump
-%        file and reports the filename in the main log. Shut the suite
-%        down and restart it from its pre-intervention state (just
-%        cut-and-paste the state dump filename from the main log file -
-%        the file path is not required because the file will be in the
-%        configured suite state dump directory).  Then {\em retry your
-%        intervention in practice mode} before doing it for real!
-%
 %\end{myitemize}
 %
 %\subsection{Dead Suite Cleanup}
diff --git a/doc/development.tex b/doc/development.tex
deleted file mode 100644
index 52089be..0000000
--- a/doc/development.tex
+++ /dev/null
@@ -1,233 +0,0 @@
-\section{Object Oriented Programming}
-\label{ObjectOrientedProgramming}
-
-Cylc relies heavily on Object Oriented Programming (OOP) concepts,
-particularly the {\em polymorphic} nature of the task proxy objects.
-An absolutely minimal explanation of this follows; 
-please refer to an OOP reference for more detail.
-
-A {\bf class} is a generalisation of data type to include behaviour
-(i.e.\ functions or methods) as well as state. 
-
-%For example, a $shape$ class could define a $position$ data member to
-%hold the location of a shape object, a $move()$ method that by which
-%a shape object can alter its position, and a $draw()$ method that
-%causes it to display itself on screen.
-
-An {\bf object} is a more or less self contained specific instance
-of a class. This is analagous to specific integer variables being 
-instances of the integer data type.
-
-A {\bf derived class} or {\bf subclass} {\em inherits} the properties
-(methods and data members) of its parent class. It can also override
-specific properties, or add new properties that aren't present in the
-parent. Calling a particular method on an object invokes the object's
-own method if one is defined, otherwise the parent class is searched,
-and so on down to the root of the inheritance graph. 
-
-%For example, we could derive a $circle$ class from $shape$, adding a
-%`radius' data member and overriding the $draw()$ to get circle objects
-%to display themselves as actual circles.  Because we didn't override the
-%$move()$ method, calling $circle.move()$ would invoke the base class
-%method, $shape.move()$. 
-
-
-{\bf Polymorphism} is the ability of one type to appear as and be used
-like another type.  In OOP languages with inheritance, this usually
-refers to the ability to treat derived/sub-class objects as if they were
-members of a common base class. In particular, a group of mixed-type
-objects can all be treated as members of a common base class. 
-%For example, a group of %$circles$, $triangles$, and $squares$ could 
-%be manipulated by code designed entirely to handel $shapes$; calling
-%$[shape].draw()$ will invoke the right derived class $draw()$ method. 
-This is a powerful mechanism because it allows existing old code,
-without modification, to manipulate new objects so long as they 
-derive from the original base class.
-%If we later derive an entirely new kind of shape ($hexagon$, say) with
-%it's own unique behaviour, the existing program, without modification,
-%will process the new objects in the proper hexagon-specific way.  
-
-In cylc, all task proxy objects are derived from a base class that 
-embodies the properties and behaviour common to all task proxies. 
-The scheduling algorithm works with instances of the base class so that
-any current or future derived task object can be handled by the program
-without modification (other than deriving the new subclass itself).
-
-\subsection{Single- or Multi-Threaded Pyro?}
-\label{Single-orMulti-ThreadedPyro?}
-
-In single threaded mode Pyro's \lstinline=handleRequests()= returns
-after at least one request (i.e.\ remote method call) was
-handled, or after a timeout. Using \lstinline|timeout = None| 
-allows us to process tasks only when remote method invocations
-come in.  Further, we can detect the remote calls that actually change
-task states, and thereby drop into the task processing code only when
-necessary, which eliminates a lot of extraneous output when debugging
-the task processing loop (e.g.\ in dummy mode there are a lot of remote
-calls on the dummy clock object, which does not alter tasks at all). 
-
-In multithreaded mode, \lstinline=handleRequests()= returns immediately
-after creating a new request handling thread for a single remote object,
-and thereafter remote method calls on that object come in asynchronously
-in the dedicated thread. This is not good for cylc's scheduling
-algorithm because tasks are only set running in the task processing
-block which can be delayed while \lstinline=handleRequests()= blocks waiting
-for a new connection to be established, even as messages that warrant
-task processing are coming in on existing connections. The only way
-around this seems to be to do task processing on \lstinline=handleRequests()=
-timeouts which results in a lot of unnecessary processing when nothing
-important is happening.
-
-Addendum, we now use a timeout on \lstinline=handleRequests()= because
-contact tasks can trigger purely on the wall clock, so we delaying task
-processing when no messages are coming in may prevent these contact
-tasks from triggering.   So\dots we may want to revist Multithreading\dots
-
-%\subsubsection{Handling File Dependencies: Possible Alternative Method}
-%
-%In principle extra information could be attached to cylc output
-%messages so that actual file locations could be passed dynamically from
-%to whichever tasks use the output. Cylc currently cannot do this (you
-%can put actual file locations in the messages, but the receiver has to
-%have the exact matching message and therefore would have to know the
-%location in advance). This is a possible future development, but is 
-%probably not worth the effort because configuring the external tasks 
-%to report this information takes more effort than putting the same
-%information into the cylc task definition files. The cylc setup
-%would remain entirely context-independent, which is nice, and would
-%automatically pass on changes to the external input / output config of
-%the system.
-
-%\subsection{Unusual Task Behaviour}
-%\label{UnusualTaskBehaviour}
-%
-%If you require task behaviour that cannot be represented in the current 
-%task definition files you will need to derive a new task class manually.
-%Use the auto-generated task classes as a starting point. Raw Python 
-%task class definitions can be kept in the suite taskdef sub-directory
-%alongside the taskdef files; they will be copied verbatim to the
-%\lstinline=configured= sub-directory when the suite is configured.
-%
-%Out of the entire EcoConnect operation, only the highly unusual
-%scheduling behaviour of the TopNet river model requires a custom task
-%class (it keeps up with real time streamflow observations and uses
-%the {\em most recent} regional weather forecast output). 
-
-%\subsubsection{Fuzzy Prerequisites}
-%
-%EcoConnect's Topnet model (mentioned just above) runs hourly and
-%triggers off the most recent regional weather forecast available.
-%The cycle point interval between the two tasks can vary. This makes 
-%use of cylc's {\em fuzzy prerequisites}, which the task definition
-%parser is not currently aware of (hence the custom Python taskdef).
-
-%\subsection{Task Prerequisites And Outputs}
-%\label{TaskPrerequisitesAndOutputs}
-%
-%Cylc's scheduling algorithm matches one task's completed outputs with
-%another's unsatisfied prerequisites
-%(Section~\ref{TheCylcSchedulingAlgorithm}).  
-%
-%Internally, these prerequisites (which must be satisfied before the task
-%can run) and outputs (that must be be completed as the task runs) take
-%the form of {\em literal text strings - messages that running tasks 
-%send to their proxy objects inside the scheduler}.
-%
-%\begin{myitemize}
-%    \item A task proxy considers a registered output ``completed''
-%        if it has received a matching message from its external task.
-%
-%    \item A task proxy considers a registered prerequisite ``satisfied''
-%        if another task proxy reports that it has a matching completed
-%        output.
-%
-%\end{myitemize}
-%
-%\subsubsection{Cycle Point}
-%
-%{\em Prerequisites and outputs should always contain a cycle point} to
-%distinguish between different instances of a task (at different 
-%forecast cycles) that may coexist in the task pool at any time. 
-%
-%Prerequisites that reflect same-cycle dependencies, which is the usual
-%case, should mention the host task's own cycle point, expressed as
-%\lstinline=$(CYCLE_TIME)= in task definition files.
-%
-%For intercycle dependencies, the cycle point in a prerequisite message
-%should be expressed as some offset from the task's own cycle point, e.g.\
-%\lstinline=$(CYCLE_TIME - 6)=. However, the only intercycle dependencies
-%you are likely to encounter (see the TopNet model in EcoConnect,
-%Section~\ref{EcoConnect}, for a counter example) are the restart
-%dependencies of your warm cycled forecast models, and the prerequisites
-%and outputs for these are now registered automatically by cylc.
-%
-%\subsubsection{Message Form}
-%
-%The exact form of the messages does not matter so long as the
-%prerequisites match their corresponding and outputs. For example, if
-%the message, 
-%\begin{lstlisting}
-%"storm surge forecast fields ready for $(CYCLE_TIME)"
-%\end{lstlisting} 
-%is registered as an output by the task that generates said forecast
-%fields, then the exact same message should be registered as a
-%prerequisite by any task that requires that data as input
-%(presumably storm surge postprocessing tasks in this case). 
-%
-%\subsubsection{Message Content}
-%
-%Prerequisites and outputs typically refer to the completion of a file or
-%a group of files, but it can be any event that a task could conceivably
-%trigger off: database interactions, download of data from a network,
-%copying or archiving of files, etc.
-%
-%For single file outputs the cylc message could include the actual
-%filename:
-%\begin{lstlisting}
-%"file surface-pressure-$(CYCLE_TIME).nc ready for use"
-%\end{lstlisting}
-%but there is no need to do this (see {\em Message Truth} below); you
-%might as well adopt a message format that applies equally well to
-%more general events and multi-file outputs:
-%\begin{lstlisting}
-%"surface pressure fields ready for $(CYCLE_TIME)"
-%\end{lstlisting}
-%
-%
-%\subsubsection{Message Truth}
-%
-%{\em Cylc does not check that incoming messages are true.}  For example,
-%if the message refers to completion of a particular output file, cylc
-%does not check that the file actually exists as the reporting task
-%claims it does. There are two reasons for this: (1) cylc does not place
-%any restriction on the kind of event that can be used as a task trigger,
-%so it would be next to impossible for it to verify outputs in general,
-%and (2) there is actually no need for cylc to check because the tasks
-%themselves must necessarily do it, and they must immediately report
-%problems back to cylc before aborting (or in the worst case, neglect to
-%check and then fail for lack of required inputs, with the same result).
-%
-%
-%\subsubsection{Uniqueness}
-%
-%Prerequisites need not be unique; i.e.\ multiple tasks can trigger off
-%the same event.
-%
-%Outputs should probably be unique; otherwise a task that depends on a
-%particular output will trigger off the first task to provide it.
-%
-
-   
-% automatic post-intervention recovery from nasty 
-%        failures, because cylc will know about the actual restart
-%        dependencies of your real tasks. For example, in the userguide
-%        example suite, if the weather model (task A) fails requiring a
-%        cold start 12 hours later, insert the cold start task into the
-%        suite (at failure time + 12) and purge all downstream dependants of 
-%        the failed task through to the cold start cycle. Then, tasks
-%        B and C will carry on as normal because their restart
-%        prerequisites will be satisfied automatically by their
-%        predecessors from several cycles ago, before the gap caused by
-%        the failure.
-
-
diff --git a/doc/gcylcrc.tex b/doc/gcylcrc.tex
index 7571f53..14de2ac 100644
--- a/doc/gcylcrc.tex
+++ b/doc/gcylcrc.tex
@@ -53,6 +53,18 @@ to list your available themes.
 \item {\em default:} ``default''
 \end{myitemize}
 
+\subsubsection{initial side-by-side views}
+
+Set the initial orientation of the suite view panels when the GUI starts.
+This can be changed later using the ``Toggle views side-by-side'' option in
+the ``View'' menu.
+
+\begin{myitemize}
+\item {\em type:} boolean (False or True)
+\item {\em legal values:} ``False'', ``True''.
+\item {\em default:} ``False''
+\end{myitemize}
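+
+For example, in your gcylc user config file (typically
+\lstinline=$HOME/.cylc/gcylc.rc=):
+\begin{lstlisting}
+initial side-by-side views = True
+\end{lstlisting}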
+
 \subsubsection{dot icon size}
 
 Set the size of the task state dot icons displayed in the text and dot
@@ -94,7 +106,7 @@ are required to prevent the hex code being interpreted as a comment).
 
 This section may contain task state color theme definitions.
 
-\subsubsection[THEME]{[themes] $\rightarrow$ THEME}
+\subsubsection[{[}THEME{]}]{[themes] $\rightarrow$ [[THEME]]}
 
 The name of the task state color-theme to be defined in this section.
 
@@ -102,7 +114,7 @@ The name of the task state color-theme to be defined in this section.
 \item {\em type:} string
 \end{myitemize}
 
-\subsubsection[inherit]{[themes] $\rightarrow$ [THEME] $\rightarrow$ inherit}
+\paragraph[inherit]{[themes] $\rightarrow$ [[THEME]] $\rightarrow$ inherit}
 
 You can inherit from another theme in order to avoid defining all states.
 
@@ -111,7 +123,7 @@ You can inherit from another theme in order to avoid defining all states.
 \item {\em default:} ``default''
 \end{myitemize}
 
-\subsubsection[defaults]{[themes] $\rightarrow$ [THEME] $\rightarrow$ defaults}
+\paragraph[defaults]{[themes] $\rightarrow$ [[THEME]] $\rightarrow$ defaults}
 
 Set default icon attributes for all state icons in this theme.
 
@@ -127,7 +139,7 @@ rgb.txt file, e.g.\ \lstinline=SteelBlue=; or hexadecimal color codes, e.g.
 See \lstinline@gcylc.rc.eg@ and \lstinline@themes.rc@ in
 \lstinline@$CYLC_DIR/conf/gcylcrc/@ for examples. 
 
-\subsubsection[STATE]{[themes] $\rightarrow$ [THEME] $\rightarrow$ STATE}
+\paragraph[STATE]{[themes] $\rightarrow$ [[THEME]] $\rightarrow$ STATE}
 
 Set icon attributes for all task states in THEME, or for a subset of them if 
 you have used theme inheritance and/or defaults. Legal values of STATE are
diff --git a/doc/gh-pages/index.html b/doc/gh-pages/index.html
index 04894a8..c2b16ea 100644
--- a/doc/gh-pages/index.html
+++ b/doc/gh-pages/index.html
@@ -7,7 +7,7 @@
 <meta name="keywords" content="cylc, forecast, suite, suite engine, metascheduler"/>
 <meta name="description" content="The cylc suite engine home page on GitHub." />
 <meta name="robots" content="all"/>
-<meta name="google-site-verification" content="80O-lgcVlVkKfF1yVyn0Q05W6QuN-WjcP30UftFhmiM" /> 
+<meta name="google-site-verification" content="80O-lgcVlVkKfF1yVyn0Q05W6QuN-WjcP30UftFhmiM" />
 
 <title>The Cylc Suite Engine</title>
 
@@ -38,13 +38,13 @@
         </div> <!--pageHeader-->
 
         <div id="banner">
-            <p class="p1"><span><b>The cylc suite engine</b> (<em>"silk"</em>) 
+            <p class="p1"><span><b>The cylc suite engine</b> (<em>"silk"</em>)
                 is a <b>workflow engine</b> and <b>meta-scheduler</b>. It
                 specialises in cycling (regularly repeating) workflows such as
                 those used in weather forecasting and climate modeling, but it
                 can also be used for non-cycling suites.
                 <b>Cylc is free software under the GNU GPL v3 license.</b>
-            </span></p> 
+            </span></p>
 
             <p class="p2"><span>.</span></p> <!-- p2 is required to get
         the lower banner background image, and the . is required
@@ -64,7 +64,7 @@
                 <h3><span>Overview</span></h3>
 
                 <p class="p3"><span>
-                    Cylc's suite design paradigm is simple and intuitive: configure 
+                    Cylc's suite design paradigm is simple and intuitive: configure
                     task scheduling with a dependency graph, then configure
                     task runtime properties in an efficient inheritance hierarchy.
                 </span></p>
@@ -72,7 +72,7 @@
                 <p class="p3"><span>
                     Cylc handles inter-cycle dependence explicitly and can
                     therefore interleave tasks from multiple cycles for
-                    efficient scheduling.                
+                    efficient scheduling.
                 </span></p>
 
                 <p class="p1"><span>
@@ -100,15 +100,15 @@
             <div id="navlist">
                 <h3 class="select"><span>Navigation:</span></h3>
                 <ul>
-                    <li> <a href="#overview">Overview</a> 
-                    <li> <a href="#who">Who's Using Cylc?</a> 
-                    <li> <a href="#features">Features</a> 
-                    <li> <a href="#requirements">Requirements</a> 
-                    <li> <a href="#download">Download</a> 
-                    <li> <a href="#installation">Installation</a> 
-                    <li> <a href="#documentation">Documentation</a> 
-                    <li> <a href="#support">Support</a> 
-                    <li> <a href="#development">Development</a> 
+                    <li> <a href="#overview">Overview</a>
+                    <li> <a href="#who">Who's Using Cylc?</a>
+                    <li> <a href="#features">Features</a>
+                    <li> <a href="#requirements">Requirements</a>
+                    <li> <a href="#download">Download</a>
+                    <li> <a href="#installation">Installation</a>
+                    <li> <a href="#documentation">Documentation</a>
+                    <li> <a href="#support">Support</a>
+                    <li> <a href="#development">Development</a>
                 </ul>
             </div> <!--navlist-->
 
@@ -129,14 +129,14 @@
                 </span></p>
 
                 <ul>
-                    <li> <a href="http://www.niwa.co.nz">NIWA</a> - cylc is used with <a href="https://github.com/metomi/rose">Rose</a> 
-                    <li> <a href="http://www.metoffice.gov.uk">Met Office (UK)</a> - cylc is used with <a href="https://github.com/metomi/rose">Rose</a> 
+                    <li> <a href="http://www.niwa.co.nz">NIWA</a> - cylc is used with <a href="https://github.com/metomi/rose">Rose</a>
+                    <li> <a href="http://www.metoffice.gov.uk">Met Office (UK)</a> - cylc is used with <a href="https://github.com/metomi/rose">Rose</a>
                     <li> <a href="http://www.nrlmry.navy.mil">NRL Marine Meteorology Division</a> (USA)
-                    <li> <a href="http://www.mpimet.mpg.de">Max Planck Institute for Meteorology</a> (Germany) 
-                    <li> <a href="http://www.bom.gov.au">Bureau of Meteorology</a> (Australia)  - cylc is used with <a href="https://github.com/metomi/rose">Rose</a> 
+                    <li> <a href="http://www.mpimet.mpg.de">Max Planck Institute for Meteorology</a> (Germany)
+                    <li> <a href="http://www.bom.gov.au">Bureau of Meteorology</a> (Australia)  - cylc is used with <a href="https://github.com/metomi/rose">Rose</a>
                     <li> <a href="http://www.gfdl.noaa.gov">GFDL</a> (NOAA/Princeton University, USA)
-                    <li> <a href="http://www.ncmrwf.gov.in">NCMRWF</a> (India)  - cylc is used with <a href="https://github.com/metomi/rose">Rose</a> 
-                    <li> <a href="http://kma.go.kr">KMA</a> (Korea) - cylc is used with <a href="https://github.com/metomi/rose">Rose</a> 
+                    <li> <a href="http://www.ncmrwf.gov.in">NCMRWF</a> (India)  - cylc is used with <a href="https://github.com/metomi/rose">Rose</a>
+                    <li> <a href="http://kma.go.kr">KMA</a> (Korea) - cylc is used with <a href="https://github.com/metomi/rose">Rose</a>
                 </ul>
 
                 <p class="nav"><span><a href="#">(back to top)</a></span></p>
@@ -156,33 +156,41 @@
                         <li> efficient task runtime configuration by <b>multiple inheritance</b>
                     </ul>
                     <li> <b>Distributed suites</b> - run tasks on multiple hosts.
-                    <li> Supports many different job submission methods (PBS, slurm, etc.).
+                    <li> Supports many different <b>job submission</b> methods (PBS, slurm, etc.).
                     <li> <b>cycling workflows</b>
                     <ul>
                         <li> ISO 8601 compliant <b>date-time cycling</b> - easily
-                        generate sequences from minutes to billions of years, future or past. 
+                        generate sequences from minutes to billions of years, future or past.
                         <li> <b>Integer cycling</b>
                         <li> <b>Inter-cycle triggers</b> allow automatic
                         cycle-interleaving (dependencies allowing)
                     </ul>
                     <li> Supports tasks that run only at the initial and final
-                    cycle points, or at any point or points between. 
+                    cycle points, or at any point or points between.
                     <li> Handles <b>several thousand tasks</b> in a single suite.
-                    <li> <b>Inter-suite dependence</b> - triggering tasks off tasks in other suites. 
+                    <li> <b>Inter-suite dependence</b> - triggering tasks off tasks in other suites.
                     <li> <b>Suite validation</b> - catch many errors prior to run time.
                     <li> Advanced <b>restart</b> capability - cylc can restart from any
                     point in a workflow, and can determine what happened to
-                    submitted or running tasks while the suite was down.
+                    active tasks while the suite was down.
                     <li> <b>Conditional triggering</b>.
-                    <li> <b>Task family triggering</b>.
+                    <li> <b>Task families</b> - for triggering, and inheritance of shared task runtime settings.
                     <li> <b>Automated failure recovery</b> by task retry or alternate workflows (via suicide triggers).
                     <li> <b>Task and suite event hooks and timeouts</b> for central alerting.
                     <li> <b>Simulation and dummy modes</b> - get the scheduling right without running real tasks.
                     <li> Supports use of the <b>Jinja2 Template Processor</b> in suite definitions.
                     <li> <b>Internal Queues</b> to limit the number of simultaneously active tasks.
                     <li> <b>Task Poll and Kill</b> for all supported job submission methods.
-										<li> <b>Edit Runs</b> - edit the final task job script
-										before (re)triggering a job manually
+                    <li> <b>Edit Runs</b> - edit the final task job script
+                    before (re)triggering a job manually
+                    <li> <b>Clock triggering</b> - in date-time cycling suites,
+                    tasks can trigger off the wall clock as well as off other
+                    tasks.
+                    <li> <b>Expiring tasks</b> - in date-time cycling suites,
+                    tasks can expire and not submit jobs if too far behind the wall clock.
+                    <li> <b>External event triggering</b> - tasks can trigger
+                    off external events as well as other tasks and the wall
+                    clock.
                 </ul>
 
                 <p class="nav"><span><a href="#">(back to top)</a></span></p>
@@ -199,7 +207,7 @@
                     <li> Python 2.6 or later (but not 3.x as yet).
                     <!-- <sup><a href="#fn2" id="fnr2" title="click to see footnote">2</a></sup> -->
 
-                    <li> <a href="http://www.pygtk.org">PyGTK</a>, 
+                    <li> <a href="http://www.pygtk.org">PyGTK</a>,
                     a Python wrapper for the GTK+ graphical user
                     interface toolkit, included in most Linux Distributions.
 
@@ -209,8 +217,8 @@
                     <li> The <a href="http://www.graphviz.org">graphviz</a> graph
                     layout engine (latest version tested: 2.28.0).
 
-                    <li> <a href="http://networkx.lanl.gov/pygraphviz">Pygraphviz</a>, 
-                    a python interface to graphviz (latest version tested: 1.1). 
+                    <li> <a href="http://networkx.lanl.gov/pygraphviz">Pygraphviz</a>,
+                    a python interface to graphviz (latest version tested: 1.1).
 
                     <li> <a href="http://jinja.pocoo.org">Jinja2</a>, a template processor for Python.
 
@@ -220,8 +228,8 @@
                 </ul>
 
                 <p class="p1"><span>
-                    Cylc has also absorbed <a href="http://code.google.com/p/jrfonseca/wiki/XDot">xdot</a> 
-                    (LGPL license) in modified form (no need to install separately).  
+                    Cylc has also absorbed <a href="http://code.google.com/p/jrfonseca/wiki/XDot">xdot</a>
+                    (LGPL license) in modified form (no need to install separately).
                 </span></p>
 
                 <p class="nav"><span><a href="#">(back to top)</a></span></p>
@@ -233,7 +241,7 @@
                 <p class="p1"><span>
                     Go to the
                     <a href="https://github.com/cylc/cylc/releases/">releases</a>
-                    page at the cylc repository on github, and 
+                    page at the cylc repository on github, and
                     download a compressed archive of the cylc source tree.
                 </span></p>
 
@@ -265,12 +273,12 @@
                     held online in HTML format:</span></p>
 
                 <ul>
-                    <li> <a href="html/multi/cug-html.html">Cylc User Guide (HTML multi-page)</a> 
-                    <li> <a href="html/single/cug-html.html">Cylc User Guide (HTML single-page)</a> 
+                    <li> <a href="html/multi/cug-html.html">Cylc User Guide (HTML multi-page)</a>
+                    <li> <a href="html/single/cug-html.html">Cylc User Guide (HTML single-page)</a>
                 </ul>
 
                 <p class="p2"><span>These documents, and a <b>PDF</b> version,
-                    can also be generated in your local cylc installation 
+                    can also be generated in your local cylc installation
                     by running <code>make</code> in the source tree. Local
                     documentation can be accessed by running the <code>cylc
                         doc</code> command.
@@ -289,7 +297,7 @@
                     future development planning.</span></p>
 
                 <!-- HJO: embedding the group causes the web page to jump down to the embedding location
-            <iframe id="forum_embed" 
+            <iframe id="forum_embed"
                 src="javascript:void(0)"
                 scrolling="no"
                 frameborder="0"
@@ -298,10 +306,10 @@
             </iframe>
             <script type="text/javascript">
                 document.getElementById('forum_embed').src =
-                'https://groups.google.com/forum/embed/?place=forum/cylc-dev' + 
-                '&showsearch=true&showpopout=true&showtabs=false' + 
+                'https://groups.google.com/forum/embed/?place=forum/cylc-dev' +
+                '&showsearch=true&showpopout=true&showtabs=false' +
                 '&parenturl=' + encodeURIComponent(window.location.href);
-            </script> 
+            </script>
             -->
 
                 <p class="nav"><span><a href="#">(back to top)</a></span></p>
@@ -315,13 +323,13 @@
                     Cylc uses the powerful <a href="git-scm.com">Git</a>
                     distributed source code management system.  Git makes
                     branching and merging, forking/cloning, and pushing and
-                    pulling changes between repositories very easy. The 
-                    <a href="http://github.com/cylc/cylc">cylc repository</a> 
+                    pulling changes between repositories very easy. The
+                    <a href="http://github.com/cylc/cylc">cylc repository</a>
                     is hosted on <a href="http://github.com">GitHub</a>.
                 </span></p>
 
                 <p class="p2"><span>
-                    We use the <em>integrator</em> or 
+                    We use the <em>integrator</em> or
                     <em>fork and pull</em> model of distributed development.
                     Developers fork the master repository on GitHub, and
                     clone their fork to their local workstation.  Changes
diff --git a/doc/gh-pages/screenshots/gcylc-text-view.png b/doc/gh-pages/screenshots/gcylc-text-view.png
index f380290..ebbf646 100644
Binary files a/doc/gh-pages/screenshots/gcylc-text-view.png and b/doc/gh-pages/screenshots/gcylc-text-view.png differ
diff --git a/doc/gh-pages/screenshots/gsummary.png b/doc/gh-pages/screenshots/gsummary.png
index 2562015..30ecc20 100644
Binary files a/doc/gh-pages/screenshots/gsummary.png and b/doc/gh-pages/screenshots/gsummary.png differ
diff --git a/doc/graphics/png/orig/gsummary.png b/doc/graphics/png/orig/gscan.png
similarity index 100%
rename from doc/graphics/png/orig/gsummary.png
rename to doc/graphics/png/orig/gscan.png
diff --git a/doc/graphics/png/scaled/gsummary.png b/doc/graphics/png/scaled/gscan.png
similarity index 99%
rename from doc/graphics/png/scaled/gsummary.png
rename to doc/graphics/png/scaled/gscan.png
index 73e5459..2df67a0 100644
Binary files a/doc/graphics/png/scaled/gsummary.png and b/doc/graphics/png/scaled/gscan.png differ
diff --git a/doc/implementation.tex b/doc/implementation.tex
deleted file mode 100644
index c412501..0000000
--- a/doc/implementation.tex
+++ /dev/null
@@ -1,379 +0,0 @@
-
-
-!!!!!!!!!!MUCH OF THIS IS OBSOLETE OR NEEDS UPDATING!!!!!!!!!!!!!!!
-
-\subsection{The Main Algorithm}
-
-From the discussion above it is apparent that the additional complexity
-due to explicitly handling intercycle dependencies is too difficult to
-deal with in a Finite State Machine, and that the ``forecast cycle'' as
-a global control system parameter has to be replaced with an independent
-``forecast cycle time'' for each task. This devolving of cycle
-timing to the individual tasks suggests treating the system as a {\em
-simulation} of autonomous proxy objects that represent the external
-tasks and interact regardless of cycle time to negotiate
-dependencies at run time (i.e.\ by matching completed outputs against
-prerequisites). If this can be made to work it provides extraordinary
-power and flexibility because it treats all dependencies equally and it
-makes any convoluted task scheduling logic entirely disappear: if task
-proxy objects can interact indiscriminately then they don't need to know
-{\em who} is supposed to satisfy their prerequisites and they can be
-defined without reference to the other tasks in the system (except of
-course that some other task(s) must exist that will satisfy their
-prerequisites).  Existing tasks could be taken out of the system, or new
-ones added, without changing the control system in any other way.
-Further, by means of object polymorphism\footnote{Polymorphism is the
-ability of one type to appear as and be used like another type. In OOP
-languages with inheritance, this usually refers to the ability to treat
-derived class objects as if they were members of a base class so that,
-for instance, a group of mixed-type objects can all be treated as
-members of a common base class while retaining their specialized derived
-class behaviour.} the control system can be designed to automatically
-handle any future task so long as it is derived from (inherits the
-properties of) the original task base class.
-
-The following simple description should be sufficient to enable the
-reader to understand how the algorithm achieves optimal forecast
-cycle-independent metascheduling. Everything else is arguably just
-implementation, although some important aspects of that are not trivial
-and will be discussed later.
-
-\begin{itemize}
-    \item The control system maintains a pool of autonomous {\em task
-        proxy objects} that represent each real task. 
-       
-    \item The internal state of a task proxy object must reflect that
-        of the real task it represents. This state information includes:
-
-        \begin{itemize}
-
-            \item task proxy object name.
-
-            \item associated external (real) task.  
-
-            \item owner of the real task, if necessary (who the task
-                should run as).
-
-            \item UTC {\em forecast cycle time}, e.g. $2010012418$
-        
-            \item current execution status: {\em waiting}, {\em running}, 
-                {\em finished}, or {\em failed}. 
-
-            \item a list of prerequisites and whether or not they are
-                satisfied yet, e.g.\ {\em file FOO is ready}. 
-
-            \item a list of outputs completed so far, e.g.\ {\em file
-                FOO is ready}.
-
-        \end{itemize}
-       
-    \item A task proxy object can launch its associated external task
-        when all of its prerequisites are satisfied.
-
-    \item A task proxy object can interact with other task proxy
-        objects (regardless of cycle time; all dependencies are now
-        equal) to determine if any of their completed outputs can
-        satisfy any of its prerequisites.
-
-    \item The control system gets the task pool to interact and
-        negotiate dependencies whenever any new output is reported.
- 
-    \item A task proxy object must exist by the time it is needed to
-        interact with other tasks, and must not cease to exist before
-        it is no longer needed.
-
-\end{itemize}
-
-\subsubsection{Main Loop}
-
-{\small
-\noindent
-\rule{5cm}{.2mm}
-\begin{lstlisting}
-while True:
-
-   if task_base.state_changed:
-       # PROCESS ALL TASKS whenever one has changed state
-       # as a result of a remote task message coming in. 
-       #---
-       task_pool.process_tasks()
-       task_pool.dump_state()
-       if task_pool.all_finished():
-           clean_shutdown( "ALL TASKS FINISHED" )
-
-    # REMOTE METHOD HANDLING; handleRequests() returns 
-    # after one or more remote method invocations are 
-    # processed (these are not just task messages, hence 
-    # the use of task_base.state_changed above).
-    #---
-    task_base.state_changed = False
-    pyro_daemon.handleRequests( timeout = None )
-
-# END MAIN LOOP
-\end{lstlisting}
-}
-
-
-\label{sec-task-messaging}
-
-
-\subsection{Task Proxy Object Life Cycle}
-
-Task proxy creation and destruction must be managed so that, in a
-continuously running system, they exist when needed, but do not exist
-for too long before they are needed, and cease to exist soon after they
-are no longer needed.
-
-\subsubsection{Task Creation}
-
-A task proxy object needs to exist, at the latest, by the time that all
-of its prerequisites have been satisfied.  The earliest a task can run
-is governed chiefly by whether it depends on its previous
-instance (in which case it can potentially run as soon as its previous
-instance has finished\footnote{Or when it has generated its background
-state for the next instance, at least.}) or not (in which case it can
-potentially run in parallel with its previous instance). This
-information is specific to the task type so the best place to hold it is
-in the task proxy class definitions. 
-
-New tasks are therefore created after their previous instance {\em
-spawns}; for forecast models this happens when the previous instance
-finishes\footnote{But see previous footnote}; and otherwise as soon as
-the previous instance starts running. This ensures that a task cannot
-run before its previous instance without use of explicit intercycle
-prerequisites that would require special treatment at startup (when
-there is no previous cycle). Tasks are not deleted immediately on
-abdication (see below).
-
-\subsubsection{Removing Spent Tasks} 
-
-A task is spent if it finished {\em and} no longer needed to satisfy the
-prequisites of any other task. Most tasks are only needed by other
-cotemporal downstream tasks; these can be removed when they are finished
-{\em and} older than the oldest non-finished task. For rare cases that
-are needed by tasks in later cycle times (e.g.\ nzlam post
-processing: multiple hourly topnet tasks need the same most recent
-previously finished 06 or 18Z nzlam post processing task), each
-non-finished task reports its {\em cutoff cycle time} which is the
-oldest cycle time that may contain tasks still needed to satisfy its
-own prerequisites (if it is waiting) or those of its immediate
-post-abdication successor (if it is running already), then the task
-manager can then kill any finished tasks that are also older than the
-oldest task cutoff time.
-
-\subsubsection{Removing Lame Tasks} 
-
-Tasks that will never run (because their prerequisites cannot be
-satisfied by any other task in the system) are removed from the {\em
-oldest batch} of tasks.  If not removed they would prevent the spent
-task deletion algorithm from working properly. Lame tasks can only be
-detected in the oldest task batch; in younger batches some tasks may yet
-appear as their predecessors spawn.
-
-Lame tasks are spawned rather than just deleted, because their
-descendents will not necessarily be lame: e.g.\ if the system is started
-at 12Z with topnet turned on, all topnet tasks from 12Z through 17Z will
-be valid but lame, because they will want to take input from a
-non-existent nzlam\_post from 06Z prior to startup. However, the
-presence of lame tasks may indicate user error: e.g.\ if you forget
-to turn on task type $foo$ that supplies input to task type $bar$,
-any instance of $bar$ will be lame.
-
-\subsection{Constraining The System}
-
-No task is allowed to get more than 48 hours (user configurable) ahead
-of the slowest task in the system (with respect to cycle time).
-
-
-\subsection{Coupling Task Proxies to Tasks} 
-
-Our task proxy objects must keep track of progress in their external
-counterparts. Most task prerequisites are just files generated by other
-tasks, so it is tempting to have the controller use the appearance of
-expected new output files as a proxy for task progress. But we have to
-be sure that a newly detected file is complete, not just that it exists,
-and it is difficult to do this in an OS-independent way (using {\em
-inotify} on Linux, for example.). 
-%On Linux one could insist that every completed output file is
-%immediately renamed by the generating task, and have the controller use
-%{\em inotify} to watch for the sudden appearance of the new file
-%(because file rename operations are atomic when the source and target
-%are on the same file system) [REF: Simon, if he wants]. But this is not
-%platform independent, and most forecast systems run on heterogeneous
-%distributed hardware. 
-More importantly though, prerequisites are not necessarily single files:
-a task could conceivably depend on completion of a large set of files, a
-database update, or a data transfer by remote procedure call, for
-instance. Consequently we chose to use a high level messaging system for
-communication between external tasks and the control system. This is
-platform independent and allows tasks to be triggered off any
-conceivable condition. For example, rather than detecting the existence
-of the file {\em FOO}, the controller would receive a message saying
-{\em file FOO is ready}, or similar, from the task that has
-just generated the file.  There is no need for the control system itself
-does to verify that the message is true (i.e. that file {\em FOO}
-really does exist) because any downstream task that
-depends on file {\em FOO} must necessarily do that itself, and error 
-conditions can be reported back to the controller, and possibly to a
-separate monitoring system as well, at that point.
-
-The Python Remote Object Protocal (Pyro) allows external programs to
-communicate directly, across the network, with specific objects inside
-the running controller. This means that tasks can communicate directly
-with their own proxy objects, obviating the need for any any internal
-message brokering mechanism in the control system.    
-
-Each task must express its prerequisites (i.e.\ its dependence on
-upstream tasks) as a text string, for example ``file X is ready'', or
-``task X has completed'', or ``task X has completed writing all Y
-files'', and must send messages of the same kind back to the controller
-to indicate when it has reached an important waypoint or completed
-generated any important outputs.  
-
-
-\subsection{Task Definition}
-blah.
-
-\subsection{Pyro}
-blah.
-
-\subsection{Pure Simulation Mode}
-
-The dynamic metascheduling algorithm is essentially a simulation of an
-interacting task set in which the state of each task proxy object is
-coupled to that of the real task it represents. In addition, task proxy
-state changes occur in response to {\em messages} rather than, say,
-actual detection of newly generated input files.  This suggests
-a {\em dummy mode} in which each configured external task is replaced by
-an instance of an external dummy program that masquerades as the real
-task by reporting completion of each of its outputs in turn (task output
-lists can be exposed to other programs, namely to the dummy task
-program, through Pyro RPC calls). As far as the control system is
-concerned this is indistinguishable from real operation, except that
-external dummy tasks are less likely to be delayed by resource
-contention, and the dummy mode can be run according to an accelerated
-clock, rather than real time, for quick testing.  Dummy tasks therefore
-complete in approximately the same dummy clock time as the real tasks do
-in real time. An initial dummy clock offset relative to the initial
-cycle time can also be specified, which allows simulation of the
-transition between catch up and real time operation, and vice versa. Log
-messages are stamped with dummy clock time instead of real time.
-
-The same script is used for all external dummy tasks but it has special
-behaviour in certain cases: the dummy downloader ``waits for incoming
-files'' until 3:15 past its cycle time, and the dummy topnet ``waits
-for stream flow data'' until 0:15 past its cycle time.
-
-The dummy clock can be bumped forward a number of hours by remote
-control, while the system is running. This affects the postrequisite
-timing of running tasks correctly, but if it causes a running task to
-finish immediately the next task in line will still start from the
-beginning no matter how big the bump.
-
-
-
-%some tasks, such as those that wait on external input data, and tide
-%models, may have no upstream dependencies at all.
-
-%This could be done by checking for the existence of required inputs
-%directly, or by monitoring the state of the other tasks that are known
-%to provide the inputs in each case (are they finished yet?).  
-
-%The control program thus remains simple and generic, regardless of the
-%number of tasks or the complexity of their interdependencies; it simply
-%manages a set of tasks that are all individually configured as if they
-%were to run in isolation.\footnote{The system manager does of course
-%have to ensure that the configured task pool is self consistent, i.e.\
-%that each task's prerequisites will be satisfied by some other task(s)
-%in the system.}
-%The total absence of explicit scheduling logic makes this method
-%extremely flexible and extensible.\footnote{To extend the system, one
-%simply derives a new class definition that lists the new task's
-%prerequisites and outputs. The new task will automatically run at the
-%right time, i.e.\ when its prerequisites have been satisfied by some
-%other task(s) in the system.}
-
-
-\subsection{Applicability}
-
-The object oriented dynamic metascheduling concept is quite general and
-could in principle be implemented for any set of interdependent tasks.
-cylc, however, is specialized toward cycling forecast systems in that
-each task must have an associated {\em forecast cycle time} that is
-part of a predetermined series for a given task type and is not
-necessarily related to the real time at which the task actually runs.  
-
-\subsection{Environment}
-
-EcoConnect operates in a well defined environment so that each real task
-knows what its input files look like for a given cycle time
-(through filenaming conventions) and where to get them from (e.g.\ from
-their own input directories, or upstream output directories).
-Consequently the control system does not need to know the location of
-important input/output files, just (via messaging) that they exist. In a
-less structured environment additional tasks could easily be added to
-to move files around as needed. 
-
-\subsection{Startup and Initialization}
-
-An initial cycle time and list of task object names are read in from
-the config file, then each task object is created at the initial
-cycle time {\em or} at the first subsequent cycle time that is
-valid for the task type. Optionally, we can tell the controller to
-reload the current state dump file (which may have been edited); this
-will override the configured start time and task list. After startup,
-new tasks are created only by {\em abdication} (below).
-
-An initial run through the {\em task processing} code, by virtue of the
-fact that the main loop starts with task processing, causes tasks with
-no prerequisites (e.g.\ {\em downloader}) to enter the {\em running}
-state and launch their external tasks immediately. Otherwise ({\em or}
-if there are no tasks that lack prerequisites) nothing will happen.
-
-
-\subsection{Task Interaction} 
-
-Each task keeps track of which of its postrequisites are completed, and
-asks the other tasks if they can satisfy any of its prerequisites. 
-
-{\small
-\noindent
-\rule{5cm}{.2mm}
-\begin{lstlisting}
-class task_pool( Pyro.core.ObjBase ):
-    # ...
-    def interact( self ):
-        # get each task to ask all the others if 
-        # they can satisfy its prerequisites
-        #--
-        for task in self.tasks:
-            task.get_satisfaction( self.tasks )
-    # ...
-\end{lstlisting}
-}
-
-\subsection{Running Tasks}
-
-Each task object can launch its associated external task, and enter the
-{\em running} state if its prerequisites are all satisfied, any existing
-older tasks of the same type are already {\em finished}, and fewer than
-{\em MAX\_ RUNAHEAD} finished tasks of the same type still exist (this
-stops tasks with no prerequisites from running ahead indefinitely).
-
-\subsection{Pyro Remote Method Calls}
-
-The Pyro request handling loop executes remote method calls coming in
-from external tasks, and returns after at least one call was handled.
-Pyro must be run in non-default single-threaded mode (see Appendix
-\ref{pyro-appendix}).
-
-\subsection{Dumping State} 
-
-The current state (waiting, running, or finished) of each task is
-written out to the {\em state dump file}.  This provides a running
-snapshot of the system as it runs, and just prior to shutdown or
-failure. The controller can optionally start up by loading the state
-dump (which can be edited first). Any 'running' tasks are reloaded in
-the 'waiting' state.
-
-
diff --git a/doc/siterc.tex b/doc/siterc.tex
index 56bf476..46a3c99 100644
--- a/doc/siterc.tex
+++ b/doc/siterc.tex
@@ -1,8 +1,8 @@
 
-\section{Site And User Config File Reference}
+\section{Global (Site, User) Config File Reference}
 \label{SiteRCReference}
 
-\lstset{language=bash}
+\lstset{language=transcript}
 
 This section defines all legal items and values for cylc site and
 user config files. See {\em Site And User Config Files}
@@ -219,11 +219,11 @@ Suite event logs are rolled over when they reach this file size.
 Documentation locations for the \lstinline=cylc doc= command and gcylc
 Help menus.
 
-\subsubsection[{[[}files{]]}]{[documentation] $\rightarrow$ [files]}
+\subsubsection[{[[}files{]]}]{[documentation] $\rightarrow$ [[files]]}
 
 File locations of documentation held locally on the cylc host server.
 
-\paragraph[html index]{[documentation] $\rightarrow$ [files] $\rightarrow$ html index }
+\paragraph[html index]{[documentation] $\rightarrow$ [[files]] $\rightarrow$ html index }
 
 File location of the main cylc documentation index.
 \begin{myitemize}
@@ -231,7 +231,7 @@ File location of the main cylc documentation index.
 \item {\em default:} \lstinline=$CYLC_DIR/doc/index.html=
 \end{myitemize}
 
-\paragraph[pdf user guide]{[documentation] $\rightarrow$ [files] $\rightarrow$ pdf user guide }
+\paragraph[pdf user guide]{[documentation] $\rightarrow$ [[files]] $\rightarrow$ pdf user guide }
 
 File location of the cylc User Guide, PDF version.
 \begin{myitemize}
@@ -239,7 +239,7 @@ File location of the cylc User Guide, PDF version.
 \item {\em default:} \lstinline=$CYLC_DIR/doc/cug-pdf.pdf=
 \end{myitemize}
 
-\paragraph[multi-page html user guide]{[documentation] $\rightarrow$ [files] $\rightarrow$ multi-page html user guide }
+\paragraph[multi-page html user guide]{[documentation] $\rightarrow$ [[files]] $\rightarrow$ multi-page html user guide }
 
 File location of the cylc User Guide, multi-page HTML version.
 \begin{myitemize}
@@ -247,7 +247,7 @@ File location of the cylc User Guide, multi-page HTML version.
 \item {\em default:} \lstinline=$CYLC_DIR/doc/html/multi/cug-html.html=
 \end{myitemize}
 
-\paragraph[single-page html user guide]{[documentation] $\rightarrow$ [files] $\rightarrow$ single-page html user guide }
+\paragraph[single-page html user guide]{[documentation] $\rightarrow$ [[files]] $\rightarrow$ single-page html user guide }
 
 File location of the cylc User Guide, single-page HTML version.
 \begin{myitemize}
@@ -255,11 +255,11 @@ File location of the cylc User Guide, single-page HTML version.
 \item {\em default:} \lstinline=$CYLC_DIR/doc/html/single/cug-html.html=
 \end{myitemize}
 
-\subsubsection[{[[}urls{]]}]{[documentation] $\rightarrow$ [urls]}
+\subsubsection[{[[}urls{]]}]{[documentation] $\rightarrow$ [[urls]]}
 
 Online documentation URLs.
 
-\paragraph[internet homepage]{[documentation] $\rightarrow$ [urls] $\rightarrow$ internet homepage }
+\paragraph[internet homepage]{[documentation] $\rightarrow$ [[urls]] $\rightarrow$ internet homepage }
 
 URL of the cylc internet homepage, with links to documentation for the
 latest official release.
@@ -269,7 +269,7 @@ latest official release.
 \item {\em default:} http://cylc.github.com/cylc/
 \end{myitemize}
 
-\paragraph[local index]{[documentation] $\rightarrow$ [urls] $\rightarrow$ local index}
+\paragraph[local index]{[documentation] $\rightarrow$ [[urls]] $\rightarrow$ local index}
 
 Local intranet URL of the main cylc documentation index.
 
@@ -369,6 +369,24 @@ Each suite stores its port number, by suite name, under this directory.
 \item {\em default:} \lstinline@$HOME/.cylc/ports/@
 \end{myitemize}
 
+\subsection{[monitor]}
+
+Configurable settings for the command line \lstinline=cylc monitor= tool.
+
+\subsubsection[sort order]{[monitor] $\rightarrow$ sort order}
+
+The sort order for tasks in the monitor view.
+\begin{myitemize}
+\item {\em type:} string
+\item {\em options:}
+    \begin{myitemize}
+    \item {\bf alphanumeric}
+    \item {\bf definition} - the order that tasks appear under
+        \lstinline=[runtime]= in the suite definition.
+    \end{myitemize}
+\item {\em default:} definition
+\end{myitemize}
+
 \subsection{[hosts]}
 
 The [hosts] section configures some important host-specific settings for
@@ -379,7 +397,7 @@ are not listed here, in which case local settings will be assumed,
 with the local home directory path, if present, replaced by
 \lstinline=$HOME= in items that configure directory locations.
 
-\subsubsection[{[[}HOST{]]}]{[hosts] $\rightarrow$ HOST}
+\subsubsection[{[[}HOST{]]}]{[hosts] $\rightarrow$ [[HOST]]}
 
 The default task host is the suite host, {\bf localhost}, with default
 values as listed below. Use an explicit \lstinline=[hosts][[localhost]]=
@@ -414,7 +432,7 @@ file.
 \end{myitemize}
 \end{myitemize}
 
-\paragraph[run directory]{[hosts] $\rightarrow$ HOST $\rightarrow$ run directory }
+\paragraph[run directory]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ run directory }
 
 The top level of the directory tree that holds suite-specific output logs,
 state dump files, run database, etc.
@@ -424,7 +442,7 @@ state dump files, run database, etc.
 \item {\em default:} \lstinline=$HOME/cylc-run=
 \end{myitemize}
 
-\paragraph[work directory]{[hosts] $\rightarrow$ HOST $\rightarrow$ work directory }
+\paragraph[work directory]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ work directory }
 \label{workdirectory}
 
 The top level for suite work and share directories.
@@ -435,7 +453,7 @@ The top level for suite work and share directories.
 \end{myitemize}
 
 
-\paragraph[task communication method]{[hosts] $\rightarrow$ HOST $\rightarrow$ task communication method }
+\paragraph[task communication method]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ task communication method }
 \label{task_comms_method}
 
 The means by which task progress messages are reported back to the running suite.
@@ -452,7 +470,7 @@ See above for default polling intervals for the poll method.
 \item {\em localhost default:} pyro
 \end{myitemize}
 
-\paragraph[remote copy template]{[hosts] $\rightarrow$ HOST $\rightarrow$ remote copy template }
+\paragraph[remote copy template]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ remote copy template }
 
 A string for the command used to copy files to a remote host. This is not used
 on the suite host unless you run local tasks under another user account.
@@ -462,7 +480,7 @@ on the suite host unless you run local tasks under another user account.
 \item {\em localhost default:} \lstinline@scp -oBatchMode=yes -oConnectTimeout=10@
 \end{myitemize}
 
-\paragraph[remote shell template]{[hosts] $\rightarrow$ HOST $\rightarrow$ remote shell template }
+\paragraph[remote shell template]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ remote shell template }
 
 A string for the command used to invoke commands on this host.
 This is not used on the suite host unless you run local tasks under
@@ -475,7 +493,7 @@ being a placeholder for the name of the host. This is no longer the case. Any
 \item {\em localhost default:} \lstinline@ssh -oBatchMode=yes -oConnectTimeout=10@
 \end{myitemize}
 
-\paragraph[use login shell]{[hosts] $\rightarrow$ HOST $\rightarrow$ use login shell }
+\paragraph[use login shell]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ use login shell }
 
 Whether to use a login shell or not for remote command invocation. By
 default cylc runs remote ssh commands using a login shell,
@@ -498,7 +516,7 @@ environment.
 \item {\em localhost default:} true
 \end{myitemize}
 
-\paragraph[cylc executable]{[hosts] $\rightarrow$ HOST $\rightarrow$ cylc executable }
+\paragraph[cylc executable]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ cylc executable }
 
 The \lstinline=cylc= executable on a remote host. Note this should point to the 
 cylc multi-version wrapper (see~\ref{CUI}) on the host, not
@@ -511,7 +529,7 @@ invoked via \lstinline=ssh= on this host.
 \item {\em localhost default:} \lstinline@cylc@
 \end{myitemize}
 
-\paragraph[global init-script]{[hosts] $\rightarrow$ HOST $\rightarrow$ global init-script }
+\paragraph[global init-script]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ global init-script }
 \label{GlobalInitScript}
 
 If specified, the value of this setting will be inserted to just before the
@@ -523,7 +541,7 @@ submitted to the specified remote host.
 \item {\em localhost default:} \lstinline@""@
 \end{myitemize}
 
-\paragraph[copyable environment variables]{[hosts] $\rightarrow$ HOST $\rightarrow$ copyable environment variables }
+\paragraph[copyable environment variables]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ copyable environment variables }
 
 A list containing the names of the environment variables that can and/or need
 to be copied from the suite daemon to a job.
@@ -533,6 +551,133 @@ to be copied from the suite daemon to a job.
 \item {\em localhost default:} \lstinline@[]@
 \end{myitemize}
 
+\paragraph[retrieve job logs]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ retrieve job logs}
+
+Global default for the~\ref{runtime-remote-retrieve-job-logs} setting for the
+specified host.
+
+\paragraph[retrieve job logs command]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ retrieve job logs command}
+
+If \lstinline@rsync -a@ is unavailable or insufficient to retrieve job logs
+from a remote host, you can use this setting to specify a suitable command.
+
+\begin{myitemize}
+\item {\em type:} string
+\item {\em default:} rsync -a
+\end{myitemize}
+
+\paragraph[retrieve job logs max size]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ retrieve job logs max size}
+
+Global default for the~\ref{runtime-remote-retrieve-job-logs-max-size} setting for the
+specified host.
+
+\paragraph[retrieve job logs retry delays]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ retrieve job logs retry delays}
+
+Global default for the~\ref{runtime-remote-retrieve-job-logs-retry-delays}
+setting for the specified host.
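+
+For example, a site config could enable job log retrieval for a particular
+task host using the items above (the host name and values here are
+illustrative only):
+\begin{lstlisting}
+[hosts]
+    [[hpc1]]
+        retrieve job logs = True
+        retrieve job logs max size = 10M
+        retrieve job logs retry delays = PT10S, PT1M
+\end{lstlisting}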
+
+\paragraph[task event handler retry delays]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ task event handler retry delays}
+
+Host-specific default for the~\ref{runtime-events-handler-retry-delays}
+setting.
+
+\paragraph[local tail command template]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ local tail command template}
+\label{local-tail-template}
+
+A template (with \lstinline=%(filename)s= substitution) for the command used to
+tail-follow local job logs, used by the gcylc log viewer and
+\lstinline=cylc cat-log --tail=.  You are unlikely to need to override this.
+
+\begin{myitemize}
+\item {\em type:} string
+\item {\em default:} \lstinline@tail -n +1 -F %(filename)s@
+\end{myitemize}
+
+\paragraph[remote tail command template]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ remote tail command template}
+\label{remote-tail-template}
+
+A template (with \lstinline=%(filename)s= substitution) for the command used
+to tail-follow remote job logs, used by the gcylc log viewer and
+\lstinline=cylc cat-log --tail=.  The remote tail command needs to be told to
+die when its parent process exits. You may need to override this command for
+task hosts where the default \lstinline=tail= or \lstinline=ps= commands are
+not equivalent to the GNU/Linux versions.
+
+\begin{myitemize}
+\item {\em type:} string
+\item {\em default:} \lstinline@tail --pid=$(ps h -o ppid $$ | sed -e 's/[[:space:]]//g') -n +1 -F %(filename)s@
+\item {\em example:} for AIX hosts:\\
+    \lstinline@/gnu/tail --pid=$(ps -o ppid= -p $$ | sed -e 's/[[:space:]]//g') -n +1 -F %(filename)s@
+\end{myitemize}
+
+\paragraph[{[[[}batch systems{]]]}]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ [[[batch systems]]]}
+
+Settings for particular batch systems on HOST. In the subsections below, SYSTEM
+should be replaced with the cylc job submission method name that represents the
+batch system (see~\ref{RuntimeJobSubMethods}).
+
+\subparagraph[{[[[[}SYSTEM{]]]]}err tailer]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ [[[batch systems]]] $\rightarrow$ [[[[SYSTEM]]]] $\rightarrow$ err tailer}
+\label{err-tailer}
+
+A command template (with \lstinline=%(job_id)s= substitution) that can be used
+to tail-follow the stderr stream of a running job if SYSTEM does
+not use the normal log file location while the job is running.  This setting
+overrides~\ref{local-tail-template} and~\ref{remote-tail-template} above.
+
+\begin{myitemize}
+\item {\em type:} string
+\item {\em default:} (none)
+\item {\em example:} For PBS:
+    \begin{lstlisting}
+[hosts]
+    [[myhpc*]]
+        [[[batch systems]]]
+            [[[[pbs]]]]
+                err tailer = qcat -f -e %(job_id)s
+                out tailer = qcat -f -o %(job_id)s
+                err viewer = qcat -e %(job_id)s
+                out viewer = qcat -o %(job_id)s
+    \end{lstlisting}
+\end{myitemize}
+
+\subparagraph[{[[[[}SYSTEM{]]]]}out tailer]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ [[[batch systems]]] $\rightarrow$ [[[[SYSTEM]]]] $\rightarrow$ out tailer}
+\label{out-tailer}
+
+A command template (with \lstinline=%(job_id)s= substitution) that can be used
+to tail-follow the stdout stream of a running job if SYSTEM does
+not use the normal log file location while the job is running.  This setting
+overrides~\ref{local-tail-template} and~\ref{remote-tail-template} above.
+
+\begin{myitemize}
+\item {\em type:} string
+\item {\em default:} (none)
+\item {\em example:} see~\ref{err-tailer}
+\end{myitemize}
+
+\subparagraph[{[[[[}SYSTEM{]]]]}err viewer]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ [[[batch systems]]] $\rightarrow$ [[[[SYSTEM]]]] $\rightarrow$ err viewer}
+
+A command template (with \lstinline=%(job_id)s= substitution) that can be used
+to view the stderr stream of a running job if SYSTEM does
+not use the normal log file location while the job is running.
+
+\begin{myitemize}
+\item {\em type:} string
+\item {\em default:} (none)
+\item {\em example:} see~\ref{err-tailer}
+\end{myitemize}
+
+\subparagraph[{[[[[}SYSTEM{]]]]}out viewer]{[hosts] $\rightarrow$ [[HOST]] $\rightarrow$ [[[batch systems]]] $\rightarrow$ [[[[SYSTEM]]]] $\rightarrow$ out viewer}
+
+A command template (with \lstinline=%(job_id)s= substitution) that can be used
+to view the stdout stream of a running job if SYSTEM does
+not use the normal log file location while the job is running.
+
+\begin{myitemize}
+\item {\em type:} string
+\item {\em default:} (none)
+\item {\em example:} see~\ref{err-tailer}
+\end{myitemize}
+
 \subsection{[suite host self-identification] }
 
 The suite host's identity must be determined locally by cylc and passed
@@ -601,6 +746,10 @@ A list of hosts to scan for running suites.
 \item {\em default:} localhost
 \end{myitemize}
 
+\subsection{[task events]}
+
+Global site/user defaults for~\ref{TaskEventHandling2}.
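+
+As a sketch, assuming the same item names as the runtime
+\lstinline=[[[events]]]= section (values illustrative):
+\begin{lstlisting}
+[task events]
+    mail events = submission failed, failed
+    handler retry delays = PT10S, PT1M
+\end{lstlisting}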
+
 \subsection{[test battery]}
 
 Settings for the automated development tests.
@@ -615,25 +764,92 @@ test battery.
 The name of a remote host without shared HOME file system as the host running
 the test battery.
 
-\subsubsection[{[[}batch systems{]]}]{[test battery] $\rightarrow$ [batch systems]}
+\subsubsection[{[[}batch systems{]]}]{[test battery] $\rightarrow$ [[batch systems]]}
 
 Settings for testing supported batch systems (job submission methods). The
 tests for a batch system are only performed if the batch system is available on
 the test host or a remote host accessible via SSH from the test host.
 
-\paragraph[{[[[}\_\_NAME\_\_{]]]}]{[test battery] $\rightarrow$ [batch systems] $\rightarrow$ [\_\_NAME\_\_]}
+\paragraph[{[[[}SYSTEM{]]]}]{[test battery] $\rightarrow$ [[batch systems]] $\rightarrow$ [[[SYSTEM]]]}
 
-\_\_NAME\_\_ is the name of a supported batch system with automated tests.
+SYSTEM is the name of a supported batch system with automated tests.
 This can currently be "loadleveler", "lsf", "pbs", "sge" and/or "slurm".
 
-\subparagraph[host]{[test battery] $\rightarrow$ [batch systems] $\rightarrow$ [\_\_NAME\_\_] $\rightarrow$ host}
+\subparagraph[host]{[test battery] $\rightarrow$ [[batch systems]] $\rightarrow$ [[[SYSTEM]]] $\rightarrow$ host}
 
 The name of a host where commands for this batch system are available. Use
 "localhost" if the batch system is available on the host running the test
 battery. Any specified remote host should be accessible via SSH from the host
 running the test battery.
 
-\subparagraph[{[[[[}directives{]]]]}]{[test battery] $\rightarrow$ [batch systems] $\rightarrow$ [\_\_NAME\_\_] $\rightarrow$ [directives]}
+\subparagraph[err viewer]{[test battery] $\rightarrow$ [[batch systems]] $\rightarrow$ [[[SYSTEM]]] $\rightarrow$ err viewer}
+
+The command template (with \lstinline=%(job_id)s= substitution) for testing
+the run time stderr viewer functionality for this batch system.
+
+\subparagraph[out viewer]{[test battery] $\rightarrow$ [[batch systems]] $\rightarrow$ [[[SYSTEM]]] $\rightarrow$ out viewer}
+
+The command template (with \lstinline=%(job_id)s= substitution) for testing
+the run time stdout viewer functionality for this batch system.
+
+\subparagraph[{[[[[}directives{]]]]}]{[test battery] $\rightarrow$ [[batch systems]] $\rightarrow$ [[[SYSTEM]]] $\rightarrow$ [[[[directives]]]]}
 
 The minimum set of directives that must be supplied to the batch system on the
 site to initiate jobs for the tests.
+
+\subsection{[cylc]}
+
+Default values for entries in the suite.rc [cylc] section.
+
+\subsubsection[UTC mode]{[cylc] $\rightarrow$ UTC mode}
+\label{SiteUTCMode}
+
+Allows you to set a default value for UTC mode in a suite at the site level.
+See~\ref{UTC-mode} for details.
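+
+For example, to make suites at a site default to UTC mode (a minimal sketch):
+\begin{lstlisting}
+[cylc]
+    UTC mode = True
+\end{lstlisting}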
+
+\subsubsection[{[[}event hooks{]]}]{[cylc] $\rightarrow$ [[event hooks]]}
+\label{SiteCylcHooks}
+
+You can define site defaults for each of the following options, details
+of which can be found under~\ref{SuiteEventHandling}; a brief example
+follows the list:
+
+\subparagraph[startup handler]{[cylc] $\rightarrow$ [[event hooks]] $\rightarrow$ startup handler}
+
+\subparagraph[shutdown handler]{[cylc] $\rightarrow$ [[event hooks]] $\rightarrow$ shutdown handler}
+
+\subparagraph[timeout handler]{[cylc] $\rightarrow$ [[event hooks]] $\rightarrow$ timeout handler}
+
+\subparagraph[timeout]{[cylc] $\rightarrow$ [[event hooks]] $\rightarrow$ timeout}
+
+\subparagraph[abort on timeout]{[cylc] $\rightarrow$ [[event hooks]] $\rightarrow$ abort on timeout}
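+
+For example, a site might set default suite event hook values like this (the
+handler script name and timeout value are illustrative):
+\begin{lstlisting}
+[cylc]
+    [[event hooks]]
+        timeout = PT6H
+        timeout handler = notify-oncall.sh
+        abort on timeout = False
+\end{lstlisting}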
+
+\subsection{[authentication]}
+\label{GlobalAuth}
+
+Authentication of client programs with suite daemons can be configured here, and
+overridden in suites if necessary (see~\ref{SuiteAuth}).
+
+The suite-specific passphrase must be installed on a user's account to
+authorize full control privileges (see~\ref{tutPassphrases}
+and~\ref{ConnectionAuthentication}). In the future we plan to move to a more
+traditional user account model so that each authorized user can have their own
+password.
+
+\subsubsection[public]{[authentication] $\rightarrow$ public}
+
+This sets the client privilege level for public access - i.e.\ no suite passphrase
+required.
+
+\begin{myitemize}
+\item {\em type:} string (must be one of the following options)
+\item {\em options:}
+    \begin{myitemize}
+        \item {\em identity} - only suite and owner names revealed
+        \item {\em description} - identity plus suite title and description
+        \item {\em state-totals} - identity, description, and task state totals
+        \item {\em full-read} - full read-only access for monitor and GUI
+        \item {\em shutdown} - full read access plus shutdown, but no other
+            control.
+    \end{myitemize}
+\item {\em default:} state-totals
+\end{myitemize}
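+
+For example, to grant full read-only access to monitors and GUIs without a
+passphrase (a minimal sketch):
+\begin{lstlisting}
+[authentication]
+    public = full-read
+\end{lstlisting}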
diff --git a/doc/suiterc.tex b/doc/suiterc.tex
index 1420c2d..fe6babc 100644
--- a/doc/suiterc.tex
+++ b/doc/suiterc.tex
@@ -70,13 +70,14 @@ expensive real tasks during suite development.
 
 Cylc runs off the suite host's system clock by default. This item allows
 you to run the suite in UTC even if the system clock is set to local time.
-Clock-triggered tasks will trigger when the current UTC time is equal to
+Clock-trigger tasks will trigger when the current UTC time is equal to
 their cycle point date-time plus offset; other time values used, reported,
-or logged by the suite daemon will usually also be in UTC.
+or logged by the suite daemon will usually also be in UTC. The default for
+this can be set at the site level (see~\ref{SiteUTCMode}).
 
 \begin{myitemize}
     \item {\em type:} boolean
-    \item {\em default:} False
+    \item {\em default:} False, unless overridden at site level.
 \end{myitemize}
 
 \subsubsection[cycle point format]{ [cylc] $\rightarrow$ cycle point format}
@@ -213,17 +214,21 @@ Additional information can be passed to event handlers via
 
 \paragraph[EVENT handler]{[cylc] $\rightarrow$ [[event hooks]] $\rightarrow$ EVENT handler}
 
-A list of one or more event handlers to call when one of the following EVENTs occurs:
+A comma-separated list of one or more event handlers to call when one of the
+following EVENTs occurs:
 \begin{myitemize}
     \item {\bf startup}  - the suite has started running
     \item {\bf shutdown} - the suite is shutting down
     \item {\bf timeout}  - the suite has timed out
 \end{myitemize}
 
+Default values for these can be set at the site level via the siterc file
+(see~\ref{SiteCylcHooks}).
+
 Item details:
 \begin{myitemize}
     \item {\em type:} string (event handler script name)
-    \item {\em default:} None
+    \item {\em default:} None, unless defined at the site level.
     \item {\em example:} \lstinline@startup handler = my-handler.sh@
 \end{myitemize}
 
@@ -231,12 +236,13 @@ Item details:
 
 If a timeout is set and the timeout event is handled, the timeout event
 handler(s) will be called if the suite times out before it finishes.
-The timer is set initially at suite start up.
+The timer is set initially at suite start up. It is possible to set a default
+for this at the site level (see~\ref{SiteCylcHooks}).
 
 \begin{myitemize}
     \item {\em type:} ISO 8601 duration/interval representation (e.g.
  \lstinline=PT5S=, 5 seconds, \lstinline=PT1S=, 1 second) - minimum 0 seconds.
-    \item {\em default:} (none)
+    \item {\em default:} (none), unless set at the site level.
 \end{myitemize}
 
 \paragraph[reset timer]{[cylc] $\rightarrow$ [[event hooks]] $\rightarrow$ reset timer}
@@ -254,11 +260,12 @@ time.
 \paragraph[abort on timeout]{[cylc] $\rightarrow$ [[event hooks]] $\rightarrow$ abort on timeout}
 
 If a suite timer is set (above) this will cause the suite to abort with
-error status if the suite times out while still running.
+error status if the suite times out while still running. It is possible to set 
+a default for this at the site level (see~\ref{SiteCylcHooks}).
 
 \begin{myitemize}
     \item {\em type:} boolean
-    \item {\em default:} False
+    \item {\em default:} False, unless set at the site level.
 \end{myitemize}
 
 \paragraph[abort if startup handler fails]{[cylc] $\rightarrow$ [[event hooks]] $\rightarrow$ abort if EVENT handler fails}
@@ -373,7 +380,7 @@ A reference test run will abort immediately if any task fails, unless
 in a list IDs of tasks that are expected to fail.
 
 \begin{myitemize}
-    \item {\em type:} string list (task IDs: \lstinline=name.cycle_point=)
+    \item {\em type:} Comma-separated list of strings (task IDs: \lstinline=name.cycle_point=).
     \item {\em default:} (none)
     \item {\em example:} \lstinline=foo.20120808, bar.20120908=
 \end{myitemize}
@@ -420,6 +427,18 @@ it is not possible to arrive at a sensible default for all suites.
     \item {\em default:} PT1M (1 minute)
 \end{myitemize}
 
+\subsection{[authentication]}
+\label{SuiteAuth}
+
+Authentication of client programs with suite daemons can be configured in the
+global site/user files (\ref{GlobalAuth}) and overridden here if necessary.
+See~\ref{GlobalAuth} for more information.
+
+\subsubsection{[authentication] $\rightarrow$ public}
+
+The client privilege level granted for public access - i.e.\ no suite passphrase
+required.  See~\ref{GlobalAuth} for legal values.
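+
+For example, to restrict public access for this particular suite to identity
+information only (a minimal sketch):
+\begin{lstlisting}
+[authentication]
+    public = identity
+\end{lstlisting}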
+
 \subsection{[scheduling]}
 
 This section allows cylc to determine when tasks are ready to run.
@@ -454,10 +473,15 @@ the time zone determined by \ref{cycle-point-time-zone} if that is set.
 
 \begin{myitemize}
     \item {\em type:} ISO 8601 date/time point representation (e.g.
- \lstinline=CCYYMMDDThhmm=, 19951231T0630)
+ \lstinline=CCYYMMDDThhmm=, 19951231T0630) or ``now''.
     \item {\em default:} (none)
 \end{myitemize}
 
+The string ``now'' converts to the current date-time on the suite host (adjusted
+to UTC if the suite is in UTC mode but the host is not) to minute resolution.
+Minutes (or hours, etc.) may be ignored depending on your cycle point format
+(\ref{cycle-point-format}).
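+
+For example, to start a suite from the current date-time (a minimal sketch):
+\begin{lstlisting}
+    initial cycle point = now
+\end{lstlisting}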
+
 \subsubsection[final cycle point]{[scheduling] $\rightarrow$ final cycle point}
 
 Cycling tasks are held once they pass the final cycle point, if one is
@@ -484,8 +508,8 @@ defining a list of truncated time points under the initial cycle point
 constraints.
 
 \begin{myitemize}
-    \item {\em type:} List of ISO 8601 truncated time point representations 
-    (e.g. T00, T06, T-30)
+    \item {\em type:} Comma-separated list of ISO 8601 truncated time point
+        representations (e.g. T00, T06, T-30).
     \item {\em default:} (none)
 \end{myitemize}
 
@@ -497,11 +521,18 @@ defining a list of truncated time points under the final cycle point
 constraints.
 
 \begin{myitemize}
-    \item {\em type:} List of ISO 8601 truncated time point representations 
-    (e.g. T00, T06, T-30)
+    \item {\em type:} Comma-separated list of ISO 8601 truncated time point
+        representations (e.g. T00, T06, T-30).
     \item {\em default:} (none)
 \end{myitemize}
 
+\subsubsection[hold after point]{[scheduling] $\rightarrow$ hold after point}
+
+Cycling tasks are held once they pass the hold after cycle point, if one is
+specified. Unlike the final cycle point, the suite will not shut down once all
+tasks have passed this point. If this item is provided, you can override it on
+the command line or in the gcylc suite start panel.
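+
+For example (the date-time value is illustrative and should match your cycle
+point format):
+\begin{lstlisting}
+    hold after point = 20250101T0000
+\end{lstlisting}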
+
 \subsubsection[runahead limit]{[scheduling] $\rightarrow$ runahead limit}
 \label{runahead limit}
 
@@ -568,34 +599,69 @@ The maximum number of active tasks allowed at any one time, for this queue.
 A list of member tasks, or task family names, to assign to this queue
 (assigned tasks will automatically be removed from the default queue).
 \begin{myitemize}
-\item {\em type:} string list
+\item {\em type:} Comma-separated list of strings (task or family names).
 \item {\em default:} none for user-defined queues; all tasks for the ``default'' queue
 \end{myitemize}
 
 \subsubsection[{[[}special tasks{]]}]{[scheduling] $\rightarrow$ [[special tasks]]}
 
-This section is used to identify any tasks with several kinds of special
-behaviour. Family names can be used in special task lists as shorthand
-for listing all member tasks.
+This section is used to identify tasks with special behaviour. Family names can
+be used in special task lists as shorthand for listing all member tasks.
 
-\paragraph[clock-triggered]{[scheduling] $\rightarrow$ [[special tasks]] $\rightarrow$ clock-triggered}
+\paragraph[clock-trigger]{[scheduling] $\rightarrow$ [[special tasks]] $\rightarrow$ clock-trigger}
 
-Clock-triggered tasks wait on a wall clock time specified as an offset from
-their own cycle point, in addition to dependence on other tasks.
-Clock-triggers can be used to make tasks that wait on external real time
-data trigger at the expected time of data availability, or to make suite
-polling tasks trigger at the expected time of the remote suite event.
-In delayed or historical operation clock-triggered tasks do not constrain the
-suite until they catch up to the wall clock.
+Clock-trigger tasks (see~\ref{ClockTriggerTasks}) wait on a wall clock time
+specified as an offset from their own cycle point.
 
 \begin{myitemize}
-    \item {\em type:} list of task or family names with optional offsets in
-        brackets. An offset should be expressed as an ISO8601 interval
-        string, positive or negative, e.g. \lstinline=PT1H= for 1 hour.
+    \item {\em type:} Comma-separated list of task or family names with
+        associated date-time offsets expressed as ISO8601 interval strings,
+        positive or negative, e.g. \lstinline=PT1H= for 1 hour.  The offset
+        specification may be omitted to trigger right on the cycle point.
     \item {\em default:} (none)
-    \item {\em examples:} \lstinline=foo(PT1H30M)=, \lstinline=bar(PT1.5H)=, \lstinline=baz=
+    \item {\em example:}
+\begin{lstlisting}
+    clock-trigger = foo(PT1H30M), bar(PT1.5H), baz
+\end{lstlisting}
 \end{myitemize}
 
+\paragraph[clock-expire]{[scheduling] $\rightarrow$ [[special tasks]] $\rightarrow$ clock-expire}
+\label{ClockExpireRef}
+
+Clock-expire tasks enter the {\em expired} state and skip job submission if too
+far behind the wall clock when they become ready to run.  The expiry time is
+specified as an offset from wall-clock time; typically it should be negative -
+see~\ref{ClockExpireTasks}. 
+
+\begin{myitemize}
+    \item {\em type:} Comma-separated list of task or family names with
+        associated date-time offsets expressed as ISO8601 interval strings,
+        positive or negative, e.g. \lstinline=PT1H= for 1 hour.  The offset
+        may be omitted if it is zero.
+    \item {\em default:} (none)
+    \item {\em example:}
+\begin{lstlisting}
+    clock-expire = foo(-P1D)
+\end{lstlisting}
+\end{myitemize}
+
+\paragraph[external-trigger]{[scheduling] $\rightarrow$ [[special tasks]] $\rightarrow$ external-trigger}
+
+Externally triggered tasks (see~\ref{ExternalTriggers}) wait on external events
+reported via the \lstinline=cylc ext-trigger= command.
+
+\begin{myitemize}
+    \item {\em type:} Comma-separated list of task names with associated
+        external trigger message strings.
+    \item {\em default:} (none)
+    \item {\em example:} (note the comma and line-continuation character)
+\begin{lstlisting}
+    external-trigger = get-satx("new sat-X data ready"), \
+                         get-saty("new sat-Y data ready")
+\end{lstlisting}
+\end{myitemize}
+
+
 \paragraph[sequential]{[scheduling] $\rightarrow$ [[special tasks]] $\rightarrow$ sequential}
 
 Sequential tasks are automatically given dependence on their own
@@ -605,7 +671,7 @@ graph visualization. For more on sequential tasks see~\ref{SequentialTasks}
 and~\ref{LimitPID}.
 
 \begin{myitemize}
-    \item {\em type:} list of task or family names
+    \item {\em type:} Comma-separated list of task or family names.
     \item {\em default:} (none)
     \item {\em example:} \lstinline@sequential = foo, bar@
 \end{myitemize}
@@ -628,7 +694,7 @@ subsequent cycles so they must typically be used in OR'd conditional
 expressions to avoid holding up the suite.
 
 \begin{myitemize}
-    \item {\em type:} list of task or family names
+    \item {\em type:} Comma-separated list of task or family names.
     \item {\em default:} (none)
 \end{myitemize}
 
@@ -640,7 +706,7 @@ automatically one-off tasks and do not need to be listed here.
 Dependence on one-off tasks is not restricted to the first cycle.
 
 \begin{myitemize}
-\item {\em type:} list of task or family names
+\item {\em type:} Comma-separated list of task or family names.
 \item {\em default:} (none)
 \end{myitemize}
 
@@ -656,7 +722,7 @@ removed from the suite dependency graph, in which case some manual
 triggering, or insertion of excluded tasks, may be required.
 
 \begin{myitemize}
-    \item {\em type:} list of task or family names
+    \item {\em type:} Comma-separated list of task or family names.
     \item {\em default:} (none)
 \end{myitemize}
 
@@ -673,7 +739,7 @@ graph, in which case some manual triggering, or insertion of excluded
 tasks, may be required.
 
 \begin{myitemize}
-    \item {\em type:} list of task or family names
+    \item {\em type:} Comma-separated list of task or family names.
     \item {\em default:} (none)
 \end{myitemize}
 
 hierarchies. For details and examples see~\ref{NIORP}.
 
 \subsubsection[{[[}\_\_NAME\_\_{]]}]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]]}
 
-Replace \_\_NAME\_\_ with a namespace name, or a comma separated list of
+Replace \_\_NAME\_\_ with a namespace name, or a comma-separated list of
 names, and repeat as needed to define all tasks in the suite. Names may
 contain letters, digits, underscores, and hyphens. A namespace
 represents a group or family of tasks if other namespaces inherit from
@@ -786,7 +852,7 @@ A list of the immediate parent(s) this namespace inherits from. If no
 parents are listed \lstinline=root= is assumed.
 
 \begin{myitemize}
-\item {\em type:} string list (parent namespace names)
+\item {\em type:} Comma-separated list of strings (parent namespace names).
 \item {\em default:} \lstinline=root=
 \end{myitemize}
 
@@ -919,8 +985,8 @@ execution environment is incremented each time, starting from 1 for the
 first try - this can be used to vary task behavior by try number.
 
 \begin{myitemize}
-    \item {\em type:} list of ISO 8601 duration/interval representations,
-    optionally {\em preceded} by multipliers
+    \item {\em type:} Comma-separated list of ISO 8601 duration/interval representations,
+    optionally {\em preceded} by multipliers.
     \item {\em example:} \lstinline=PT1.5M,3*PT10M= is equivalent to
     \lstinline=PT1.5M, PT10M, PT10M, PT10M= - 1.5 minutes, 10 minutes,
     10 minutes, 10 minutes.
@@ -947,8 +1013,8 @@ until finished.
 see~\ref{DetachingTasks}.}
 
 \begin{myitemize}
-    \item {\em type:} list of ISO 8601 duration/interval representations,
-    optionally {\em preceded} by multipliers
+    \item {\em type:} Comma-separated list of ISO 8601 duration/interval
+        representations, optionally {\em preceded} by multipliers.
     \item {\em example:} \lstinline=PT1M,3*PT1H, PT1M= is equivalent to
     \lstinline=PT1M, PT1H, PT1H, PT1H, PT1M= - 1 minute, 1 hour, 1 hour, 1
     hour, 1 minute.
@@ -976,8 +1042,8 @@ until finished.
 see~\ref{DetachingTasks}.}
 
 \begin{myitemize}
-    \item {\em type:} list of ISO 8601 duration/interval representations,
-    optionally {\em preceded} by multipliers
+    \item {\em type:} Comma-separated list of ISO 8601 duration/interval
+        representations, optionally {\em preceded} by multipliers.
     \item {\em example:} \lstinline=PT1M,3*PT1H, PT1M= is equivalent to
     \lstinline=PT1M, PT1H, PT1H, PT1H, PT1M= - 1 minute, 1 hour, 1 hour, 1
     hour, 1 minute.
@@ -1008,8 +1074,7 @@ it does not \lstinline=cd= elsewhere at runtime). The default directory
 path contains task name and cycle point, to provide a unique workspace for
 every instance of every task. If several tasks need to exchange files and
 simply read and write from their current working directory, this item
-can be used to override the default to make them all use the same workspace
-(see~\ref{worksubdirectory}).
+can be used to override the default to make them all use the same workspace.
 
 The top level share and work directory location can be changed (e.g.\ to a
 large data area) by a global config setting (see~\ref{workdirectory}). 
@@ -1084,8 +1149,8 @@ duration/intervals) which define a range from which the simulation mode task
 run length will be randomly chosen.
 
 \begin{myitemize}
-    \item {\em type:} list containing two ISO 8601 duration/interval
-    representations
+    \item {\em type:} Comma-separated list containing two ISO 8601
+        duration/interval representations.
     \item {\em example:} \lstinline=PT1S,PT20S= - a range of 1 second to 20
     seconds
     \item {\em default:} (1, 16)
@@ -1104,13 +1169,14 @@ new methods.  Cylc has a number of built in job submission methods:
 \item {\em type:} string
 \item {\em legal values:}
    \begin{myitemize}
-       \item {\em background} - direct background execution
-       \item {\em at} - the rudimentary Unix \lstinline=at= scheduler
-       \item {\em loadleveler} - \lstinline=llsubmit=, with directives defined in the suite.rc file
-       \item {\em lsf} - IBM Platform LSF \lstinline=bsub=, with directives defined in the suite.rc file
-       \item {\em pbs} - PBS \lstinline=qsub=, with directives defined in the suite.rc file
-       \item {\em sge} - Sun Grid Engine \lstinline=qsub=, with directives defined in the suite.rc file
-       \item {\em slurm} - Simple Linux Utility for Resource Management \lstinline=sbatch=, with directives defined in the suite.rc file.
+       \item \lstinline=background= - direct background execution
+       \item \lstinline=at= - the rudimentary Unix \lstinline=at= scheduler
+       \item \lstinline=loadleveler= - IBM LoadLeveler \lstinline=llsubmit=, with directives defined in the suite.rc file
+       \item \lstinline=lsf= - IBM Platform LSF \lstinline=bsub=, with directives defined in the suite.rc file
+       \item \lstinline=pbs= - PBS \lstinline=qsub=, with directives defined in the suite.rc file
+       \item \lstinline=sge= - Sun Grid Engine \lstinline=qsub=, with directives defined in the suite.rc file
+       \item \lstinline=slurm= - Simple Linux Utility for Resource Management \lstinline=sbatch=, with directives defined in the suite.rc file
+       \item \lstinline=moab= - Moab workload manager \lstinline=msub=, with directives defined in the suite.rc file
    \end{myitemize}
 \item {\em default:} \lstinline=background=
 \end{myitemize}
@@ -1150,8 +1216,8 @@ to be modified slightly to allow use of the C shell.
 A list of duration (in ISO 8601 syntax), after which to resubmit if job
 submission fails.
 \begin{myitemize}
-    \item {\em type:} list of ISO 8601 duration/interval representations,
-    optionally {\em preceded} by multipliers
+    \item {\em type:} Comma-separated list of ISO 8601 duration/interval
+        representations, optionally {\em preceded} by multipliers.
     \item {\em example:} \lstinline=PT1M,3*PT1H, P1D= is equivalent to
     \lstinline=PT1M, PT1H, PT1H, PT1H, P1D= - 1 minute, 1 hour, 1 hour, 1
     hour, 1 day.
@@ -1212,6 +1278,50 @@ the different selectable hosts, you can configure your
 \item {\em default:} (none)
 \end{myitemize}
 
+\subparagraph[retrieve job logs]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[remote]]] $\rightarrow$ retrieve job logs}
+\label{runtime-remote-retrieve-job-logs}
+
+Remote task job logs are saved to the suite run directory on the task host, not
+on the suite host. If you want the job logs pulled back to the suite host
+automatically, you can set this item to \lstinline=True=. The suite will
+then attempt to \lstinline=rsync= the job logs once from the remote host each
+time a task job completes. E.g. if the job file is
+\lstinline=~/cylc-run/tut.oneoff.remote/log/job/1/hello/01/job=, anything under
+\lstinline=~/cylc-run/tut.oneoff.remote/log/job/1/hello/01/= will be retrieved.
+
+\begin{myitemize}
+\item {\em type:} boolean
+\item {\em default:} False
+\end{myitemize}
+
+\subparagraph[retrieve job logs max size]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[remote]]] $\rightarrow$ retrieve job logs max size}
+\label{runtime-remote-retrieve-job-logs-max-size}
+
+If the disk space of the suite host is limited, you may want to set the maximum
+sizes of the job log files to retrieve. The value can be anything that is
+accepted by the \lstinline@--max-size=SIZE@ option of the \lstinline=rsync=
+command.
+
+\begin{myitemize}
+\item {\em type:} string
+\item {\em default:} None
+\end{myitemize}
+
+\subparagraph[retrieve job logs retry delays]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[remote]]] $\rightarrow$ retrieve job logs retry delays}
+\label{runtime-remote-retrieve-job-logs-retry-delays}
+
+Some batch systems have considerable delays between the time when the job
+completes and when it writes the job logs in its normal location. If this is
+the case, you can configure an initial delay and some retry delays between
+subsequent attempts. The default behaviour is to attempt once without any
+delay.
+
+\begin{myitemize}
+    \item {\em type:} Comma-separated list of ISO 8601 duration/interval representations, optionally {\em preceded} by multipliers.
+    \item {\em default:} (none)
+    \item {\em example:} \lstinline@retrieve job logs retry delays = PT10S, PT1M, PT5M@
+\end{myitemize}
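+
+For example, a remote task might pull its job logs back to the suite host with
+a couple of retries (the values here are illustrative only):
+\begin{lstlisting}
+    [[[remote]]]
+        retrieve job logs = True
+        retrieve job logs max size = 10M
+        retrieve job logs retry delays = PT10S, PT1M
+\end{lstlisting}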
+
 \subparagraph[suite definition directory]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[remote]]] $\rightarrow$  suite definition directory}
 
 The path to the suite definition directory on the remote host, needed if
@@ -1232,30 +1342,40 @@ interpretation on the remote host.
 \paragraph[{[[[}event hooks{]]]}]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[event hooks]]]}
 \label{TaskEventHandling}
 
-Cylc has internal ``hooks'' to which you can attach handlers that are
-called by the suite daemon whenever certain events occur. This section
-configures task event hooks; see~\ref{SuiteEventHandling} for
-suite event hooks.
+Cylc can call nominated event handlers when certain task events occur. This
+section configures specific task event handlers; see~\ref{SuiteEventHandling} for
+suite event hooks. See ~\ref{TaskEventHandling2} for general and built-in
+task event handlers.
 
-Event handlers can send an email or an SMS, call a pager, intervene
-in the operation of their own suite, or whatever.
-They can be held in the suite bin directory, otherwise it is up to you
-to ensure their location is in \lstinline=$PATH= (in the shell in which
-cylc runs, on the suite host).
-\lstinline=cylc [hook] email-task= is a simple task event handler.
+Event handler commands can be located in the suite \lstinline=bin/= directory,
+otherwise it is up to you to ensure their location is in \lstinline=$PATH= (in
+the shell in which cylc runs, on the suite host). The commands should require
+very little resource to run and should return quickly.
 
-Task event handlers are called by the suite daemon with the following arguments:
-\begin{lstlisting}
-<task-event-handler> EVENT SUITE TASK MESSAGE
-\end{lstlisting}
-where,
+Each task event handler can be specified as a list of command lines or command
+line templates.
+
+A command line template may have any or all of these patterns which will be
+substituted with actual values:
 \begin{myitemize}
-    \item EVENT - event name (see below)
-    \item SUITE - suite name
-    \item TASK  - task ID
-    \item MESSAGE - describes what has happened.
+    \item \%(event)s: event name
+    \item \%(suite)s: suite name
+    \item \%(point)s: cycle point
+    \item \%(name)s: task name
+    \item \%(submit\_num)s: submit number
+    \item \%(id)s: task ID (i.e. \%(name)s.\%(point)s)
+    \item \%(message)s: event message, if any
 \end{myitemize}
 
+Otherwise, the command line will be called with the following arguments:
+\begin{lstlisting}
+<task-event-handler> %(event)s %(suite)s %(id)s %(message)s
+\end{lstlisting}
+
+For an explanation of the substitution syntax, see String Formatting Operations
+in the Python documentation:
+\url{https://docs.python.org/2/library/stdtypes.html#string-formatting}.
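+
+For example, a templated handler entry might look like this (the script name
+is illustrative):
+\begin{lstlisting}
+    [[[event hooks]]]
+        failed handler = notify-event.sh %(event)s %(suite)s %(id)s
+\end{lstlisting}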
+
 Additional information can be passed to event handlers via the
 [cylc] $\rightarrow$ [[environment]] (but not via task
 runtime environments - event handlers are not called by tasks).
@@ -1282,12 +1402,13 @@ A list of one or more event handlers to call when one of the following EVENTs oc
 
 Item details:
 \begin{myitemize}
-    \item {\em type:} string list (event handler scripts)
+    \item {\em type:} Comma-separated list of strings (event handler scripts).
     \item {\em default:} None
     \item {\em example:} \lstinline@failed handler = my-failed-handler.sh@
 \end{myitemize}
 
 \subparagraph[submission timeout]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[event hooks]]] $\rightarrow$ submission timeout}
+\label{runtime-event-hooks-submission-timeout}
 
 If a task has not started after the specified ISO 8601 duration/interval, the
 {\em submission timeout} event handler(s) will be called.
@@ -1298,6 +1419,7 @@ If a task has not started after the specified ISO 8601 duration/interval, the
 \end{myitemize}
 
 \subparagraph[execution timeout]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[event hooks]]] $\rightarrow$ execution timeout}
+\label{runtime-event-hooks-execution-timeout}
 
 If a task has not finished after the specified ISO 8601 duration/interval, the
 {\em execution timeout} event handler(s) will be called.
@@ -1308,6 +1430,7 @@ If a task has not finished after the specified ISO 8601 duration/interval, the
 \end{myitemize}
 
 \subparagraph[reset timer]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[event hooks]]] $\rightarrow$ reset timer}
+\label{runtime-event-hooks-reset-timer}
 
 If you set an execution timeout the timer can be reset to zero every
 time a message is received from the running task (which indicates the
 finish in the allotted time regardless of incoming messages.
 \item {\em default:} False
 \end{myitemize}
 
+\paragraph[{[[[}events{]]]}]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]]}
+\label{TaskEventHandling2}
+
+Cylc can call nominated event handlers when certain task events occur. This
+section configures general and built-in task event handlers;
+see~\ref{SuiteEventHandling} for suite event hooks
+and~\ref{TaskEventHandling} for specific task event handlers.
+
+Event handler commands can be located in the suite \lstinline=bin/= directory,
+otherwise it is up to you to ensure their location is in \lstinline=$PATH= (in
+the shell in which cylc runs, on the suite host). The commands should require
+very little resource to run and should return quickly.
+
+Each task event handler can be specified as a list of command lines or command
+line templates.
+
+A command line template may have any or all of these patterns which will be
+substituted with actual values:
+\begin{myitemize}
+    \item \%(event)s: event name
+    \item \%(suite)s: suite name
+    \item \%(point)s: cycle point
+    \item \%(name)s: task name
+    \item \%(submit\_num)s: submit number
+    \item \%(id)s: task ID (i.e. \%(name)s.\%(point)s)
+    \item \%(message)s: event message, if any
+\end{myitemize}
+
+Otherwise, the command line will be called with the following command line
+arguments:
+\begin{lstlisting}
+<task-event-handler> %(event)s %(suite)s %(id)s %(message)s
+\end{lstlisting}
+
+\subparagraph[handlers]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ handlers}
+
+Specify a list of command lines or command line templates as task event handlers.
+
+\begin{myitemize}
+    \item {\em type:} Comma-separated list of strings (event handler command line or command line templates).
+    \item {\em default:} (none)
+    \item {\em example:} \lstinline@handlers = my-handler.sh@
+\end{myitemize}
+
+\subparagraph[handler events]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ handler events}
+
+Specify the events for which the general task event handlers should be invoked.
+
+\begin{myitemize}
+    \item {\em type:} Comma-separated list of events
+    \item {\em default:} (none)
+    \item {\em example:} \lstinline@handler events = submission failed, failed@
+\end{myitemize}
+
+\subparagraph[handler retry delays]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ handler retry delays}
+\label{runtime-events-handler-retry-delays}
+
+Specify an initial delay before running an event handler command and any retry
+delays in case the command returns a non-zero code. The default behaviour is to
+run an event handler command once without any delay.
+
+\begin{myitemize}
+    \item {\em type:} Comma-separated list of ISO 8601 duration/interval representations, optionally {\em preceded} by multipliers.
+    \item {\em default:} (none)
+    \item {\em example:} \lstinline@handler retry delays = PT10S, PT1M, PT5M@
+\end{myitemize}
+
+\subparagraph[mail events]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ mail events}
+
+Specify the events for which notification emails should be sent.
+
+\begin{myitemize}
+    \item {\em type:} Comma-separated list of events
+    \item {\em default:} (none)
+    \item {\em example:} \lstinline@mail events = submission failed, failed@
+\end{myitemize}
+
+\subparagraph[mail from]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ mail from}
+
+Specify an alternate \lstinline=from:= email address for event notifications.
+
+\begin{myitemize}
+    \item {\em type:} string
+    \item {\em default:} None, (notifications@HOSTNAME)
+    \item {\em example:} \lstinline@mail from = no-reply\@your-org@
+\end{myitemize}
+
+\subparagraph[mail retry delays]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ mail retry delays}
+
+Specify an initial delay before running the mail notification command and any
+retry delays in case the command returns a non-zero code. The default behaviour
+is to run the mail notification command once without any delay.
+
+\begin{myitemize}
+    \item {\em type:} Comma-separated list of ISO 8601 duration/interval representations, optionally {\em preceded} by multipliers.
+    \item {\em default:} (none)
+    \item {\em example:} \lstinline@mail retry delays = PT10S, PT1M, PT5M@
+\end{myitemize}
+
+\subparagraph[mail smtp]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ mail smtp}
+
+Specify the SMTP server for sending email notifications.
+
+\begin{myitemize}
+    \item {\em type:} string
+    \item {\em default:} None, (localhost:25)
+    \item {\em example:} \lstinline@mail smtp = smtp.yourorg@
+\end{myitemize}
+
+\subparagraph[mail to]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ mail to}
+
+A list of email addresses to which task event notifications should be sent.
+The list can be anything accepted by the \lstinline=mail= command.
+
+\begin{myitemize}
+    \item {\em type:} string
+    \item {\em default:} None, (USER@HOSTNAME)
+    \item {\em example:} \lstinline@mail to = your.colleague@
+\end{myitemize}
+
+\subparagraph[submission timeout]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ submission timeout}
+
+Equivalent to~\ref{runtime-event-hooks-submission-timeout}.
+
+\subparagraph[execution timeout]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ execution timeout}
+
+Equivalent to~\ref{runtime-event-hooks-execution-timeout}.
+
+\subparagraph[register job logs retry delays]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ register job logs retry delays}
+
+Some batch systems have considerable delays between the time when the job
+completes and when it writes the job logs in its normal location. Consequently,
+the suite may be unable to register the existence of some job log files in its
+runtime database until the job log files become available at their expected
+location. If this is the case, you can configure an initial delay and some
+retry delays between subsequent attempts. The default behaviour is to attempt
+once without any delay.
+
+\begin{myitemize}
+    \item {\em type:} Comma-separated list of ISO 8601 duration/interval
+        representations, optionally {\em preceded} by multipliers.
+    \item {\em example:} \lstinline@PT5S, PT1M, PT5M@
+    \item {\em default:} (none)
+\end{myitemize}
+
+\subparagraph[reset timer]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[events]]] $\rightarrow$ reset timer}
+
+Equivalent to~\ref{runtime-event-hooks-reset-timer}.
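+
+Putting some of these items together, a task might be configured to email a
+colleague on failure events (the address and delay values are illustrative):
+\begin{lstlisting}
+    [[[events]]]
+        mail events = submission failed, failed
+        mail to = your.colleague
+        mail retry delays = PT10S, PT1M
+\end{lstlisting}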
+
 \paragraph[{[[[}environment{]]]}]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[environment]]]}
 
 The user defined task execution environment. Variables defined here can
@@ -1380,7 +1652,7 @@ inherited environment, others will be filtered out. Variables may also
 be explicitly excluded by an \lstinline=exclude= list.
 
 \begin{myitemize}
-\item {\em type:} string list
+\item {\em type:} Comma-separated list of strings (variable names).
 \item {\em default:} (none)
 \end{myitemize}
 
@@ -1391,7 +1663,7 @@ environment.  Variables may also be implicitly excluded by
 omission from an \lstinline=include= list.
 
 \begin{myitemize}
-\item {\em type:} string list
+\item {\em type:} Comma-separated list of strings (variable names).
 \item {\em default:} (none)
 \end{myitemize}
 
@@ -1400,10 +1672,10 @@ omission from an \lstinline=include= list.
 Batch queue scheduler directives.  Whether or not these are used depends
 on the job submission method. For the built-in methods that support directives
 (\lstinline=loadleveler=, \lstinline=lsf=, \lstinline=pbs=, \lstinline=sge=,
-\lstinline=slurm=), directives are written to the top of the task job script
-in the correct format for the method. Specifying directives individually like
-this allows use of default directives that can be individually overridden at
-lower levels of the runtime namespace hierarchy.
+\lstinline=slurm=, \lstinline=moab=), directives are written to the top of the
+task job script in the correct format for the method. Specifying directives
+individually like this allows use of default directives that can be
+individually overridden at lower levels of the runtime namespace hierarchy.
 
 \subparagraph[\_\_DIRECTIVE\_\_ ]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[directives]]] $\rightarrow$ \_\_DIRECTIVE\_\_}
 
@@ -1464,6 +1736,16 @@ suite-name sub-directory of this location).
     \item {\em default:} as configured by site/user config (for your own suites)
 \end{myitemize}
 
+\subparagraph[template]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[suite state polling]]] $\rightarrow$ template}
+
+Cycle point template of the target suite, if different from that of the polling suite.
+
+\begin{myitemize}
+    \item {\em type:} string
+    \item {\em default:} cycle point format of the polling suite
+    \item {\em example:} \lstinline=%Y-%m-%dT%H=
+\end{myitemize}
+
 \subparagraph[interval]{[runtime] $\rightarrow$ [[\_\_NAME\_\_]] $\rightarrow$ [[[suite state polling]]] $\rightarrow$ interval}
 
 Polling interval expressed as an ISO 8601 duration/interval.
@@ -1560,7 +1842,7 @@ Interactive GUI controls can then be used to group and ungroup family
 nodes at will.
 
 \begin{myitemize}
-    \item {\em type:} list of family names
+    \item {\em type:} Comma-separated list of family names.
     \item {\em default:} (none)
 \end{myitemize}
 
@@ -1591,7 +1873,7 @@ Set the default attributes (color and style etc.) of graph nodes (tasks and fami
 Attribute pairs must be quoted to hide the internal \lstinline@=@ character.
 
 \begin{myitemize}
-    \item {\em type:} list of quoted \lstinline@'attribute=value'@ pairs
+    \item {\em type:} Comma-separated list of quoted \lstinline@'attribute=value'@ pairs.
     \item {\em legal values:} see graphviz or pygraphviz documentation
     \item {\em default:} \lstinline@'style=filled', 'fillcolor=yellow', 'shape=box'@
 \end{myitemize}
@@ -1602,7 +1884,7 @@ Set the default attributes (color and style etc.) of graph edges
 (dependency arrows).  Attribute pairs must be quoted to hide the
 internal \lstinline@=@ character.
 \begin{myitemize}
-    \item {\em type:} list of quoted \lstinline@'attribute=value'@ pairs
+    \item {\em type:} Comma-separated list of quoted \lstinline@'attribute=value'@ pairs.
     \item {\em legal values:} see graphviz or pygraphviz documentation
     \item {\em default:} \lstinline@'color=black'@
 \end{myitemize}
@@ -1619,7 +1901,7 @@ root, so you can style family and member nodes at once by family name.
 Replace \_\_GROUP\_\_ with each named group of tasks or families.
 
 \begin{myitemize}
-    \item {\em type:} comma separated list of task or family names
+    \item {\em type:} Comma-separated list of task or family names.
     \item {\em default:} (none)
     \item {\em example:}
 \begin{lstlisting}
@@ -1646,7 +1928,7 @@ Replace \_\_NAME\_\_ with each node or node group for style attribute
 assignment.
 
 \begin{myitemize}
-    \item {\em type:} list of quoted \lstinline@'attribute=value'@ pairs
+    \item {\em type:} Comma-separated list of quoted \lstinline@'attribute=value'@ pairs.
     \item {\em legal values:} see graphviz or pygraphviz documentation
     \item {\em default:} (none)
     \item {\em example:} (with reference to the node groups defined above)
diff --git a/examples/admin/suite.rc b/examples/admin/suite.rc
index f761ebb..d79f8fe 100644
--- a/examples/admin/suite.rc
+++ b/examples/admin/suite.rc
@@ -18,7 +18,7 @@ title = "Cylc Admin Test Suite"
     [[special tasks]]
     start-up         = prep
     cold-start       = ColdA, ColdB, ColdC
-    clock-triggered  = X(1)
+    clock-trigger  = X(1)
 
     [[dependencies]]
         [[[0,6,12,18]]]
diff --git a/examples/clock-expire/suite.rc b/examples/clock-expire/suite.rc
new file mode 100644
index 0000000..3ac1f72
--- /dev/null
+++ b/examples/clock-expire/suite.rc
@@ -0,0 +1,20 @@
+title = task expire example suite
+description = """
+Skip a daily post-processing workflow if the 'copy' task has expired."""
+
+[cylc]
+   cycle point format = %Y-%m-%dT%H
+[scheduling]
+    initial cycle point = now
+    final cycle point = +P3D
+    [[special tasks]]
+        clock-expire = copy(-P1DT1H)
+        # NOTE this would normally be copy(P1D) i.e. expire if more than 1 day
+        # behind the wall clock, but here we have to start from 'now' in order
+        # to stay near the wall clock, so expire the task if more than 1 day
+        # behind "now + 1 day". This makes the first two 'copy' tasks expire.
+    [[dependencies]]
+        [[[P1D]]]
+            graph = """
+        model[-P1D] => model => copy => proc
+              copy:expired => !proc"""
diff --git a/examples/demo/ecox/suite.rc b/examples/demo/ecox/suite.rc
index e45107c..bf47079 100644
--- a/examples/demo/ecox/suite.rc
+++ b/examples/demo/ecox/suite.rc
@@ -18,7 +18,7 @@ edit 2014: this pre-dates runtime inheritance and other new features!"""
             mos_nzlam_vp_5day, mos_nzlam_vp_curr, UM_nzlam, nzwave, \
             ext_create_streamflow, globalwave, ncep_get, ncep_run, ncep_xml
 
-        clock-triggered =  ext_get_clidbdata(2.0), ext_create_streamflow(3.0), \
+        clock-trigger =  ext_get_clidbdata(2.0), ext_create_streamflow(3.0), \
             ecan_rain_obs(2.0), ext_check_globalwind_ukmet(4.30), \
             ext_check_globalice_ukmet(4.30), ext_check_frames_ukmet(-2.25), \
             ext_check_bgerr_ukmet(3.15), ext_check_obstore_ukmet(3.15), \
diff --git a/examples/satellite/suite.rc b/examples/satellite/ext-trigger/suite.rc
similarity index 54%
copy from examples/satellite/suite.rc
copy to examples/satellite/ext-trigger/suite.rc
index c0d1ddb..c1cc775 100644
--- a/examples/satellite/suite.rc
+++ b/examples/satellite/ext-trigger/suite.rc
@@ -1,10 +1,16 @@
 #!Jinja2
 
-title = Demonstrates real time satellite data processing
+title = Real time satellite data processing demo, variant 3 of 3
+
 description = """
-Each successive integer cycle retrieves and processes the next
-arbitrarily timed and arbitrarily labelled dataset, in parallel
-with previous cycles if the data comes in quickly."""
+Successive cycle points retrieve and process the next arbitrarily timed and
+labelled dataset, in parallel if the data comes in quickly. This variant of the
+suite has initial get_data tasks with external triggers: they do not submit
+until triggered by an external system."""
+
+# Note that the satellite simulator task that supplies the external event
+# trigger here is itself a task in this suite - i.e. not really "external" -
+# but that is just a convenient way to keep the example self-contained.
 
 # you can monitor output processing with:
 # $ watch -n 1 \
@@ -22,6 +28,8 @@ with previous cycles if the data comes in quickly."""
     final cycle point = {{N_DATASETS}}
     max active cycle points = 5
     # runahead limit = P5 # (alternative limiting method)
+    [[special tasks]]
+        external-trigger = get_data("new dataset ready for processing")
     [[dependencies]]
         [[[R1]]] # first cycle
             graph = prep => satsim & get_data
@@ -37,70 +45,68 @@ with previous cycles if the data comes in quickly."""
 [runtime]
     [[prep]]
         title = clean the suite output directories
-        script = \
+        command scripting = \
 rm -rf $CYLC_SUITE_SHARE_DIR $CYLC_SUITE_WORK_DIR
 
     [[satsim]]
         title = simulate a satellite data feed
         description = """Generates {{N_DATASETS}} arbitrarily labelled
-datasets after random durations."""
-        pre-script = mkdir -p {{DATA_IN_DIR}}
-        script = """
+datasets very quickly, to show parallel processing streams."""
+        pre-command scripting = mkdir -p {{DATA_IN_DIR}}
+        command scripting = """
 COUNT=0
 while true; do
-    (( COUNT == {{N_DATASETS}} )) && break
-    sleep $(( 1 + RANDOM % 10 ))
-    touch {{DATA_IN_DIR}}/dataset-$(date +%s).raw
-    (( COUNT += 1 ))
+    ((COUNT == {{N_DATASETS}})) && break
+    # sleep $((RANDOM % 20))
+    # Generate datasets very quickly to test parallel processing.
+    DATA_ID=$(date +%s).$((RANDOM % 100))
+    DATA_FILE=dataset-${DATA_ID}.raw
+    touch {{DATA_IN_DIR}}/$DATA_FILE
+    ((COUNT += 1))
+    # (The random suffix on DATA_ID distinguishes fast-arriving datasets.)
+    # Trigger downstream processing in the suite.
+    cylc ext-trigger $CYLC_SUITE_NAME \
+       "new dataset ready for processing" $DATA_ID
 done"""
 
     [[WORKDIR]]
         # Define a common cycle-point-specific work-directory for all
         # processing tasks so that they all work on the same dataset.
         work sub-directory = proc-$CYLC_TASK_CYCLE_POINT
-        pre-script = sleep 10
+        pre-command scripting = "DATASET=dataset-$CYLC_EXT_TRIGGER_ID"
+        post-command scripting = sleep 5
 
     [[get_data]]
         inherit = WORKDIR
-        title = grab one new dataset, waiting if necessary
-        script = """
-while true; do
-    DATASET=$( ls {{DATA_IN_DIR}}/dataset-*.raw 2>/dev/null | head -n 1 )
-    if [[ -z $DATASET ]]; then
-        sleep 1
-        continue
-    fi
-    break
-done
-mv $DATASET $PWD"""
+        title = retrieve next dataset
+        description = just do it - we know it exists already
+        command scripting = mv {{DATA_IN_DIR}}/${DATASET}.raw $PWD
 
     [[proc1]]
         inherit = WORKDIR
         title = convert .raw dataset to .proc1 form
-        script = """
-DATASET=$(ls dataset-*.raw)
-mv $DATASET ${DATASET%raw}proc1"""
+        command scripting = mv ${DATASET}.raw ${DATASET}.proc1
 
     [[proc2]]
         inherit = WORKDIR
         title = convert .proc1 dataset to .proc2 form
-        script = """
-DATASET=$(ls dataset-*.proc1)
-mv $DATASET ${DATASET%proc1}proc2"""
+        command scripting = mv ${DATASET}.proc1 ${DATASET}.proc2
 
     [[products]]
         inherit = WORKDIR
         title = generate products from .proc2 processed dataset
-        pre-script = mkdir -p {{PRODUCT_DIR}}
-        script = """
-DATASET=$( ls dataset-*.proc2 )
-mv $DATASET {{PRODUCT_DIR}}/${DATASET%proc2}prod"""
+        command scripting = """
+mkdir -p {{PRODUCT_DIR}}
+mv ${DATASET}.proc2 {{PRODUCT_DIR}}/${DATASET}.prod"""
 
     [[collate]]
         title = collate all products from the suite run
         # Note you might want to use "cylc suite-state" to check that
         # _all_ product tasks have finished before collating results.
-        script = ls {{PRODUCT_DIR}}
+        command scripting = """
+echo PRODUCTS:
+ls {{PRODUCT_DIR}}
+sleep 20"""
 
 [visualization]
     default node attributes = "style=filled", "shape=box"
diff --git a/examples/satellite/suite.rc b/examples/satellite/task-polling/suite.rc
similarity index 79%
copy from examples/satellite/suite.rc
copy to examples/satellite/task-polling/suite.rc
index c0d1ddb..406ed55 100644
--- a/examples/satellite/suite.rc
+++ b/examples/satellite/task-polling/suite.rc
@@ -1,10 +1,12 @@
 #!Jinja2
 
-title = Demonstrates real time satellite data processing
+title = Real time satellite data processing demo, variant 1 of 3
+
 description = """
-Each successive integer cycle retrieves and processes the next
-arbitrarily timed and arbitrarily labelled dataset, in parallel
-with previous cycles if the data comes in quickly."""
+Successive cycle points retrieve and process the next arbitrarily timed and
+labelled dataset, in parallel if the data comes in quickly. This variant of the
+suite has initial get_data tasks that trigger immediately and run continuously,
+with a manual check-and-wait loop, until they detect new data."""
 
 # you can monitor output processing with:
 # $ watch -n 1 \
@@ -21,7 +23,6 @@ with previous cycles if the data comes in quickly."""
     initial cycle point = 1
     final cycle point = {{N_DATASETS}}
     max active cycle points = 5
-    # runahead limit = P5 # (alternative limiting method)
     [[dependencies]]
         [[[R1]]] # first cycle
             graph = prep => satsim & get_data
@@ -48,10 +49,13 @@ datasets after random durations."""
         script = """
 COUNT=0
 while true; do
-    (( COUNT == {{N_DATASETS}} )) && break
-    sleep $(( 1 + RANDOM % 10 ))
-    touch {{DATA_IN_DIR}}/dataset-$(date +%s).raw
-    (( COUNT += 1 ))
+    ((COUNT == {{N_DATASETS}})) && break
+    DATA_ID=$(date +%s).$((RANDOM % 100))
+    sleep $((RANDOM % 20))
+    DATA_FILE=dataset-${DATA_ID}.raw
+    touch {{DATA_IN_DIR}}/$DATA_FILE
+    cylc task message "$DATA_FILE ready for processing"
+    ((COUNT += 1))
 done"""
 
     [[WORKDIR]]
@@ -63,11 +67,11 @@ done"""
     [[get_data]]
         inherit = WORKDIR
         title = grab one new dataset, waiting if necessary
+        post-script = sleep 5
         script = """
 while true; do
     DATASET=$( ls {{DATA_IN_DIR}}/dataset-*.raw 2>/dev/null | head -n 1 )
     if [[ -z $DATASET ]]; then
-        sleep 1
         continue
     fi
     break
@@ -91,8 +95,8 @@ mv $DATASET ${DATASET%proc1}proc2"""
     [[products]]
         inherit = WORKDIR
         title = generate products from .proc2 processed dataset
-        pre-script = mkdir -p {{PRODUCT_DIR}}
         script = """
+mkdir -p {{PRODUCT_DIR}}
 DATASET=$( ls dataset-*.proc2 )
 mv $DATASET {{PRODUCT_DIR}}/${DATASET%proc2}prod"""
 
@@ -100,7 +104,10 @@ mv $DATASET {{PRODUCT_DIR}}/${DATASET%proc2}prod"""
         title = collate all products from the suite run
         # Note you might want to use "cylc suite-state" to check that
         # _all_ product tasks have finished before collating results.
-        script = ls {{PRODUCT_DIR}}
+        script = """
+echo PRODUCTS:
+ls {{PRODUCT_DIR}}
+sleep 20"""
 
 [visualization]
     default node attributes = "style=filled", "shape=box"
diff --git a/examples/satellite/suite.rc b/examples/satellite/task-retries/suite.rc
similarity index 66%
copy from examples/satellite/suite.rc
copy to examples/satellite/task-retries/suite.rc
index c0d1ddb..0d347b1 100644
--- a/examples/satellite/suite.rc
+++ b/examples/satellite/task-retries/suite.rc
@@ -1,10 +1,13 @@
 #!Jinja2
 
-title = Demonstrates real time satellite data processing
+title = Real time satellite data processing demo, variant 2 of 3
+
 description = """
-Each successive integer cycle retrieves and processes the next
-arbitrarily timed and arbitrarily labelled dataset, in parallel
-with previous cycles if the data comes in quickly."""
+Successive cycle points retrieve and process the next arbitrarily timed and
+labelled dataset, in parallel if the data comes in quickly.  This variant of
+the suite has initial get_data tasks that trigger immediately and simply report
+failure if no new data is available, but they are configured to retry
+automatically until they succeed."""
 
 # you can monitor output processing with:
 # $ watch -n 1 \
@@ -21,7 +24,6 @@ with previous cycles if the data comes in quickly."""
     initial cycle point = 1
     final cycle point = {{N_DATASETS}}
     max active cycle points = 5
-    # runahead limit = P5 # (alternative limiting method)
     [[dependencies]]
         [[[R1]]] # first cycle
             graph = prep => satsim & get_data
@@ -37,62 +39,61 @@ with previous cycles if the data comes in quickly."""
 [runtime]
     [[prep]]
         title = clean the suite output directories
-        script = \
+        command scripting = \
 rm -rf $CYLC_SUITE_SHARE_DIR $CYLC_SUITE_WORK_DIR
 
     [[satsim]]
         title = simulate a satellite data feed
         description = """Generates {{N_DATASETS}} arbitrarily labelled
 datasets after random durations."""
-        pre-script = mkdir -p {{DATA_IN_DIR}}
-        script = """
+        pre-command scripting = mkdir -p {{DATA_IN_DIR}}
+        command scripting = """
 COUNT=0
 while true; do
-    (( COUNT == {{N_DATASETS}} )) && break
-    sleep $(( 1 + RANDOM % 10 ))
-    touch {{DATA_IN_DIR}}/dataset-$(date +%s).raw
-    (( COUNT += 1 ))
+    ((COUNT == {{N_DATASETS}})) && break
+    sleep $((RANDOM % 20))
+    DATA_ID=$(date +%s).$((RANDOM % 100))
+    DATA_FILE=dataset-${DATA_ID}.raw
+    touch {{DATA_IN_DIR}}/$DATA_FILE
+    cylc task message "$DATA_FILE ready for processing"
+    ((COUNT += 1))
 done"""
 
     [[WORKDIR]]
         # Define a common cycle-point-specific work-directory for all
         # processing tasks so that they all work on the same dataset.
         work sub-directory = proc-$CYLC_TASK_CYCLE_POINT
-        pre-script = sleep 10
+        post-command scripting = sleep 5
 
     [[get_data]]
         inherit = WORKDIR
-        title = grab one new dataset, waiting if necessary
-        script = """
-while true; do
-    DATASET=$( ls {{DATA_IN_DIR}}/dataset-*.raw 2>/dev/null | head -n 1 )
-    if [[ -z $DATASET ]]; then
-        sleep 1
-        continue
-    fi
-    break
-done
+        title = retrieve next dataset
+        description = grab ONE new dataset if available else retry
+        retry delays = 10*PT2S
+        command scripting = """
+DATASET=$( ls {{DATA_IN_DIR}}/dataset-*.raw 2>/dev/null | head -n 1 )
+[[ -z $DATASET ]] && exit 1
 mv $DATASET $PWD"""
 
     [[proc1]]
         inherit = WORKDIR
         title = convert .raw dataset to .proc1 form
-        script = """
+        command scripting = """
 DATASET=$(ls dataset-*.raw)
 mv $DATASET ${DATASET%raw}proc1"""
 
     [[proc2]]
         inherit = WORKDIR
         title = convert .proc1 dataset to .proc2 form
-        script = """
+        command scripting = """
 DATASET=$(ls dataset-*.proc1)
 mv $DATASET ${DATASET%proc1}proc2"""
 
     [[products]]
         inherit = WORKDIR
         title = generate products from .proc2 processed dataset
-        pre-script = mkdir -p {{PRODUCT_DIR}}
-        script = """
+        command scripting = """
+mkdir -p {{PRODUCT_DIR}}
 DATASET=$( ls dataset-*.proc2 )
 mv $DATASET {{PRODUCT_DIR}}/${DATASET%proc2}prod"""
 
@@ -100,7 +101,11 @@ mv $DATASET {{PRODUCT_DIR}}/${DATASET%proc2}prod"""
         title = collate all products from the suite run
         # Note you might want to use "cylc suite-state" to check that
         # _all_ product tasks have finished before collating results.
-        script = ls {{PRODUCT_DIR}}
+        command scripting = """
+echo PRODUCTS:
+ls {{PRODUCT_DIR}}
+sleep 20"""
+
 
 [visualization]
     default node attributes = "style=filled", "shape=box"
diff --git a/lib/cylc/C3MRO.py b/lib/cylc/C3MRO.py
index ed3aa0f..ea3e283 100644
--- a/lib/cylc/C3MRO.py
+++ b/lib/cylc/C3MRO.py
@@ -90,54 +90,64 @@ def print_mro(C):
 print_mro(ex_9.Z)
 """
 
-class C3( object ):
-    def __init__( self, tree={} ):
+
+class C3(object):
+    def __init__(self, tree={}):
         self.tree = tree
 
     def merge(self, seqs):
-        #print '\n\nCPL[%s]=%s' % (seqs[0][0],seqs),
-        res = []; i=0
+        # print '\n\nCPL[%s]=%s' % (seqs[0][0],seqs),
+        res = []
+        i = 0
         while 1:
-          nonemptyseqs=[seq for seq in seqs if seq]
-          if not nonemptyseqs: return res
-          i+=1; #print '\n',i,'round: candidates...',
-          for seq in nonemptyseqs: # find merge candidates among seq heads
-              cand = seq[0]; #print ' ',cand,
-              nothead=[s for s in nonemptyseqs if cand in s[1:]]
-              if nothead: cand=None #reject candidate
-              else: break
-          if not cand: raise Exception( "ERROR: bad runtime namespace inheritance hierarchy.\nSee the cylc documentation on multiple inheritance." )
-          res.append(cand)
-          for seq in nonemptyseqs: # remove cand
-              if seq[0] == cand: del seq[0]
-
-    def mro(self,C):
-        "Compute the precedence list (mro) according to C3"
+            nonemptyseqs = [seq for seq in seqs if seq]
+            if not nonemptyseqs:
+                return res
+            i += 1  # print '\n',i,'round: candidates...',
+            for seq in nonemptyseqs:  # find merge candidates among seq heads
+                cand = seq[0]  # print ' ',cand,
+                nothead = [s for s in nonemptyseqs if cand in s[1:]]
+                if nothead:
+                    cand = None  # reject candidate
+                else:
+                    break
+            if not cand:
+                raise Exception(
+                    "ERROR: bad runtime namespace inheritance hierarchy.\n"
+                    "See the cylc documentation on multiple inheritance.")
+            res.append(cand)
+            for seq in nonemptyseqs:  # remove cand
+                if seq[0] == cand:
+                    del seq[0]
+
+    def mro(self, C):
+        """Compute the precedence list (mro) according to C3"""
         # copy() required here for tree to remain unchanged
-        return self.merge([[C]]+map(self.mro,self.tree[C])+[copy(self.tree[C])])
+        return self.merge(
+            [[C]] + map(self.mro, self.tree[C]) + [copy(self.tree[C])])
 
 if __name__ == "__main__":
     parents = {}
-    parents['root' ] = []
-    parents['a' ] = ['root']
-    parents['b' ] = ['root']
-    parents['foo' ] = ['a','b']
+    parents['root'] = []
+    parents['a'] = ['root']
+    parents['b'] = ['root']
+    parents['foo'] = ['a', 'b']
 
-    print 'foo', C3( parents ).mro( 'foo' )
+    print 'foo', C3(parents).mro('foo')
 
     parents = {}
-    parents['o' ] = []
-    parents['a' ] = ['o']
-    parents['b' ] = ['o']
-    parents['c' ] = ['o']
-    parents['d' ] = ['o']
-    parents['e' ]=  ['o']
-    parents['k1'] = ['a','b','c']
-    parents['k2'] = ['d','b','e']
-    parents['k3'] = ['d','a']
-    parents['z' ] = ['k1','k2','k3']
-
-    print 'z', C3( parents ).mro( 'z' )
+    parents['o'] = []
+    parents['a'] = ['o']
+    parents['b'] = ['o']
+    parents['c'] = ['o']
+    parents['d'] = ['o']
+    parents['e'] = ['o']
+    parents['k1'] = ['a', 'b', 'c']
+    parents['k2'] = ['d', 'b', 'e']
+    parents['k3'] = ['d', 'a']
+    parents['z'] = ['k1', 'k2', 'k3']
+
+    print 'z', C3(parents).mro('z')
 
     # Note we can get Python's result by defining an equivalent class
     # hierarchy (with empty class bodies) and printing foo.__mro__.
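For reference, the cross-check described in the comment above takes only a few
lines; this is an illustrative sketch mirroring the first 'parents' example:

    # Cross-check using Python's own MRO, per the note above (new-style
    # classes with empty bodies, mirroring the first 'parents' example).
    class root(object): pass
    class a(root): pass
    class b(root): pass
    class foo(a, b): pass

    # Python gives foo, a, b, root, object - i.e. the same order as
    # C3(parents).mro('foo') above plus the implicit trailing 'object'.
    print [c.__name__ for c in foo.__mro__]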
diff --git a/lib/cylc/CylcError.py b/lib/cylc/CylcError.py
index aca1212..7fa75fb 100644
--- a/lib/cylc/CylcError.py
+++ b/lib/cylc/CylcError.py
@@ -19,24 +19,26 @@
 """Here lies the unfinished early beginnings of more consistent
 cylc-wide exception handling ..."""
 
-class CylcError( Exception ):
+
+class CylcError(Exception):
     """
     Attributes:
         message - what the problem is.
     """
-    def __init__( self, msg ):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
 
 
-class SchedulerError( CylcError ):
+class SchedulerError(CylcError):
     pass
 
 
-class TaskStateError( CylcError ):
+class TaskStateError(CylcError):
     pass
 
 
-class TaskNotFoundError( CylcError ):
+class TaskNotFoundError(CylcError):
     pass
diff --git a/lib/cylc/CylcOptionParsers.py b/lib/cylc/CylcOptionParsers.py
index e56b781..54c793b 100644
--- a/lib/cylc/CylcOptionParsers.py
+++ b/lib/cylc/CylcOptionParsers.py
@@ -16,12 +16,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, re
-from optparse import OptionParser
-from suite_host import get_hostname
-from owner import user
-from cylc.command_prep import prep_file
+import os
+import re
+from optparse import OptionParser, OptionConflictError
 import cylc.flags
+from cylc.suite_host import get_hostname
+from cylc.owner import user
+from cylc.registration import localdb
 
 """Common options for all cylc commands."""
 
@@ -29,47 +30,51 @@ multitask_usage = """
 To match multiple tasks or families at once, MATCH is interpreted as a
 Python-style regular expression, not a simple shell glob.
 
-To match family rather than task names, use the -m/--family option.
-"""
+To match family rather than task names, use the -m/--family option. This is
+required because MATCH alone could match either task or family names."""
 
-class db_optparse( object ):
-    def __init__( self, dbopt ):
+
+class db_optparse(object):
+    def __init__(self, dbopt):
         # input is DB option spec from the cylc command line
         self.owner = user
         self.location = None
         if dbopt:
-            self.parse( dbopt )
+            self.parse(dbopt)
 
-    def parse( self, dbopt ):
+    def parse(self, dbopt):
         # determine DB location and owner
         if dbopt.startswith('u:'):
             self.owner = dbopt[2:]
-            dbopt = os.path.join( '~' + self.owner, '.cylc', 'DB' )
-        if dbopt.startswith( '~' ):
-            dbopt = os.path.expanduser( dbopt )
+            dbopt = os.path.join('~' + self.owner, '.cylc', 'DB')
+        if dbopt.startswith('~'):
+            dbopt = os.path.expanduser(dbopt)
         else:
-            dbopt = os.path.abspath( dbopt )
+            dbopt = os.path.abspath(dbopt)
         self.location = dbopt
 
-    def get_db_owner( self ):
+    def get_db_owner(self):
         return self.owner
 
-    def get_db_location( self ):
+    def get_db_location(self):
         return self.location
 
-class cop( OptionParser ):
 
-    def __init__( self, usage, argdoc=None, pyro=False, noforce=False,
-            jset=False, multitask=False, prep=False, twosuites=False, auto_add=True ):
+class cop(OptionParser):
+
+    def __init__(self, usage, argdoc=None, pyro=False, noforce=False,
+                 jset=False, multitask=False, prep=False, twosuites=False,
+                 auto_add=True):
 
         self.auto_add = auto_add
-        if argdoc == None:
+        if argdoc is None:
             if not prep:
                 argdoc = [('REG', 'Suite name')]
             else:
                 argdoc = [('SUITE', 'Suite name or path')]
 
-        # noforce=True is for commands that don't use interactive prompts at all
+        # noforce=True is for commands that don't use interactive prompts at
+        # all
 
         usage += """
 
@@ -103,153 +108,260 @@ Arguments:"""
 
             args += arg[0] + " "
 
-            pad = ( maxlen - len(arg[0]) ) * ' ' + '               '
+            pad = (maxlen - len(arg[0])) * ' ' + '               '
             usage += "\n   " + arg[0] + pad + arg[1]
 
-        usage = re.sub( 'ARGS', args, usage )
+        usage = re.sub('ARGS', args, usage)
 
-        OptionParser.__init__( self, usage )
+        OptionParser.__init__(self, usage)
 
-    def add_std_options( self ):
-        self.add_option( "--user",
-                help="Other user account name. This results in "
-                "command reinvocation on the remote account.",
+    def add_std_options(self):
+        """Add standard options if they have not been overridden."""
+        try:
+            self.add_option(
+                "--user",
+                help=(
+                    "Other user account name. This results in "
+                    "command reinvocation on the remote account."
+                ),
                 metavar="USER", default=user,
-                action="store", dest="owner" )
+                action="store", dest="owner")
+        except OptionConflictError:
+            pass
 
-        self.add_option( "--host",
+        try:
+            self.add_option(
+                "--host",
                 help="Other host name. This results in "
                 "command reinvocation on the remote account.",
                 metavar="HOST", action="store", default=get_hostname(),
-                dest="host" )
+                dest="host")
+        except OptionConflictError:
+            pass
 
-        self.add_option( "-v", "--verbose",
+        try:
+            self.add_option(
+                "-v", "--verbose",
                 help="Verbose output mode.",
-                action="store_true", default=False, dest="verbose" )
-
-        self.add_option( "--debug",
-                help="Run suites in non-daemon mode, and show exception tracebacks.",
-                action="store_true", default=False, dest="debug" )
-
-        self.add_option( "--db",
-                help="Alternative suite registration database location, "
-                "defaults to $HOME/.cylc/REGDB.",
-                metavar="PATH", action="store", default=None, dest="db" )
+                action="store_true", default=False, dest="verbose")
+        except OptionConflictError:
+            pass
+
+        try:
+            self.add_option(
+                "--debug",
+                help=(
+                    "Run suites in non-daemon mode, "
+                    "and show exception tracebacks."
+                ),
+                action="store_true", default=False, dest="debug")
+        except OptionConflictError:
+            pass
+
+        try:
+            self.add_option(
+                "--db",
+                help=(
+                    "Alternative suite registration database location, "
+                    "defaults to $HOME/.cylc/REGDB."
+                ),
+                metavar="PATH", action="store", default=None, dest="db")
+        except OptionConflictError:
+            pass
 
         if self.pyro:
-            self.add_option( "--port",
-                help="Suite port number on the suite host. NOTE: this is retrieved "
-                "automatically if passwordless ssh is configured to the suite host.",
-                metavar="INT", action="store", default=None, dest="port" )
+            try:
+                self.add_option(
+                    "--port",
+                    help=(
+                        "Suite port number on the suite host. "
+                        "NOTE: this is retrieved automatically if "
+                        "passwordless ssh is configured to the suite host."
+                    ),
+                    metavar="INT", action="store", default=None, dest="port")
+            except OptionConflictError:
+                pass
 
-            self.add_option( "--use-ssh",
+            try:
+                self.add_option(
+                    "--use-ssh",
                     help="Use ssh to re-invoke the command on the suite host.",
-                    action="store_true", default=False, dest="use_ssh" )
+                    action="store_true", default=False, dest="use_ssh")
+            except OptionConflictError:
+                pass
 
-            self.add_option( "--no-login",
-                    help="Do not use a login shell to run remote ssh commands. "
-                    "The default is to use a login shell.",
-                    action="store_false", default=True, dest="ssh_login" )
+            try:
+                self.add_option(
+                    "--no-login",
+                    help=(
+                        "Do not use a login shell to run remote ssh commands. "
+                        "The default is to use a login shell."
+                    ),
+                    action="store_false", default=True, dest="ssh_login")
+            except OptionConflictError:
+                pass
 
-            self.add_option( "--pyro-timeout", metavar='SEC',
-                    help="Set a timeout for network connections "
-                    "to the running suite. The default is no timeout. "
-                    "For task messaging connections see "
-                    "site/user config file documentation.",
-                    action="store", default=None, dest="pyro_timeout" )
+            try:
+                self.add_option(
+                    "--pyro-timeout", metavar='SEC',
+                    help=(
+                        "Set a timeout for network connections "
+                        "to the running suite. The default is no timeout. "
+                        "For task messaging connections see "
+                        "site/user config file documentation."
+                    ),
+                    action="store", default=None, dest="pyro_timeout")
+            except OptionConflictError:
+                pass
+
+            try:
+                self.add_option(
+                    "--print-uuid",
+                    help=(
+                        "Print the client UUID to stderr. "
+                        "This can be matched "
+                        "to information logged by the receiving suite daemon."
+                    ),
+                    action="store_true", default=False, dest="print_uuid")
+            except OptionConflictError:
+                pass
+
+            try:
+                self.add_option(
+                    "--set-uuid", metavar="UUID",
+                    help=(
+                        "Set the client UUID manually (e.g. from prior use of "
+                        "--print-uuid). This can be used to log multiple "
+                        "commands under the same UUID (but note that only the "
+                        "first [info] command from the same client ID will be "
+                        "logged unless the suite is running in debug mode)."
+                    ),
+                    action="store", default=None, dest="set_uuid")
+            except OptionConflictError:
+                pass
 
             if not self.noforce:
-                self.add_option( "-f", "--force",
-                        help="Do not ask for confirmation before acting. Note that "
-                        "it is not necessary to use this option if interactive command "
-                        "prompts have been disabled in the site/user config files.",
-                        action="store_true", default=False, dest="force" )
+                try:
+                    self.add_option(
+                        "-f", "--force",
+                        help=(
+                            "Do not ask for confirmation before acting. "
+                            "Note that it is not necessary to use this option "
+                            "if interactive command prompts have been "
+                            "disabled in the site/user config files."
+                        ),
+                        action="store_true", default=False, dest="force")
+                except OptionConflictError:
+                    pass
 
         if self.jset:
-            self.add_option( "-s", "--set", metavar="NAME=VALUE",
-                    help="Set the value of a Jinja2 template variable in the suite "
-                    "definition. This option can be used multiple times on the command "
-                    "line.  WARNING: these settings do not persist across suite restarts; "
-                    "they need to be set again on the \"cylc restart\" command line.",
-                    action="append", default=[], dest="templatevars" )
-
-            self.add_option( "--set-file", metavar="FILE",
-                    help="Set the value of Jinja2 template variables in the suite "
-                    "definition from a file containing NAME=VALUE pairs (one per line). "
-                    "WARNING: these settings do not persist across suite restarts; "
-                    "they need to be set again on the \"cylc restart\" command line.",
-                    action="store", default=None, dest="templatevars_file" )
+            try:
+                self.add_option(
+                    "-s", "--set", metavar="NAME=VALUE",
+                    help=(
+                        "Set the value of a Jinja2 template variable in the "
+                        "suite definition. This option can be used multiple "
+                        "times on the command line. "
+                        "WARNING: these settings do not persist across suite "
+                        "restarts; "
+                        "they need to be set again on the \"cylc restart\" "
+                        "command line."
+                    ),
+                    action="append", default=[], dest="templatevars")
+            except OptionConflictError:
+                pass
+
+            try:
+                self.add_option(
+                    "--set-file", metavar="FILE",
+                    help=(
+                        "Set the value of Jinja2 template variables in the "
+                        "suite definition from a file containing NAME=VALUE "
+                        "pairs (one per line). "
+                        "WARNING: these settings do not persist across suite "
+                        "restarts; "
+                        "they need to be set again on the \"cylc restart\" "
+                        "command line."
+                    ),
+                    action="store", default=None, dest="templatevars_file")
+            except OptionConflictError:
+                pass
 
         if self.multitask:
-            self.add_option( "-m", "--family",
+            try:
+                self.add_option(
+                    "-m", "--family",
                     help="Match members of named families rather than tasks.",
-                    action="store_true", default=False, dest="is_family" )
+                    action="store_true", default=False, dest="is_family")
+            except OptionConflictError:
+                pass
 
-    def get_suite( self, index=0 ):
+    def get_suite(self, index=0):
         return self.suite_info[index]
 
-    def _getdef( self, arg, options ):
+    def _getdef(self, arg, options):
         suiterc = arg
-        if os.path.isdir( suiterc ):
+        if os.path.isdir(suiterc):
             # directory
             suite = suiterc
-            suiterc = os.path.join( suiterc, 'suite.rc' )
-        if os.path.isfile( suiterc ):
+            suiterc = os.path.join(suiterc, 'suite.rc')
+        if os.path.isfile(suiterc):
             # suite.rc file
-            suite = os.path.basename( os.path.dirname( suiterc ))
-            suiterc = os.path.abspath( suiterc)
+            suite = os.path.basename(os.path.dirname(suiterc))
+            suiterc = os.path.abspath(suiterc)
             # TODO - return suite def include files to, as below
             watchers = [suiterc]
         else:
             # must be a registered suite name
-            prepper = prep_file( arg, options )
-            suite, suiterc = prepper.execute()
-            # This lists top level suite def include files too:
-            watchers = prepper.get_rcfiles()
+            suite = arg
+            suiterc = localdb(options.db).get_suiterc(suite)
+            watchers = localdb(options.db).get_rcfiles(suite)
         return suite, suiterc, watchers
 
-    def parse_args( self, remove_opts=[] ):
-
+    def parse_args(self, remove_opts=[]):
         if self.auto_add:
+            # Add common options after command-specific options.
             self.add_std_options()
+
         for opt in remove_opts:
             try:
-                self.remove_option( opt )
+                self.remove_option(opt)
             except:
                 pass
 
-        (options, args) = OptionParser.parse_args( self )
+        (options, args) = OptionParser.parse_args(self)
 
         if len(args) < self.n_compulsory_args:
-            self.error( "Wrong number of arguments (too few)" )
+            self.error("Wrong number of arguments (too few)")
 
         elif not self.unlimited_args and \
                 len(args) > self.n_compulsory_args + self.n_optional_args:
-            self.error( "Wrong number of arguments (too many)" )
+            self.error("Wrong number of arguments (too many)")
 
-        foo = db_optparse( options.db )
+        foo = db_optparse(options.db)
         options.db = foo.get_db_location()
         options.db_owner = foo.get_db_owner()
 
         if self.jset:
             if options.templatevars_file:
-                options.templatevars_file = os.path.abspath( os.path.expanduser( options.templatevars_file ))
+                options.templatevars_file = os.path.abspath(os.path.expanduser(
+                    options.templatevars_file))
 
         if self.prep:
             # allow file path or suite name
             try:
-                self.suite_info.append( self._getdef( args[0], options ))
+                self.suite_info.append(self._getdef(args[0], options))
                 if self.twosuites:
-                    self.suite_info.append( self._getdef( args[1], options ))
+                    self.suite_info.append(self._getdef(args[1], options))
             except IndexError:
                 if options.filename:
                     # Empty args list is OK if we supplied a filename
                     pass
                 else:
                     # No filename, so we're expecting an argument
-                    self.error( "Need either a filename or suite name(s)" )
+                    self.error("Need either a filename or suite name(s)")
 
         cylc.flags.verbose = options.verbose
         cylc.flags.debug = options.debug
 
-        return ( options, args )
+        return (options, args)
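A rough usage sketch of the parser class above, in the style of the bin/cylc-*
commands; the usage string and argument doc are invented for illustration and
lib/cylc is assumed to be on PYTHONPATH:

    # Hedged sketch only: build a 'cop' parser and parse the command line.
    from cylc.CylcOptionParsers import cop

    parser = cop(
        "cylc example [OPTIONS] ARGS",       # invented usage string
        argdoc=[("REG", "Suite name")])      # invented argument doc
    # Expects a suite name argument on the command line; the standard
    # options (--user, --host, --debug, ...) are added automatically.
    (options, args) = parser.parse_args()
    print options.owner, options.host, args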
diff --git a/lib/cylc/LogDiagnosis.py b/lib/cylc/LogDiagnosis.py
index 6d84efe..e6a8b5c 100644
--- a/lib/cylc/LogDiagnosis.py
+++ b/lib/cylc/LogDiagnosis.py
@@ -1,29 +1,32 @@
 #!/usr/bin/env python
 
-import os, sys, re
-import datetime
+import sys
+import re
 from difflib import unified_diff
 
-class LogAnalyserError( Exception ):
-    def __init__( self, msg ):
+
+class LogAnalyserError(Exception):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
 
-class LogSpec( object ):
+
+class LogSpec(object):
     """Get important information from an existing reference run log
     file, in order to do the same run for a reference test. Currently
     just gets the start and stop cycle points."""
 
-    def __init__( self, log ):
-        h = open( log, 'rb' )
+    def __init__(self, log):
+        h = open(log, 'rb')
         self.lines = h.readlines()
         h.close()
 
-    def get_initial_point_string( self ):
+    def get_initial_point_string(self):
         found = False
         for line in self.lines:
-            m = re.search( 'Initial point: (.*)$',line)
+            m = re.search('Initial point: (.*)$', line)
             if m:
                 found = True
                 point_string = m.groups()[0]
@@ -33,12 +36,12 @@ class LogSpec( object ):
         if found:
             return point_string
         else:
-            raise LogAnalyserError( "ERROR: logged start point not found" )
+            raise LogAnalyserError("ERROR: logged start point not found")
 
-    def get_start_point_string( self ):
+    def get_start_point_string(self):
         found = False
         for line in self.lines:
-            m = re.search( 'Start point: (.*)$',line)
+            m = re.search('Start point: (.*)$', line)
             if m:
                 found = True
                 point_string = m.groups()[0]
@@ -49,10 +52,10 @@ class LogSpec( object ):
             return point_string
         return None
 
-    def get_final_point_string( self ):
+    def get_final_point_string(self):
         found = False
         for line in self.lines:
-            m = re.search( 'Final point: (.*)$',line)
+            m = re.search('Final point: (.*)$', line)
             if m:
                 found = True
                 point_string = m.groups()[0]
@@ -62,44 +65,49 @@ class LogSpec( object ):
         if found:
             return point_string
         else:
-            raise LogAnalyserError( "ERROR: logged stop point not found" )
+            raise LogAnalyserError("ERROR: logged stop point not found")
+
 
-class LogAnalyser( object ):
+class LogAnalyser(object):
     """Compare an existing reference log with the log from a new
     reference test run. Currently just compares triggering info."""
 
-    def __init__( self, new_log, ref_log ):
-        h = open( new_log, 'rb' )
+    def __init__(self, new_log, ref_log):
+        h = open(new_log, 'rb')
         self.new_loglines = h.readlines()
         h.close()
-        h = open( ref_log, 'rb' )
+        h = open(ref_log, 'rb')
         self.ref_loglines = h.readlines()
         h.close()
 
-    def get_triggered( self, lines ):
+    def get_triggered(self, lines):
         res = []
         for line in lines:
-            m = re.search( 'INFO - (\[.* -triggered off .*)$', line )
+            m = re.search('INFO - (\[.* -triggered off .*)$', line)
             if m:
                 res.append(m.groups()[0])
         return res
 
-    def verify_triggering( self ):
-        new = self.get_triggered( self.new_loglines )
-        ref = self.get_triggered( self.ref_loglines )
+    def verify_triggering(self):
+        new = self.get_triggered(self.new_loglines)
+        ref = self.get_triggered(self.ref_loglines)
 
         if len(new) == 0:
-            raise LogAnalyserError( "ERROR: new log contains no triggering info." )
+            raise LogAnalyserError(
+                "ERROR: new log contains no triggering info.")
 
         if len(ref) == 0:
-            raise LogAnalyserError( "ERROR: reference log contains no triggering info." )
+            raise LogAnalyserError(
+                "ERROR: reference log contains no triggering info.")
 
         new.sort()
         ref.sort()
 
         if new != ref:
-            diff = unified_diff( new, ref )
+            diff = unified_diff(new, ref)
             print >> sys.stderr, '\n'.join(diff)
-            raise LogAnalyserError( "ERROR: triggering is NOT consistent with the reference log" )
+            raise LogAnalyserError(
+                "ERROR: triggering is NOT consistent with the reference log")
         else:
-            print "LogAnalyser: triggering is consistent with the reference log"
+            print(
+                "LogAnalyser: triggering is consistent with the reference log")
diff --git a/lib/cylc/RunEventHandler.py b/lib/cylc/RunEventHandler.py
index 6235ee4..eade92e 100644
--- a/lib/cylc/RunEventHandler.py
+++ b/lib/cylc/RunEventHandler.py
@@ -19,7 +19,8 @@
 import subprocess
 import logging
 
-def RunHandler( event, script, suite, taskID=None, msg=None, fg=False ):
+
+def RunHandler(event, script, suite, taskID=None, msg=None, fg=False):
     """This is now only used for suite (not task) event handlers."""
 
     tolog = 'Calling ' + event + ' handler'
@@ -27,7 +28,7 @@ def RunHandler( event, script, suite, taskID=None, msg=None, fg=False ):
         tolog += ' in the foreground'
     print tolog
     logger = logging.getLogger('main')
-    logger.info( tolog )
+    logger.info(tolog)
     command = script + ' ' + event + ' ' + suite
     if taskID:
         command += ' ' + taskID
@@ -35,6 +36,6 @@ def RunHandler( event, script, suite, taskID=None, msg=None, fg=False ):
     if not fg:
         command += ' &'
 
-    res = subprocess.call( command, shell=True )
+    res = subprocess.call(command, shell=True)
     if fg and res != 0:
-        raise Exception( 'ERROR: event handler failed' )
+        raise Exception('ERROR: event handler failed')
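For context, a hedged example of calling the handler runner above; the handler
script name and suite name are invented:

    # Runs "suite-event-handler.sh shutdown my.suite &" in the background.
    from cylc.RunEventHandler import RunHandler

    RunHandler("shutdown", "suite-event-handler.sh", "my.suite")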
diff --git a/lib/cylc/__init__.py b/lib/cylc/__init__.py
index 64f58d2..96ee124 100644
--- a/lib/cylc/__init__.py
+++ b/lib/cylc/__init__.py
@@ -15,13 +15,13 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-# Set up the cylc environment.
+"""Set up the cylc environment."""
 
 import os
 import socket
 import sys
 
+
 def environ_init(argv0=None):
     """Initialise cylc environment."""
 
@@ -37,11 +37,18 @@ def environ_init(argv0=None):
         if cylc_dir != os.getenv('CYLC_DIR', ''):
             os.environ['CYLC_DIR'] = cylc_dir
 
-        dirs = [os.path.join(cylc_dir, 'bin')]
+        cylc_dir_lib = os.path.join(cylc_dir, 'lib')
+        my_lib = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+        if cylc_dir_lib == my_lib:
+            dirs = []
+        else:
+            # For backward compat, old versions of "cylc" may end up loading an
+            # incorrect version of this file.
+            dirs = [os.path.join(cylc_dir, 'bin')]
         if os.getenv('CYLC_SUITE_DEF_PATH', ''):
             dirs.append(os.getenv('CYLC_SUITE_DEF_PATH'))
         environ_path_add(dirs)
-        environ_path_add([os.path.join(cylc_dir, 'lib')], 'PYTHONPATH')
+        environ_path_add([cylc_dir_lib], 'PYTHONPATH')
 
     # Python output buffering delays appearance of stdout and stderr
     # when output is not directed to a terminal (this occurred when
@@ -49,17 +56,25 @@ def environ_init(argv0=None):
     # case in post-5.0 daemon-mode cylc?)
     os.environ['PYTHONUNBUFFERED'] = 'true'
 
+
 def environ_path_add(dirs, key='PATH'):
-    """For each dir in dirs, add dir to the front of the PATH environment
-    variable. If the 2nd argument key is specified, add each dir to the front of
-    the named environment variable instead of PATH.
+    """For each dir_ in dirs, prepend dir_ to the PATH environment variable.
+
+    If key is specified, prepend dir_ to the named environment variable instead
+    of PATH.
+
     """
 
-    paths = os.getenv(key, '').split(os.pathsep)
-    for dir in dirs:
-        while dir in paths:
-            paths.remove(dir)
-        paths.insert(0, dir)
+    paths_str = os.getenv(key, '')
+    # ''.split(os.pathsep) gives ['']
+    if paths_str.strip():
+        paths = paths_str.split(os.pathsep)
+    else:
+        paths = []
+    for dir_ in dirs:
+        while dir_ in paths:
+            paths.remove(dir_)
+        paths.insert(0, dir_)
     os.environ[key] = os.pathsep.join(paths)
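A small sketch of the prepend behaviour implemented above, including the
empty-variable case that motivated the change; it assumes lib/cylc is on
PYTHONPATH so the function is importable:

    # Illustrative only.
    import os
    from cylc import environ_path_add

    os.environ['PATH'] = ''               # empty: must not leave a '' entry
    environ_path_add(['/opt/cylc/bin'])   # hypothetical directory
    print os.environ['PATH']              # -> /opt/cylc/bin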
 
 
diff --git a/lib/cylc/batch_sys_handlers/at.py b/lib/cylc/batch_sys_handlers/at.py
index 5c7982f..e4706e7 100644
--- a/lib/cylc/batch_sys_handlers/at.py
+++ b/lib/cylc/batch_sys_handlers/at.py
@@ -43,7 +43,8 @@ class AtCommandHandler(object):
     # process group, which allows the job script and its child processes to be
     # killed correctly.
     KILL_CMD_TMPL = "atrm '%(job_id)s'"
-    POLL_CMD_TMPL = "atq"
+    POLL_CMD = "atq"
+    POLL_CMD_TMPL = POLL_CMD
     REC_ERR_FILTERS = [
         re.compile("warning: commands will be executed using /bin/sh")]
     REC_ID_FROM_SUBMIT_ERR = re.compile(r"\Ajob\s(?P<id>\S+)\sat")
diff --git a/lib/cylc/batch_sys_handlers/background.py b/lib/cylc/batch_sys_handlers/background.py
index 804b9ef..b08adad 100644
--- a/lib/cylc/batch_sys_handlers/background.py
+++ b/lib/cylc/batch_sys_handlers/background.py
@@ -17,9 +17,10 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """Background job submission and manipulation."""
 
+import errno
 import os
 import re
-from subprocess import Popen
+from subprocess import Popen, STDOUT
 import sys
 from cylc.batch_sys_manager import BATCH_SYS_MANAGER
 
@@ -27,41 +28,53 @@ from cylc.batch_sys_manager import BATCH_SYS_MANAGER
 class BgCommandHandler(object):
     """Background job submission and manipulation.
 
-    Run a task job as a background process. Uses 'wait' to prevent exit before
-    the job is finished (which would be a problem for remote background jobs at
-    sites that do not allow unattended jobs on login nodes).
+    Run a task job as a nohup background process in its own process group.
 
     """
 
     CAN_KILL_PROC_GROUP = True
-    IS_BG_SUBMIT = True
-    POLL_CMD_TMPL = "ps '%(job_id)s'"
+    POLL_CMD = "ps"
+    POLL_CMD_TMPL = POLL_CMD + " '%(job_id)s'"
     REC_ID_FROM_SUBMIT_OUT = re.compile(r"""\A(?P<id>\d+)\Z""")
 
     @classmethod
     def submit(cls, job_file_path):
         """Submit "job_file_path"."""
-        out_file = open(job_file_path + ".out", "wb")
-        err_file = open(job_file_path + ".err", "wb")
-        proc = Popen(
-            [job_file_path], stdout=out_file, stderr=err_file,
-            preexec_fn=os.setpgrp)
-        # Send PID info back to suite
-        sys.stdout.write("%(pid)d\n%(key)s=%(pid)d\n" % {
-            "key": BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID,
-            "pid": proc.pid,
-        })
-        sys.stdout.flush()
-        # Write PID info to status file
-        job_status_file = open(job_file_path + ".status", "a")
-        job_status_file.write("%s=%d\n" % (
-            BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID, proc.pid))
-        job_status_file.close()
-        # Wait for job
-        proc.communicate()
-        out_file.close()
-        err_file.close()
-        return proc
+        # Check access permission here because we are unable to check the
+        # result of the nohup command.
+        if not os.access(job_file_path, os.R_OK | os.X_OK):
+            exc = OSError(
+                errno.EACCES, os.strerror(errno.EACCES), job_file_path)
+            return (1, None, str(exc))
+        job_file_path_dir = os.path.dirname(job_file_path)
+        if not os.access(job_file_path_dir, os.W_OK):
+            exc = OSError(
+                errno.EACCES, os.strerror(errno.EACCES), job_file_path_dir)
+            return (1, None, str(exc))
+        try:
+            # This is essentially a double fork to ensure that the child
+            # process can detach as a process group leader and is not subject
+            # to SIGHUP from the current process.
+            proc = Popen(
+                [
+                    "nohup",
+                    "bash",
+                    "-c",
+                    r'''exec "$0" <'/dev/null' >"$0.out" 2>"$0.err"''',
+                    job_file_path,
+                ],
+                preexec_fn=os.setpgrp,
+                stdin=open(os.devnull),
+                stdout=open(os.devnull, "wb"),
+                stderr=STDOUT)
+        except OSError as exc:
+            # subprocess.Popen has a bad habit of not setting the
+            # filename of the executable when it raises an OSError.
+            if not exc.filename:
+                exc.filename = "nohup"  # first element of the argv above
+            return (1, None, str(exc))
+        else:
+            return (0, "%d\n" % (proc.pid), None)
 
 
 BATCH_SYS_HANDLER = BgCommandHandler()
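Outside the handler, the detach-and-redirect pattern used in submit() above
looks roughly like this; the job script path is a placeholder for an existing
executable file whose directory is writable:

    # Standalone sketch of the nohup/setpgrp submission above.
    import os
    from subprocess import Popen, STDOUT

    JOB_SCRIPT = "/tmp/example-job"  # hypothetical path
    proc = Popen(
        ["nohup", "bash", "-c",
         r'''exec "$0" <'/dev/null' >"$0.out" 2>"$0.err"''', JOB_SCRIPT],
        preexec_fn=os.setpgrp,
        stdin=open(os.devnull),
        stdout=open(os.devnull, "wb"),
        stderr=STDOUT)
    print proc.pid  # PID of the detached process group leader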
diff --git a/lib/cylc/batch_sys_handlers/loadleveler.py b/lib/cylc/batch_sys_handlers/loadleveler.py
index 13bfb7f..e461434 100644
--- a/lib/cylc/batch_sys_handlers/loadleveler.py
+++ b/lib/cylc/batch_sys_handlers/loadleveler.py
@@ -26,7 +26,8 @@ class LoadlevelerHandler(object):
 
     DIRECTIVE_PREFIX = "# @ "
     KILL_CMD_TMPL = "llcancel '%(job_id)s'"
-    POLL_CMD_TMPL = "llq -f%%id '%(job_id)s'"
+    POLL_CMD = "llq"
+    POLL_CMD_TMPL = POLL_CMD + " -f%%id '%(job_id)s'"
     REC_ID_FROM_SUBMIT_OUT = re.compile(
         r"""\Allsubmit:\sThe\sjob\s"(?P<id>[^"]+)"\s""")
     REC_ERR_FILTERS = [
@@ -50,7 +51,8 @@ class LoadlevelerHandler(object):
         # executed* (that is determined by the '#!' at the top of the task
         # job script).
         directives["shell"] = "/bin/ksh"
-        directives.update(job_conf["directives"])
+        for key, value in job_conf["directives"].items():
+            directives[key] = value
         lines = []
         for key, value in directives.items():
             if value:
@@ -91,6 +93,21 @@ class LoadlevelerHandler(object):
                 return True
         return False
 
+    @classmethod
+    def filter_poll_many_output(cls, out):
+        """Return a list of job IDs still in the batch system.
+
+        Drop STEPID from the JOBID.STEPID returned by 'llq'.
+        """
+        job_ids = []
+        for line in out.splitlines():
+            try:
+                head = line.split(None, 1)[0]
+            except IndexError:
+                continue
+            job_ids.append(".".join(head.split(".")[:2]))
+        return job_ids
+
     def get_vacation_signal(self, job_conf):
         """Return "USR1" if "restart" directive is "yes"."""
         if job_conf["directives"].get("restart") == "yes":
diff --git a/lib/cylc/batch_sys_handlers/lsf.py b/lib/cylc/batch_sys_handlers/lsf.py
index e3a283d..5b58ca2 100644
--- a/lib/cylc/batch_sys_handlers/lsf.py
+++ b/lib/cylc/batch_sys_handlers/lsf.py
@@ -25,7 +25,8 @@ class LSFHandler(object):
 
     DIRECTIVE_PREFIX = "#BSUB "
     KILL_CMD_TMPL = "bkill '%(job_id)s'"
-    POLL_CMD_TMPL = "bjobs -noheader '%(job_id)s'"
+    POLL_CMD = "bjobs"
+    POLL_CMD_TMPL = POLL_CMD + " -noheader '%(job_id)s'"
     REC_ID_FROM_SUBMIT_OUT = re.compile(r"^Job <(?P<id>\d+)>")
     SUBMIT_CMD_TMPL = "bsub"
     SUBMIT_CMD_STDIN_IS_JOB_FILE = True
@@ -44,7 +45,8 @@ class LSFHandler(object):
         directives['-J'] = job_conf['suite name'] + '.' + job_conf['task id']
         directives['-o'] = job_file_path + ".out"
         directives['-e'] = job_file_path + ".err"
-        directives.update(job_conf['directives'])
+        for key, value in job_conf['directives'].items():
+            directives[key] = value
         lines = []
         for key, value in directives.items():
             if value:
diff --git a/lib/cylc/batch_sys_handlers/pbs.py b/lib/cylc/batch_sys_handlers/moab.py
similarity index 75%
copy from lib/cylc/batch_sys_handlers/pbs.py
copy to lib/cylc/batch_sys_handlers/moab.py
index 8f4e1ef..820cd32 100644
--- a/lib/cylc/batch_sys_handlers/pbs.py
+++ b/lib/cylc/batch_sys_handlers/moab.py
@@ -15,36 +15,35 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"PBS batch system job submission and manipulation."
+"Moab batch system job submission and manipulation."
 
 import re
 
 
-class PBSHandler(object):
+class MoabHandler(object):
 
-    "PBS batch system job submission and manipulation."
+    "Moab batch system job submission and manipulation."
 
     DIRECTIVE_PREFIX = "#PBS "
-    KILL_CMD_TMPL = "qdel '%(job_id)s'"
+    KILL_CMD_TMPL = "mjobctl -c '%(job_id)s'"
     # N.B. The "qstat JOB_ID" command returns 1 if JOB_ID is no longer in the
     # system, so there is no need to filter its output.
-    POLL_CMD_TMPL = "qstat '%(job_id)s'"
+    POLL_CMD = "checkjob"
+    POLL_CMD_TMPL = POLL_CMD + " '%(job_id)s'"
     REC_ID_FROM_SUBMIT_OUT = re.compile(r"""\A\s*(?P<id>\S+)\s*\Z""")
-    SUBMIT_CMD_TMPL = "qsub '%(job)s'"
+    SUBMIT_CMD_TMPL = "msub '%(job)s'"
 
     def format_directives(self, job_conf):
         """Format the job directives for a job file."""
         job_file_path = job_conf["job file path"].replace(r"$HOME/", "")
         directives = job_conf["directives"].__class__()  # an ordereddict
 
-        # Old versions of PBS (< 11) requires jobs names <= 15 characters.
-        # Version 12 appears to truncate the job name to 15 characters if it is
-        # longer.
         directives["-N"] = (
-            job_conf["task id"] + "." + job_conf["suite name"])[0:15]
+            job_conf["task id"] + "." + job_conf["suite name"])
 
         directives["-o"] = job_file_path + ".out"
         directives["-e"] = job_file_path + ".err"
+        # restartable?
         directives.update(job_conf["directives"])
         lines = []
         for key, value in directives.items():
@@ -52,12 +51,12 @@ class PBSHandler(object):
                 # E.g. -l walltime=3:00:00
                 lines.append("%s%s=%s" % (self.DIRECTIVE_PREFIX, key, value))
             elif value:
-                # E.g. -q queue_name 
+                # E.g. -q queue_name
                 lines.append("%s%s %s" % (self.DIRECTIVE_PREFIX, key, value))
             else:
-                # E.g. -V 
+                # E.g. -V
                 lines.append(self.DIRECTIVE_PREFIX + key)
         return lines
 
 
-BATCH_SYS_HANDLER = PBSHandler()
+BATCH_SYS_HANDLER = MoabHandler()
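
Since the whole format_directives loop is visible in the new moab.py above, here is a small sketch of the directive block it produces for a hypothetical job (the suite, task and directive values are invented; note that Moab still consumes #PBS-style directives, hence the unchanged DIRECTIVE_PREFIX):

    from collections import OrderedDict

    DIRECTIVE_PREFIX = "#PBS "
    directives = OrderedDict()
    directives["-N"] = "model.1.my.suite"        # task id + "." + suite name
    directives["-o"] = "log/job/1/model/01/job.out"
    directives["-e"] = "log/job/1/model/01/job.err"
    directives["-l walltime"] = "3:00:00"        # key containing a space
    directives["-q"] = "long"                    # key with a plain value
    directives["-V"] = ""                        # valueless key

    lines = []
    for key, value in directives.items():
        if value and " " in key:
            lines.append("%s%s=%s" % (DIRECTIVE_PREFIX, key, value))
        elif value:
            lines.append("%s%s %s" % (DIRECTIVE_PREFIX, key, value))
        else:
            lines.append(DIRECTIVE_PREFIX + key)

    # lines:
    #   #PBS -N model.1.my.suite
    #   #PBS -o log/job/1/model/01/job.out
    #   #PBS -e log/job/1/model/01/job.err
    #   #PBS -l walltime=3:00:00
    #   #PBS -q long
    #   #PBS -V
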
diff --git a/lib/cylc/batch_sys_handlers/pbs.py b/lib/cylc/batch_sys_handlers/pbs.py
index 8f4e1ef..fe77d34 100644
--- a/lib/cylc/batch_sys_handlers/pbs.py
+++ b/lib/cylc/batch_sys_handlers/pbs.py
@@ -28,7 +28,8 @@ class PBSHandler(object):
     KILL_CMD_TMPL = "qdel '%(job_id)s'"
     # N.B. The "qstat JOB_ID" command returns 1 if JOB_ID is no longer in the
     # system, so there is no need to filter its output.
-    POLL_CMD_TMPL = "qstat '%(job_id)s'"
+    POLL_CMD = "qstat"
+    POLL_CMD_TMPL = POLL_CMD + " '%(job_id)s'"
     REC_ID_FROM_SUBMIT_OUT = re.compile(r"""\A\s*(?P<id>\S+)\s*\Z""")
     SUBMIT_CMD_TMPL = "qsub '%(job)s'"
 
@@ -45,17 +46,18 @@ class PBSHandler(object):
 
         directives["-o"] = job_file_path + ".out"
         directives["-e"] = job_file_path + ".err"
-        directives.update(job_conf["directives"])
+        for key, value in job_conf["directives"].items():
+            directives[key] = value
         lines = []
         for key, value in directives.items():
             if value and " " in key:
                 # E.g. -l walltime=3:00:00
                 lines.append("%s%s=%s" % (self.DIRECTIVE_PREFIX, key, value))
             elif value:
-                # E.g. -q queue_name 
+                # E.g. -q queue_name
                 lines.append("%s%s %s" % (self.DIRECTIVE_PREFIX, key, value))
             else:
-                # E.g. -V 
+                # E.g. -V
                 lines.append(self.DIRECTIVE_PREFIX + key)
         return lines
 
diff --git a/lib/cylc/batch_sys_handlers/sge.py b/lib/cylc/batch_sys_handlers/sge.py
index 8aefcef..2d8eb9a 100644
--- a/lib/cylc/batch_sys_handlers/sge.py
+++ b/lib/cylc/batch_sys_handlers/sge.py
@@ -28,7 +28,8 @@ class SGEHandler(object):
     KILL_CMD_TMPL = "qdel '%(job_id)s'"
     # N.B. The "qstat -j JOB_ID" command returns 1 if JOB_ID is no longer in
     # the system, so there is no need to filter its output.
-    POLL_CMD_TMPL = "qstat -j '%(job_id)s'"
+    POLL_CMD = "qstat"
+    POLL_CMD_TMPL = POLL_CMD + " -j '%(job_id)s'"
     REC_ID_FROM_SUBMIT_OUT = re.compile(r"\D+(?P<id>\d+)\D+")
     SUBMIT_CMD_TMPL = "qsub '%(job)s'"
 
@@ -39,7 +40,8 @@ class SGEHandler(object):
         directives['-N'] = job_conf['suite name'] + '.' + job_conf['task id']
         directives['-o'] = job_file_path + ".out"
         directives['-e'] = job_file_path + ".err"
-        directives.update(job_conf['directives'])
+        for key, value in job_conf['directives'].items():
+            directives[key] = value
         lines = []
         for key, value in directives.items():
             if value:
diff --git a/lib/cylc/batch_sys_handlers/slurm.py b/lib/cylc/batch_sys_handlers/slurm.py
index 86d17e5..71ff740 100644
--- a/lib/cylc/batch_sys_handlers/slurm.py
+++ b/lib/cylc/batch_sys_handlers/slurm.py
@@ -18,6 +18,7 @@
 """SLURM job submission and manipulation."""
 
 import re
+import shlex
 
 
 class SLURMHandler(object):
@@ -27,7 +28,8 @@ class SLURMHandler(object):
     KILL_CMD_TMPL = "scancel '%(job_id)s'"
     # N.B. The "squeue -j JOB_ID" command returns 1 if JOB_ID is no longer in
     # the system, so there is no need to filter its output.
-    POLL_CMD_TMPL = "squeue -h -j '%(job_id)s'"
+    POLL_CMD = "squeue -h"
+    POLL_CMD_TMPL = POLL_CMD + " -j '%(job_id)s'"
     REC_ID_FROM_SUBMIT_OUT = re.compile(
         r"\ASubmitted\sbatch\sjob\s(?P<id>\d+)")
     SUBMIT_CMD_TMPL = "sbatch '%(job)s'"
@@ -47,7 +49,8 @@ class SLURMHandler(object):
             job_conf['suite name'] + '.' + job_conf['task id'])
         directives['--output'] = job_file_path + ".out"
         directives['--error'] = job_file_path + ".err"
-        directives.update(job_conf['directives'])
+        for key, value in job_conf['directives'].items():
+            directives[key] = value
         lines = []
         for key, value in directives.items():
             if value:
@@ -73,5 +76,9 @@ class SLURMHandler(object):
         """
         return ["EXIT", "ERR", "XCPU"]
 
+    def get_poll_many_cmd(cls, job_ids):
+        """Return the poll command for a list of job IDs."""
+        return shlex.split(cls.POLL_CMD) + ["-j", ",".join(job_ids)]
+
 
 BATCH_SYS_HANDLER = SLURMHandler()
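
squeue expects multiple job IDs as a single comma-separated -j argument rather than as extra positional arguments, which is why SLURM gains a get_poll_many_cmd of its own instead of relying on the generic "[POLL_CMD] + job_ids" fallback in batch_sys_manager. A quick illustration (job IDs invented):

    import shlex

    POLL_CMD = "squeue -h"
    job_ids = ["1234", "1235"]
    cmd = shlex.split(POLL_CMD) + ["-j", ",".join(job_ids)]
    # cmd == ['squeue', '-h', '-j', '1234,1235']
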
diff --git a/lib/cylc/batch_sys_manager.py b/lib/cylc/batch_sys_manager.py
index 9534e29..90abdf1 100644
--- a/lib/cylc/batch_sys_manager.py
+++ b/lib/cylc/batch_sys_manager.py
@@ -35,28 +35,37 @@ batch_sys.filter_poll_output(out, job_id) => boolean
       output to see if job_id is still alive in the batch system, and return
       True if so. See also "batch_sys.POLL_CMD_TMPL".
 
+batch_sys.filter_poll_many_output(out) => job_ids
+    * Called after the batch system's poll many command. The method should read
+      the output and return a list of job IDs that are still in the batch
+      system.
+
 batch_sys.filter_submit_output(out, err) => new_out, new_err
     * Filter the standard output and standard error of the job submission
       command. This is useful if the job submission command returns information
       that should just be ignored. See also "batch_sys.SUBMIT_CMD_TMPL" and
       "batch_sys.SUBMIT_CMD_STDIN_TMPL".
 
+batch_sys.format_directives(job_conf) => lines
+    * If relevant, this method formats the job directives for a job file, if
+      job file directives are relevant for the batch system. The argument
+      "job_conf" is a dict containing the job configuration.
+
 batch_sys.get_fail_signals(job_conf) => list of strings
     * Return a list of names of signals to trap for reporting errors. Default
       is ["EXIT", "ERR", "TERM", "XCPU"]. ERR and EXIT are always recommended.
       EXIT is used to report premature stopping of the job script, and its trap
       is unset at the end of the script.
 
+batch_sys.get_poll_many_cmd(job-id-list) => list
+    * Return a list containing the shell command to poll the jobs in the
+      argument list.
+
 batch_sys.get_vacation_signal(job_conf) => str
     * If relevant, return a string containing the name of the signal that
       indicates the job has been vacated by the batch system.
 
-batch_sys.format_directives(job_conf) => lines
-    * If relevant, this method formats the job directives for a job file, if
-      job file directives are relevant for the batch system. The argument
-      "job_conf" is a dict containing the job configuration.
-
-batch_sys.submit(job_file_path) => proc
+batch_sys.submit(job_file_path) => ret_code, out, err
     * Submit a job and return an instance of the Popen object for the
       submission. This method is useful if the job submission requires logic
       beyond just running a system or shell command. See also
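
Taken together, the interface documented here can be satisfied by a very small module. The following is a sketch only, not part of this change: the class and command names are invented, and only the hooks named in the docstring above are implemented, to show roughly what a custom handler honouring the new poll-many methods might look like.

    import re
    import shlex


    class MyBatchHandler(object):
        """Hypothetical batch system handler (illustration only)."""

        DIRECTIVE_PREFIX = "#MYBATCH "
        KILL_CMD_TMPL = "mykill '%(job_id)s'"
        POLL_CMD = "mystat"
        REC_ID_FROM_SUBMIT_OUT = re.compile(r"(?P<id>\d+)")
        SUBMIT_CMD_TMPL = "mysubmit '%(job)s'"

        def filter_poll_many_output(self, out):
            # Assume column 1 of each output line is a job ID.
            return [line.split(None, 1)[0]
                    for line in out.splitlines() if line.strip()]

        def get_poll_many_cmd(self, job_ids):
            return shlex.split(self.POLL_CMD) + list(job_ids)

        def format_directives(self, job_conf):
            return [self.DIRECTIVE_PREFIX + key + " " + str(value)
                    for key, value in job_conf["directives"].items()]


    BATCH_SYS_HANDLER = MyBatchHandler()
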
@@ -103,15 +112,64 @@ batch_sys.SUBMIT_CMD_STDIN_IS_JOB_FILE
 
 """
 
-from datetime import datetime
 import os
 import shlex
 from signal import SIGKILL
 import stat
-from subprocess import check_call, Popen, PIPE
+from subprocess import call, Popen, PIPE
 import sys
+import traceback
 from cylc.mkdir_p import mkdir_p
 from cylc.task_id import TaskID
+from cylc.task_message import TaskMessage
+from cylc.wallclock import get_current_time_string
+
+
+class JobPollContext(object):
+    """Context object for a job poll.
+
+    0 ctx.job_log_dir -- cycle/task/submit_num
+    1 ctx.batch_sys_name -- batch system name
+    2 ctx.batch_sys_job_id -- job ID in batch system
+    3 ctx.batch_sys_exit_polled -- 0 for false, 1 for true
+    4 ctx.run_status -- 0 for success, 1 for failure
+    5 ctx.run_signal -- signal received on run failure
+    6 ctx.time_submit_exit -- submit (exit) time
+    7 ctx.time_run -- run start time
+    8 ctx.time_run_exit -- run exit time
+
+    """
+
+    def __init__(self, job_log_dir):
+        self.job_log_dir = job_log_dir
+        self.batch_sys_name = None
+        self.batch_sys_job_id = None
+        self.batch_sys_exit_polled = None
+        self.run_status = None
+        self.run_signal = None
+        self.time_submit_exit = None
+        self.time_run = None
+        self.time_run_exit = None
+        self.messages = []
+
+    def get_summary_str(self):
+        """Return the poll context as a summary string delimited by "|"."""
+        items = []
+        for item in [
+                self.job_log_dir,
+                self.batch_sys_name,
+                self.batch_sys_job_id,
+                self.batch_sys_exit_polled,
+                self.run_status,
+                self.run_signal,
+                self.time_submit_exit,
+                self.time_run,
+                self.time_run_exit]:
+            if item is None:
+                items.append("")
+            else:
+                items.append(str(item))
+        return "|".join(items)
 
 
 class BatchSysManager(object):
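
For reference, get_summary_str simply joins the nine fields listed in the JobPollContext docstring with "|", substituting the empty string for unset fields; jobs_poll below prints the result after the "[TASK JOB SUMMARY]" prefix and a timestamp. A sketch with invented values:

    from cylc.batch_sys_manager import JobPollContext

    ctx = JobPollContext("20151212T0000Z/model/01")
    ctx.batch_sys_name = "pbs"
    ctx.batch_sys_job_id = "1234"
    ctx.time_submit_exit = "2015-12-12T10:00:00Z"

    ctx.get_summary_str()
    # '20151212T0000Z/model/01|pbs|1234||||2015-12-12T10:00:00Z||'
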
@@ -121,14 +179,21 @@ class BatchSysManager(object):
 
     """
 
-    CYLC_JOB_SUBMIT_TIME = "CYLC_JOB_SUBMIT_TIME"
     CYLC_BATCH_SYS_NAME = "CYLC_BATCH_SYS_NAME"
     CYLC_BATCH_SYS_JOB_ID = "CYLC_BATCH_SYS_JOB_ID"
-    LINE_PREFIX_CYLC_DIR = "export CYLC_DIR="
+    CYLC_BATCH_SYS_JOB_SUBMIT_TIME = "CYLC_BATCH_SYS_JOB_SUBMIT_TIME"
+    CYLC_BATCH_SYS_EXIT_POLLED = "CYLC_BATCH_SYS_EXIT_POLLED"
+    LINE_PREFIX_CYLC_DIR = "    export CYLC_DIR="
     LINE_PREFIX_BATCH_SYS_NAME = "# Job submit method: "
     LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL = "# Job submit command template: "
+    LINE_PREFIX_EOF = "#EOF: "
+    LINE_PREFIX_JOB_LOG_DIR = "# Job log directory: "
     LINE_UPDATE_CYLC_DIR = (
         "# N.B. CYLC_DIR has been updated on the remote host\n")
+    OUT_PREFIX_COMMAND = "[TASK JOB COMMAND]"
+    OUT_PREFIX_MESSAGE = "[TASK JOB MESSAGE]"
+    OUT_PREFIX_SUMMARY = "[TASK JOB SUMMARY]"
+    OUT_PREFIX_CMD_ERR = "[TASK JOB ERROR]"
     _INSTANCES = {}
 
     @classmethod
@@ -173,14 +238,128 @@ class BatchSysManager(object):
         if hasattr(batch_sys, "get_vacation_signal"):
             return batch_sys.get_vacation_signal(job_conf)
 
-    def is_bg_submit(self, batch_sys_name):
-        """Return True if batch_sys_name behaves like background submit."""
-        return getattr(self.get_inst(batch_sys_name), "IS_BG_SUBMIT", False)
+    def jobs_kill(self, job_log_root, job_log_dirs):
+        """Kill multiple jobs.
+
+        job_log_root -- The log/job/ sub-directory of the suite.
+        job_log_dirs -- A list containing point/name/submit_num for task jobs.
+
+        """
+        # Note: The more efficient way to do this is to group the jobs by their
+        # batch systems, and call the kill command for each batch system once.
+        # However, this will make it more difficult to determine if the kill
+        # command for a particular job is successful or not.
+        if "$" in job_log_root:
+            job_log_root = os.path.expandvars(job_log_root)
+        self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
+        now = get_current_time_string()
+        for job_log_dir in job_log_dirs:
+            ret_code, err = self.job_kill(
+                os.path.join(job_log_root, job_log_dir, "job.status"))
+            sys.stdout.write("%s%s|%s|%d\n" % (
+                self.OUT_PREFIX_SUMMARY, now, job_log_dir, ret_code))
+            # Note: Print STDERR to STDOUT may look a bit strange, but it
+            # requires less logic for the suite to parse the output.
+            if err.strip():
+                for line in err.splitlines(True):
+                    if not line.endswith("\n"):
+                        line += "\n"
+                    sys.stdout.write("%s%s|%s|%s" % (
+                        self.OUT_PREFIX_CMD_ERR, now, job_log_dir, line))
+
+    def jobs_poll(self, job_log_root, job_log_dirs):
+        """Poll multiple jobs.
+
+        job_log_root -- The log/job/ sub-directory of the suite.
+        job_log_dirs -- A list containing point/name/submit_num for task jobs.
+
+        """
+        if "$" in job_log_root:
+            job_log_root = os.path.expandvars(job_log_root)
+        self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
+
+        ctx_list = []  # Contexts for all relevant jobs
+        ctx_list_by_batch_sys = {}  # {batch_sys_name1: [ctx1, ...], ...}
+
+        for job_log_dir in job_log_dirs:
+            ctx = self._jobs_poll_status_files(job_log_root, job_log_dir)
+            if ctx is None:
+                continue
+            ctx_list.append(ctx)
+
+            if not ctx.batch_sys_name or not ctx.batch_sys_job_id:
+                sys.stderr.write(
+                    "%s/job.status: incomplete batch system info\n" % (
+                        ctx.job_log_dir))
+                continue
+
+            # We can trust:
+            # * Jobs previously polled to have exited the batch system.
+            # * Jobs succeeded or failed with ERR/EXIT.
+            if (ctx.batch_sys_exit_polled or ctx.run_status == 0 or
+                    ctx.run_signal in ["ERR", "EXIT"]):
+                continue
+
+            if ctx.batch_sys_name not in ctx_list_by_batch_sys:
+                ctx_list_by_batch_sys[ctx.batch_sys_name] = []
+            ctx_list_by_batch_sys[ctx.batch_sys_name].append(ctx)
+
+        for batch_sys_name, my_ctx_list in ctx_list_by_batch_sys.items():
+            self._jobs_poll_batch_sys(
+                job_log_root, batch_sys_name, my_ctx_list)
+
+        cur_time_str = get_current_time_string()
+        for ctx in ctx_list:
+            for message in ctx.messages:
+                sys.stdout.write("%s%s|%s|%s\n" % (
+                    self.OUT_PREFIX_MESSAGE,
+                    cur_time_str,
+                    ctx.job_log_dir,
+                    message))
+            sys.stdout.write("%s%s|%s\n" % (
+                self.OUT_PREFIX_SUMMARY,
+                cur_time_str,
+                ctx.get_summary_str()))
+
+    def jobs_submit(self, job_log_root, job_log_dirs, remote_mode=False):
+        """Submit multiple jobs.
+
+        job_log_root -- The log/job/ sub-directory of the suite.
+        job_log_dirs -- A list containing point/name/submit_num for task jobs.
+
+        """
+        if "$" in job_log_root:
+            job_log_root = os.path.expandvars(job_log_root)
+        self.configure_suite_run_dir(job_log_root.rsplit(os.sep, 2)[0])
+
+        if remote_mode:
+            items = self._jobs_submit_prep_by_stdin(job_log_root, job_log_dirs)
+        else:
+            items = self._jobs_submit_prep_by_args(job_log_root, job_log_dirs)
+        now = get_current_time_string()
+        for job_log_dir, batch_sys_name, batch_submit_cmd_tmpl in items:
+            job_file_path = os.path.join(job_log_root, job_log_dir, "job")
+            if not batch_sys_name:
+                sys.stdout.write("%s%s|%s|1|\n" % (
+                    self.OUT_PREFIX_SUMMARY, now, job_log_dir))
+                continue
+            ret_code, out, err, job_id = self._job_submit_impl(
+                job_file_path, batch_sys_name, batch_submit_cmd_tmpl)
+            sys.stdout.write("%s%s|%s|%d|%s\n" % (
+                self.OUT_PREFIX_SUMMARY, now, job_log_dir, ret_code, job_id))
+            for key, value in [("STDERR", err), ("STDOUT", out)]:
+                if value is None or not value.strip():
+                    continue
+                for line in value.splitlines(True):
+                    if not value.endswith("\n"):
+                        value += "\n"
+                    sys.stdout.write("%s%s|%s|[%s] %s" % (
+                        self.OUT_PREFIX_COMMAND, now, job_log_dir, key, line))
 
     def job_kill(self, st_file_path):
         """Ask batch system to terminate the job specified in "st_file_path".
 
-        Return zero on success, non-zero on failure.
+        Return 0 on success, non-zero integer on failure.
 
         """
         # SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job.status
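
jobs_kill, jobs_poll and jobs_submit above all report back to the suite by writing prefixed, "|"-delimited records to STDOUT. A rough sketch of how such output could be split back into records (the sample lines are invented; the real parsing lives on the suite side, outside this file):

    PREFIXES = ["[TASK JOB SUMMARY]", "[TASK JOB MESSAGE]",
                "[TASK JOB COMMAND]", "[TASK JOB ERROR]"]

    sample = (
        "[TASK JOB SUMMARY]2015-12-12T10:00:00Z|20151212T0000Z/model/01|0|1234\n"
        "[TASK JOB ERROR]2015-12-12T10:00:00Z|20151212T0000Z/model/01|oops\n")

    for line in sample.splitlines():
        for prefix in PREFIXES:
            if line.startswith(prefix):
                fields = line[len(prefix):].split("|")
                print((prefix, fields))
                break
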
@@ -191,14 +370,19 @@ class BatchSysManager(object):
                 batch_sys = self.get_inst(line.strip().split("=", 1)[1])
                 break
         else:
-            return 1
+            return (1, "Cannot determine batch system from 'job.status' file")
         st_file.seek(0, 0)  # rewind
         if getattr(batch_sys, "CAN_KILL_PROC_GROUP", False):
             for line in st_file:
                 if line.startswith("CYLC_JOB_PID="):
                     pid = line.strip().split("=", 1)[1]
-                    os.killpg(int(pid), SIGKILL)
-                    return 0
+                    try:
+                        os.killpg(int(pid), SIGKILL)
+                    except OSError as exc:
+                        traceback.print_exc()
+                        return (1, str(exc))
+                    else:
+                        return (0, "")
         st_file.seek(0, 0)  # rewind
         if hasattr(batch_sys, "KILL_CMD_TMPL"):
             for line in st_file:
@@ -208,14 +392,17 @@ class BatchSysManager(object):
                 command = shlex.split(
                     batch_sys.KILL_CMD_TMPL % {"job_id": job_id})
                 try:
-                    check_call(command)
+                    proc = Popen(command, stderr=PIPE)
                 except OSError as exc:
                     # subprocess.Popen has a bad habit of not setting the
                     # filename of the executable when it raises an OSError.
                     if not exc.filename:
                         exc.filename = command[0]
-                    raise
-        return 1
+                    traceback.print_exc()
+                    return (1, str(exc))
+                else:
+                    return (proc.wait(), proc.communicate()[1])
+        return (1, "Cannot determine batch job ID from 'job.status' file")
 
     def job_poll(self, st_file_path):
         """Poll status of the job specified in the "st_file_path".
@@ -297,6 +484,8 @@ class BatchSysManager(object):
 
         """
         # SUITE_RUN_DIR/log/job/CYCLE/TASK/SUBMIT/job
+        if "$" in job_file_path:
+            job_file_path = os.path.expandvars(job_file_path)
         self.configure_suite_run_dir(job_file_path.rsplit(os.sep, 6)[0])
 
         batch_sys_name = None
@@ -313,8 +502,155 @@ class BatchSysManager(object):
                     batch_submit_cmd_tmpl = line.replace(
                         self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
 
+        return self._job_submit_impl(
+            job_file_path, batch_sys_name, batch_submit_cmd_tmpl)
+
+    @classmethod
+    def _create_nn(cls, job_file_path):
+        """Create NN symbolic link, if necessary.
+
+        Helper for "self.submit".
+
+        """
+        job_file_dir = os.path.dirname(job_file_path)
+        source = os.path.basename(job_file_dir)
+        nn_path = os.path.join(os.path.dirname(job_file_dir), "NN")
+        try:
+            old_source = os.readlink(nn_path)
+        except OSError:
+            old_source = None
+        if old_source is not None and old_source != source:
+            os.unlink(nn_path)
+            old_source = None
+        if old_source is None:
+            os.symlink(source, nn_path)
+
+    def _filter_submit_output(self, st_file_path, batch_sys, out, err):
+        """Filter submit command output, if relevant."""
+        job_id = None
+        if hasattr(batch_sys, "REC_ID_FROM_SUBMIT_ERR"):
+            text = err
+            rec_id = batch_sys.REC_ID_FROM_SUBMIT_ERR
+        elif hasattr(batch_sys, "REC_ID_FROM_SUBMIT_OUT"):
+            text = out
+            rec_id = batch_sys.REC_ID_FROM_SUBMIT_OUT
+        if rec_id:
+            for line in str(text).splitlines():
+                match = rec_id.match(line)
+                if match:
+                    job_id = match.group("id")
+                    job_status_file = open(st_file_path, "a")
+                    job_status_file.write("%s=%s\n" % (
+                        self.CYLC_BATCH_SYS_JOB_ID, job_id))
+                    job_status_file.write("%s=%s\n" % (
+                        self.CYLC_BATCH_SYS_JOB_SUBMIT_TIME,
+                        get_current_time_string()))
+                    job_status_file.close()
+                    break
+        if hasattr(batch_sys, "filter_submit_output"):
+            out, err = batch_sys.filter_submit_output(out, err)
+        return out, err, job_id
+
+    def _jobs_poll_status_files(self, job_log_root, job_log_dir):
+        """Helper 1 for self.jobs_poll(job_log_root, job_log_dirs)."""
+        ctx = JobPollContext(job_log_dir)
+        try:
+            handle = open(os.path.join(
+                job_log_root, ctx.job_log_dir, "job.status"))
+        except IOError as exc:
+            sys.stderr.write(str(exc) + "\n")
+            return
+        for line in handle:
+            if "=" not in line:
+                continue
+            key, value = line.strip().split("=", 1)
+            if key == self.CYLC_BATCH_SYS_NAME:
+                ctx.batch_sys_name = value
+            elif key == self.CYLC_BATCH_SYS_JOB_ID:
+                ctx.batch_sys_job_id = value
+            elif key == self.CYLC_BATCH_SYS_EXIT_POLLED:
+                ctx.batch_sys_exit_polled = 1
+            elif key == self.CYLC_BATCH_SYS_JOB_SUBMIT_TIME:
+                ctx.time_submit_exit = value
+            elif key == TaskMessage.CYLC_JOB_INIT_TIME:
+                ctx.time_run = value
+            elif key == TaskMessage.CYLC_JOB_EXIT_TIME:
+                ctx.time_run_exit = value
+            elif key == TaskMessage.CYLC_JOB_EXIT:
+                if value == TaskMessage.SUCCEEDED.upper():
+                    ctx.run_status = 0
+                else:
+                    ctx.run_status = 1
+                    ctx.run_signal = value
+            elif key == TaskMessage.CYLC_MESSAGE:
+                ctx.messages.append(value)
+        handle.close()
+
+        return ctx
+
+    def _jobs_poll_batch_sys(self, job_log_root, batch_sys_name, my_ctx_list):
+        """Helper 2 for self.jobs_poll(job_log_root, job_log_dirs)."""
+        batch_sys = self.get_inst(batch_sys_name)
+        all_job_ids = [ctx.batch_sys_job_id for ctx in my_ctx_list]
+        if hasattr(batch_sys, "get_poll_many_cmd"):
+            # Some poll commands may not be as simple
+            cmd = batch_sys.get_poll_many_cmd(all_job_ids)
+        else:  # if hasattr(batch_sys, "POLL_CMD"):
+            # Simple poll command that takes a list of job IDs
+            cmd = [batch_sys.POLL_CMD] + all_job_ids
+        try:
+            proc = Popen(cmd, stderr=PIPE, stdout=PIPE)
+        except OSError as exc:
+            # subprocess.Popen has a bad habit of not setting the
+            # filename of the executable when it raises an OSError.
+            if not exc.filename:
+                exc.filename = cmd[0]
+            sys.stderr.write(str(exc) + "\n")
+            return
+        proc.wait()
+        out, err = proc.communicate()
+        sys.stderr.write(err)
+        if hasattr(batch_sys, "filter_poll_many_output"):
+            # Allow custom filter
+            job_ids = batch_sys.filter_poll_many_output(out)
+        else:
+            # Just about all poll commands return a table, with column 1
+            # being the job ID. The logic here should be sufficient to
+            # ensure that any table header is ignored.
+            job_ids = []
+            for line in out.splitlines():
+                try:
+                    head = line.split(None, 1)[0]
+                except IndexError:
+                    continue
+                if head in all_job_ids:
+                    job_ids.append(head)
+        for ctx in my_ctx_list:
+            ctx.batch_sys_exit_polled = int(
+                ctx.batch_sys_job_id not in job_ids)
+            # Add information to "job.status"
+            if ctx.batch_sys_exit_polled:
+                try:
+                    handle = open(os.path.join(
+                        job_log_root, ctx.job_log_dir, "job.status"), "a")
+                    handle.write("%s=%s\n" % (
+                        self.CYLC_BATCH_SYS_EXIT_POLLED,
+                        get_current_time_string()))
+                    handle.close()
+                except IOError as exc:
+                    sys.stderr.write(str(exc) + "\n")
+
+    def _job_submit_impl(
+            self, job_file_path, batch_sys_name, batch_submit_cmd_tmpl):
+        """Helper for self.jobs_submit() and self.job_submit()."""
+
         # Create NN symbolic link, if necessary
         self._create_nn(job_file_path)
+        for name in "job.err", "job.out":
+            try:
+                os.unlink(os.path.join(job_file_path, name))
+            except OSError:
+                pass
 
         # Start new status file
         job_status_file = open(job_file_path + ".status", "w")
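
_jobs_poll_status_files above drives the poll entirely from "key=value" lines in each job.status file. For orientation, the status file of a successfully completed job might look roughly like this (all values, and the exact spellings of the TaskMessage-derived keys, are assumptions):

    CYLC_BATCH_SYS_NAME=pbs
    CYLC_BATCH_SYS_JOB_ID=1234
    CYLC_BATCH_SYS_JOB_SUBMIT_TIME=2015-12-12T10:00:00Z
    CYLC_JOB_PID=5678
    CYLC_JOB_INIT_TIME=2015-12-12T10:01:00Z
    CYLC_JOB_EXIT=SUCCEEDED
    CYLC_JOB_EXIT_TIME=2015-12-12T10:05:00Z

A job that has already left the batch system additionally carries a CYLC_BATCH_SYS_EXIT_POLLED line, appended by _jobs_poll_batch_sys above.
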
@@ -332,30 +668,33 @@ class BatchSysManager(object):
             proc_stdin_value = batch_sys.SUBMIT_CMD_STDIN_TMPL % {
                 "job": job_file_path}
             proc_stdin_arg = PIPE
-        if batch_submit_cmd_tmpl:
-            # No need to catch OSError when using shell. It is unlikely that we
-            # do not have a shell, and still manage to get as far as here.
-            batch_sys_cmd = batch_submit_cmd_tmpl % {"job": job_file_path}
-            proc = Popen(
-                batch_sys_cmd,
-                stdin=proc_stdin_arg, stdout=PIPE, stderr=PIPE, shell=True)
-        elif hasattr(batch_sys, "submit"):
+        if hasattr(batch_sys, "submit"):
             # batch_sys.submit should handle OSError, if relevant.
-            proc = batch_sys.submit(job_file_path)
+            ret_code, out, err = batch_sys.submit(job_file_path)
         else:
-            command = shlex.split(
-                batch_sys.SUBMIT_CMD_TMPL % {"job": job_file_path})
-            try:
+            if batch_submit_cmd_tmpl:
+                # No need to catch OSError when using shell. It is unlikely
+                # that we do not have a shell, and still manage to get as far
+                # as here.
+                batch_sys_cmd = batch_submit_cmd_tmpl % {"job": job_file_path}
                 proc = Popen(
-                    command, stdin=proc_stdin_arg, stdout=PIPE, stderr=PIPE)
-            except OSError as exc:
-                # subprocess.Popen has a bad habit of not setting the filename
-                # of the executable when it raises an OSError.
-                if not exc.filename:
-                    exc.filename = command[0]
-                raise
-        out, err = proc.communicate(proc_stdin_value)
-        ret_code = proc.wait()
+                    batch_sys_cmd,
+                    stdin=proc_stdin_arg, stdout=PIPE, stderr=PIPE, shell=True)
+            else:
+                command = shlex.split(
+                    batch_sys.SUBMIT_CMD_TMPL % {"job": job_file_path})
+                try:
+                    proc = Popen(
+                        command, stdin=proc_stdin_arg,
+                        stdout=PIPE, stderr=PIPE)
+                except OSError as exc:
+                    # subprocess.Popen has a bad habit of not setting the
+                    # filename of the executable when it raises an OSError.
+                    if not exc.filename:
+                        exc.filename = command[0]
+                    return 1, "", str(exc), ""
+            out, err = proc.communicate(proc_stdin_value)
+            ret_code = proc.wait()
 
         # Filter submit command output, if relevant
         # Get job ID, if possible
@@ -370,46 +709,107 @@ class BatchSysManager(object):
 
         return ret_code, out, err, job_id
 
-    @classmethod
-    def _create_nn(cls, job_file_path):
-        """Create NN symbolic link, if necessary.
+    def _jobs_submit_prep_by_args(self, job_log_root, job_log_dirs):
+        """Prepare job files for submit by reading files in arguments.
 
-        Helper for "self.submit".
+        Job files are specified in the arguments in local mode. Extract job
+        submission methods and job submission command templates from each job
+        file.
+
+        Return a list, where each element contains something like:
+        (job_log_dir, batch_sys_name, batch_submit_cmd_tmpl)
 
         """
-        job_file_dir = os.path.dirname(job_file_path)
-        nn_path = os.path.join(os.path.dirname(job_file_dir), "NN")
-        try:
-            os.unlink(nn_path)
-        except OSError:
-            pass
-        os.symlink(os.path.basename(job_file_dir), nn_path)
+        items = []
+        for job_log_dir in job_log_dirs:
+            job_file_path = os.path.join(job_log_root, job_log_dir, "job")
+            batch_sys_name = None
+            batch_submit_cmd_tmpl = None
+            for line in open(job_file_path):
+                if line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
+                    batch_sys_name = line.replace(
+                        self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
+                elif line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
+                    batch_submit_cmd_tmpl = line.replace(
+                        self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
+            items.append((job_log_dir, batch_sys_name, batch_submit_cmd_tmpl))
+        return items
 
-    def _filter_submit_output(self, st_file_path, batch_sys, out, err):
-        """Filter submit command output, if relevant."""
-        job_id = None
-        if hasattr(batch_sys, "REC_ID_FROM_SUBMIT_ERR"):
-            text = err
-            rec_id = batch_sys.REC_ID_FROM_SUBMIT_ERR
-        elif hasattr(batch_sys, "REC_ID_FROM_SUBMIT_OUT"):
-            text = out
-            rec_id = batch_sys.REC_ID_FROM_SUBMIT_OUT
-        if rec_id:
-            for line in str(text).splitlines():
-                match = rec_id.match(line)
-                if match:
-                    job_id = match.group("id")
-                    job_status_file = open(st_file_path, "a")
-                    job_status_file.write("%s=%s\n" % (
-                        self.CYLC_BATCH_SYS_JOB_ID, job_id))
-                    job_status_file.write("%s=%s\n" % (
-                        self.CYLC_JOB_SUBMIT_TIME,
-                        datetime.utcnow().strftime("%FT%H:%M:%SZ")))
-                    job_status_file.close()
-                    break
-        if hasattr(batch_sys, "filter_submit_output"):
-            out, err = batch_sys.filter_submit_output(out, err)
-        return out, err, job_id
+    def _jobs_submit_prep_by_stdin(self, job_log_root, job_log_dirs):
+        """Prepare job files for submit by reading from STDIN.
+
+        Job files are uploaded via STDIN in remote mode. Modify job
+        files' CYLC_DIR for this host. Extract job submission methods
+        and job submission command templates from each job file.
+
+        Return a list, where each element contains something like:
+        (job_log_dir, batch_sys_name, batch_submit_cmd_tmpl)
+
+        """
+        items = [[job_log_dir, None, None] for job_log_dir in job_log_dirs]
+        items_map = {}
+        for item in items:
+            items_map[item[0]] = item
+        handle = None
+        batch_sys_name = None
+        batch_submit_cmd_tmpl = None
+        job_log_dir = None
+        lines = []
+        # Get job files from STDIN.
+        # Modify CYLC_DIR in job file, if necessary.
+        # Get batch system name and batch submit command template from each job
+        # file.
+        # Write job file in correct location.
+        while True:  # Note: "for cur_line in sys.stdin:" may hang
+            cur_line = sys.stdin.readline()
+            if not cur_line:
+                if handle is not None:
+                    handle.close()
+                break
+
+            if cur_line.startswith(self.LINE_PREFIX_CYLC_DIR):
+                old_line = cur_line
+                cur_line = "%s'%s'\n" % (
+                    self.LINE_PREFIX_CYLC_DIR, os.environ["CYLC_DIR"])
+                if old_line != cur_line:
+                    lines.append(self.LINE_UPDATE_CYLC_DIR)
+            elif cur_line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
+                batch_sys_name = cur_line.replace(
+                    self.LINE_PREFIX_BATCH_SYS_NAME, "").strip()
+            elif cur_line.startswith(self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL):
+                batch_submit_cmd_tmpl = cur_line.replace(
+                    self.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL, "").strip()
+            elif cur_line.startswith(self.LINE_PREFIX_JOB_LOG_DIR):
+                job_log_dir = cur_line.replace(
+                    self.LINE_PREFIX_JOB_LOG_DIR, "").strip()
+                mkdir_p(os.path.join(job_log_root, job_log_dir))
+                handle = open(
+                    os.path.join(job_log_root, job_log_dir, "job.tmp"), "wb")
+
+            if handle is None:
+                lines.append(cur_line)
+            else:
+                for line in lines + [cur_line]:
+                    handle.write(line)
+                lines = []
+                if cur_line.startswith(self.LINE_PREFIX_EOF + job_log_dir):
+                    handle.close()
+                    # Make it executable
+                    os.chmod(handle.name, (
+                        os.stat(handle.name).st_mode |
+                        stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
+                    # Rename from "*/job.tmp" to "*/job"
+                    os.rename(handle.name, handle.name[:-4])
+                    try:
+                        items_map[job_log_dir][1] = batch_sys_name
+                        items_map[job_log_dir][2] = batch_submit_cmd_tmpl
+                    except KeyError:
+                        pass
+                    handle = None
+                    job_log_dir = None
+                    batch_sys_name = None
+                    batch_submit_cmd_tmpl = None
+        return items
 
     def _job_submit_prepare_remote(self, job_file_path):
         """Prepare a remote job file.
@@ -430,12 +830,10 @@ class BatchSysManager(object):
             if not line:
                 sys.stdin.close()
                 break
-            if line.strip().startswith(self.LINE_PREFIX_CYLC_DIR):
+            if line.startswith(self.LINE_PREFIX_CYLC_DIR):
                 old_line = line
-                line = (
-                    line[0:line.find(self.LINE_PREFIX_CYLC_DIR)] +
-                    self.LINE_PREFIX_CYLC_DIR + "'%s'\n" %
-                    os.environ["CYLC_DIR"])
+                line = "%s'%s'\n" % (
+                    self.LINE_PREFIX_CYLC_DIR, os.environ["CYLC_DIR"])
                 if old_line != line:
                     job_file.write(self.LINE_UPDATE_CYLC_DIR)
             elif line.startswith(self.LINE_PREFIX_BATCH_SYS_NAME):
diff --git a/lib/cylc/batchproc.py b/lib/cylc/batchproc.py
deleted file mode 100644
index 62d2c50..0000000
--- a/lib/cylc/batchproc.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import sys, re
-import subprocess
-import flags
-
-# Instead of p.wait() below, We could use p.poll() which returns None
-# until process p finishes, after which it returns p's exit status; this
-# would allow print out of exactly when each process finishes. Same
-# result in the end though, in terms of whole batch wait.
-
-class batchproc:
-    """ Batch process items that return a subprocess-style command list
-        [command, arg1, arg2, ...] via an execute() method. Items are
-        added until a batch fills up, then the whole batch is processed
-        in parallel and we wait on the whole batch to complete before
-        beginning the next batch.
-        Users should do a final call to process() to handle any final
-        items in an incomplete batch."""
-
-    def __init__( self, size=1, shell=False ):
-        self.batchno = 0
-        self.items = []
-        self.size = int(size)
-        self.shell = shell
-        print
-        print "  Initializing parallel batch processing, batch size", size
-        print
-
-    def add_or_process( self, item ):
-        n_actioned = 0
-        self.items.append( item )
-        if len( self.items ) >= self.size:
-            n_actioned = self.process()
-            self.items = []
-        return n_actioned
-
-    def process( self ):
-        if len( self.items ) == 0:
-            return 0
-        self.batchno += 1
-        if flags.verbose:
-            print "  Batch No.", self.batchno
-        proc = []
-        count = 0
-        n_succeeded = 0
-        for item in self.items:
-            # SPAWN BATCH MEMBER PROCESSES IN PARALLEL
-            proc.append( subprocess.Popen( item.execute(), shell=self.shell, \
-                    stdout=subprocess.PIPE, stderr=subprocess.PIPE ))
-        for p in proc:
-            # WAIT FOR ALL PROCESSES TO FINISH
-            count += 1
-            p.wait()   # blocks until p finishes
-            stdout, stderr = p.communicate()
-            error_reported = False
-            if stderr != '':
-                error_reported = True
-                print '  ERROR reported in Batch', self.batchno, 'member', count
-            if stdout != '':
-                if flags.verbose or error_reported:
-                    print '    Batch', self.batchno, 'member', count, 'stdout:'
-                for line in re.split( r'\n', stdout ):
-                    if flags.verbose or error_reported:
-                        print '   ', line
-                    if re.search( 'SUCCEEDED', line ):
-                        n_succeeded += 1
-            if error_reported:
-                print '    Batch', self.batchno, 'member', count, 'stderr:'
-                for line in re.split( r'\n', stderr ):
-                    print '   ', line
-
-        return n_succeeded
-
-#========= test code follows: ========>
-
-class item:
-    def __init__( self, i ):
-        self.i = str(i)
-    def execute( self ):
-        return 'echo hello from ' + self.i + '... && sleep 5 && echo ... bye from ' + self.i
-
-if __name__ == "__main__":
-
-    usage = "USAGE: " + sys.argv[0] + " <batch-size>"
-    if len( sys.argv ) != 2:
-        print usage
-        sys.exit(1)
-
-    batchsize = sys.argv[1]
-
-    b = batchproc( batchsize, shell=True )
-    for i in range(10):
-        b.add_or_process( item(i) )
-    # process any leftovers
-    b.process()
diff --git a/lib/cylc/broadcast_report.py b/lib/cylc/broadcast_report.py
index a11e637..0c272dc 100644
--- a/lib/cylc/broadcast_report.py
+++ b/lib/cylc/broadcast_report.py
@@ -20,8 +20,9 @@
 
 BAD_OPTIONS_FMT = "\n  --%s=%s"
 BAD_OPTIONS_TITLE = "ERROR: No broadcast to cancel/clear for these options:"
-BAD_OPTIONS_TITLE_SET = "ERROR: Invalid broadcast set options:"
-CHANGE_FMT = "\n%(prefix)s [%(namespace)s.%(point_string)s] %(setting)s"
+BAD_OPTIONS_TITLE_SET = ("ERROR: Rejected broadcast: settings are not" +
+                         " compatible with the suite")
+CHANGE_FMT = "\n%(change)s [%(namespace)s.%(point)s] %(key)s=%(value)s"
 CHANGE_PREFIX_CANCEL = "-"
 CHANGE_PREFIX_SET = "+"
 CHANGE_TITLE_CANCEL = "Broadcast cancelled:"
@@ -53,26 +54,48 @@ def get_broadcast_bad_options_report(bad_options, is_set=False):
     return msg
 
 
-def get_broadcast_change_report(modified_settings, is_cancel=False):
-    """Return a string for reporting modification to broadcast settings."""
+def get_broadcast_change_iter(modified_settings, is_cancel=False):
+    """Return an iterator of broadcast changes.
+
+    Each broadcast change is a dict with keys:
+    change, point, namespace, key, value
+
+    """
     if not modified_settings:
-        return ""
+        return
     if is_cancel:
-        prefix = CHANGE_PREFIX_CANCEL
-        msg = CHANGE_TITLE_CANCEL
+        change = CHANGE_PREFIX_CANCEL
     else:
-        prefix = CHANGE_PREFIX_SET
-        msg = CHANGE_TITLE_SET
+        change = CHANGE_PREFIX_SET
     for modified_setting in sorted(modified_settings):
-        data = {"prefix": prefix}
-        data["point_string"], data["namespace"], setting = modified_setting
-        data["setting"] = ""
+        point, namespace, setting = modified_setting
         value = setting
+        keys_str = ""
         while isinstance(value, dict):
             key, value = value.items()[0]
             if isinstance(value, dict):
-                data["setting"] += "[" + key + "]"
+                keys_str += "[" + key + "]"
             else:
-                data["setting"] += key + "=" + str(value)
-        msg += CHANGE_FMT % data
+                keys_str += key
+                yield {
+                    "change": change,
+                    "point": point,
+                    "namespace": namespace,
+                    "key": keys_str,
+                    "value": str(value)}
+
+
+def get_broadcast_change_report(modified_settings, is_cancel=False):
+    """Return a string for reporting modification to broadcast settings."""
+    if not modified_settings:
+        return ""
+    if is_cancel:
+        change = CHANGE_PREFIX_CANCEL
+        msg = CHANGE_TITLE_CANCEL
+    else:
+        change = CHANGE_PREFIX_SET
+        msg = CHANGE_TITLE_SET
+    for broadcast_change in get_broadcast_change_iter(
+            modified_settings, is_cancel):
+        msg += CHANGE_FMT % broadcast_change
     return msg
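
With the new CHANGE_FMT and get_broadcast_change_iter, each reported broadcast change now spells out the full key path and value. A sketch with an invented setting:

    from cylc.broadcast_report import CHANGE_FMT, get_broadcast_change_iter

    modified_settings = [
        ("20151212T0000Z", "model", {"environment": {"FOO": "bar"}})]
    for change in get_broadcast_change_iter(modified_settings):
        print(CHANGE_FMT % change)
    # -> "\n+ [model.20151212T0000Z] [environment]FOO=bar"
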
diff --git a/lib/cylc/broker.py b/lib/cylc/broker.py
index 835f413..f48e79e 100644
--- a/lib/cylc/broker.py
+++ b/lib/cylc/broker.py
@@ -24,34 +24,35 @@ import logging
 # suite, and initialised from the outputs of all the tasks.
 # "Satisfied" => the output has been completed.
 
+
 class broker(object):
     # A broker aggregates output messages from many objects.
     # Each task registers its outputs with the suite broker, then each
     # task tries to get its prerequisites satisfied by the broker's
     # outputs.
 
-    def __init__( self ):
-         self.log = logging.getLogger( 'main' )
-         self.all_outputs = {}   # all_outputs[ message ] = taskid
+    def __init__(self):
+        self.log = logging.getLogger('main')
+        self.all_outputs = {}   # all_outputs[ message ] = taskid
 
-    def register( self, tasks ):
+    def register(self, tasks):
 
         for task in tasks:
-            self.all_outputs.update( task.outputs.completed )
+            self.all_outputs.update(task.outputs.completed)
             # TODO - SHOULD WE CHECK FOR SYSTEM-WIDE DUPLICATE OUTPUTS?
             # (note that successive tasks of the same type can register
             # identical outputs if they write staggered restart files).
 
-    def reset( self ):
+    def reset(self):
         # throw away all messages
         self.all_outputs = {}
 
-    def dump( self ):
+    def dump(self):
         # for debugging
         print "BROKER DUMP:"
         for msg in self.all_outputs:
             print " + " + self.all_outputs[msg], msg
 
-    def negotiate( self, task ):
+    def negotiate(self, task):
         # can my outputs satisfy any of task's prerequisites
-        task.satisfy_me( self.all_outputs )
+        task.satisfy_me(self.all_outputs)
diff --git a/lib/cylc/cfgspec/gcylc.py b/lib/cylc/cfgspec/gcylc.py
index 8e7d370..f43a7b7 100644
--- a/lib/cylc/cfgspec/gcylc.py
+++ b/lib/cylc/cfgspec/gcylc.py
@@ -16,9 +16,12 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, sys, gtk
+import os
+import sys
+import gtk
 from copy import deepcopy, copy
 
+from parsec import ParsecError
 from parsec.config import config, ItemNotFoundError, itemstr
 from parsec.validate import validator as vdr
 from parsec.upgrade import upgrader
@@ -28,46 +31,56 @@ from cylc.task_state import task_state
 
 "gcylc config file format."
 
-SITE_FILE = os.path.join( os.environ['CYLC_DIR'], 'conf', 'gcylcrc', 'themes.rc' )
-USER_FILE = os.path.join( os.environ['HOME'], '.cylc', 'gcylc.rc' )
+SITE_FILE = os.path.join(
+    os.environ['CYLC_DIR'], 'conf', 'gcylcrc', 'themes.rc')
+USER_FILE = os.path.join(os.environ['HOME'], '.cylc', 'gcylc.rc')
 
 SPEC = {
-    'initial views'   : vdr( vtype='string_list', default=["text"] ),
-    'ungrouped views' : vdr( vtype='string_list', default=[] ),
-    'use theme'       : vdr( vtype='string', default="default" ),
-    'dot icon size'   : vdr( vtype='string', default="medium",
-                             options=["small","medium","large", "extra large"]),
-    'sort by definition order' : vdr( vtype='boolean', default=True ), 
-    'task filter highlight color' : vdr(vtype='string', default='PowderBlue'),
-    'themes' : {
-        '__MANY__' : {
-            'inherit'       : vdr( vtype='string', default="default" ),
-            'defaults'      : vdr( vtype='string_list' ),
-            'waiting'       : vdr( vtype='string_list' ),
-            'held'          : vdr( vtype='string_list' ),
-            'queued'        : vdr( vtype='string_list' ),
-            'ready'         : vdr( vtype='string_list' ),
-            'submitted'     : vdr( vtype='string_list' ),
-            'submit-failed' : vdr( vtype='string_list' ),
-            'running'       : vdr( vtype='string_list' ),
-            'succeeded'     : vdr( vtype='string_list' ),
-            'failed'        : vdr( vtype='string_list' ),
-            'retrying'      : vdr( vtype='string_list' ),
-            'submit-retrying' : vdr( vtype='string_list' ),
-            'runahead' : vdr( vtype='string_list' ),
-            },
+    'initial views': vdr(vtype='string_list', default=["text"]),
+    'ungrouped views': vdr(vtype='string_list', default=[]),
+    'use theme': vdr(vtype='string', default="default"),
+    'dot icon size': vdr(
+        vtype='string',
+        default="medium",
+        options=["small", "medium", "large", "extra large"]),
+    'sort by definition order': vdr(vtype='boolean', default=True),
+    'task filter highlight color': vdr(vtype='string', default='PowderBlue'),
+    'initial side-by-side views': vdr(vtype='boolean', default=False),
+    'themes': {
+        '__MANY__': {
+            'inherit': vdr(vtype='string', default="default"),
+            'defaults': vdr(vtype='string_list'),
+            'waiting': vdr(vtype='string_list'),
+            'held': vdr(vtype='string_list'),
+            'queued': vdr(vtype='string_list'),
+            'ready': vdr(vtype='string_list'),
+            'expired': vdr(vtype='string_list'),
+            'submitted': vdr(vtype='string_list'),
+            'submit-failed': vdr(vtype='string_list'),
+            'running': vdr(vtype='string_list'),
+            'succeeded': vdr(vtype='string_list'),
+            'failed': vdr(vtype='string_list'),
+            'retrying': vdr(vtype='string_list'),
+            'submit-retrying': vdr(vtype='string_list'),
+            'runahead': vdr(vtype='string_list'),
         },
-    }
+    },
+}
 
-def upg( cfg, descr ):
-    u = upgrader(cfg, descr )
-    u.deprecate( '5.4.3', ['themes','__MANY__', 'submitting'], ['themes','__MANY__', 'ready'] )
+
+def upg(cfg, descr):
+    u = upgrader(cfg, descr)
+    u.deprecate(
+        '5.4.3',
+        ['themes', '__MANY__', 'submitting'],
+        ['themes', '__MANY__', 'ready'])
     u.upgrade()
 
-class gconfig( config ):
+
+class gconfig(config):
     """gcylc user configuration - default view panels, task themes etc."""
 
-    def transform( self ):
+    def transform(self):
         """
         1) theme inheritance
         2) turn state attribute lists into dicts for easier access:
@@ -87,7 +100,9 @@ class gconfig( config ):
 
         # and check it is valid
         if self.use_theme not in cfg['themes']:
-            print >> sys.stderr, "WARNING: theme " + self.use_theme + " not found, using '" + self.default_theme + "'"
+            print >> sys.stderr, (
+                "WARNING: theme " + self.use_theme + " not found, using '" +
+                self.default_theme + "'")
             cfg['use theme'] = 'default'
             self.use_theme = self.default_theme
 
@@ -103,7 +118,9 @@ class gconfig( config ):
                 if cfg['themes'][name]['inherit']:
                     parent = cfg['themes'][name]['inherit']
                     if parent not in cfg['themes']:
-                        print >> sys.stderr, "WARNING: undefined parent '" + parent + "' (theme '"+ label + "')"
+                        print >> sys.stderr, (
+                            "WARNING: undefined parent '" + parent +
+                            "' (theme '" + label + "')")
                         parent = "default"
                 else:
                     break
@@ -115,86 +132,94 @@ class gconfig( config ):
                 if item in inherited:
                     prev = item
                     continue
-                #print 'Inherit:', item, '<--', prev
-                self.inherit( theme, cfg['themes'][item] )
-                inherited.append( item )
+                # print 'Inherit:', item, '<--', prev
+                self.inherit(theme, cfg['themes'][item])
+                inherited.append(item)
             cfg['themes'][label] = theme
 
         # expand theme data
         cfg_themes = {}
         for theme in cfg['themes']:
-            for key,val in cfg['themes'][self.default_theme].items():
+            for key, val in cfg['themes'][self.default_theme].items():
                 if not cfg['themes'][theme][key]:
                     cfg['themes'][theme][key] = val
 
             cfg_themes[theme] = {}
-            defs = self.parse_state( theme, 'defaults', cfg['themes'][theme]['defaults'] )
+            defs = self.parse_state(
+                theme, 'defaults', cfg['themes'][theme]['defaults'])
 
             for item, val in cfg['themes'][theme].items():
-                if item in [ 'inherit', 'defaults' ]:
+                if item in ['inherit', 'defaults']:
                     continue
                 state = item
                 if state not in task_state.legal:
-                    print >> sys.stderr, "WARNING, ignoring illegal task state '" + state + "' in theme", theme
+                    print >> sys.stderr, (
+                        "WARNING, ignoring illegal task state '" + state +
+                        "' in theme", theme)
                 # reverse inherit (override)
                 tcfg = deepcopy(defs)
-                self.inherit( tcfg, self.parse_state(theme, item, val))
+                self.inherit(tcfg, self.parse_state(theme, item, val))
                 cfg_themes[theme][state] = tcfg
 
         # final themes result:
         cfg['themes'] = cfg_themes
 
-    def check( self ):
+    def check(self):
         # check initial view config
-        cfg = self.get( sparse=True )
+        cfg = self.get(sparse=True)
         if 'initial views' not in cfg:
             return
         views = copy(cfg['initial views'])
         for view in views:
-            if view not in ['dot', 'text', 'graph' ]:
-                print >> sys.stderr, "WARNING: ignoring illegal view name '" + view + "'"
-                cfg['initial views'].remove( view )
+            if view not in ['dot', 'text', 'graph']:
+                print >> sys.stderr, (
+                    "WARNING: ignoring illegal view name '" + view + "'")
+                cfg['initial views'].remove(view)
         views = cfg['initial views']
-        if len( views ) == 0:
+        if len(views) == 0:
             # at least one view required
-            print >> sys.stderr, "WARNING: no initial views defined, defaulting to 'text'"
+            print >> sys.stderr, (
+                "WARNING: no initial views defined, defaulting to 'text'")
             cfg['initial views'] = ['text']
 
-    def parse_state( self, theme, name, cfglist=[] ):
+    def parse_state(self, theme, name, cfglist=[]):
         allowed_keys = ['style', 'color', 'fontcolor']
         cfg = {}
         for item in cfglist:
             key, val = item.split('=')
             if key not in allowed_keys:
-                raise SystemExit( 'ERROR, gcylc.rc, illegal: ' + theme + ': '+ name + ' = ' + cfglist )
+                raise SystemExit('ERROR, gcylc.rc, illegal: ' + theme + ': ' +
+                                 name + ' = ' + cfglist)
             if key == 'color' or key == 'fontcolor':
                 try:
-                    gtk.gdk.color_parse( val )
+                    gtk.gdk.color_parse(val)
                 except ValueError, x:
                     print >> sys.stderr, 'ERROR', x
-                    sys.exit( 'ERROR, gcylc.rc, illegal color: ' + theme + ': ' + name + '="' + item + '"')
+                    sys.exit('ERROR, gcylc.rc, illegal color: ' + theme +
+                             ': ' + name + '="' + item + '"')
             cfg[key] = val
         return cfg
 
-    def inherit( self, target, source ):
+    def inherit(self, target, source):
         for item in source:
-            if isinstance( source[item], dict ):
+            if isinstance(source[item], dict):
                 if item not in target:
                     target[item] = {}
-                self.inherit( target[item], source[item] )
+                self.inherit(target[item], source[item])
             else:
                 target[item] = source[item]
 
-    def dump( self, keys=[], sparse=False, pnative=False, prefix='', none_str='' ):
+    def dump(self, keys=[], sparse=False, pnative=False, prefix='',
+             none_str=''):
         # override parse.config.dump() to restore the list-nature of
         # theme state items
-        cfg = deepcopy( self.get( [], sparse ))
+        cfg = deepcopy(self.get([], sparse))
         try:
             for theme in cfg['themes'].values():
                 for state in theme.keys():
                     clist = []
                     for attr, val in theme[state].items():
-                        clist.append( attr + '=' + val )
+                        clist.append(attr + '=' + val)
                     theme[state] = clist
         except:
             pass
@@ -204,21 +229,33 @@ class gconfig( config ):
             try:
                 cfg = cfg[key]
             except KeyError, x:
-                raise ItemNotFoundError( itemstr(parents,key) )
+                raise ItemNotFoundError(itemstr(parents, key))
             else:
                 parents.append(key)
 
         if pnative:
             print cfg
         else:
-            printcfg( cfg, prefix=prefix, level=len(keys) )
+            printcfg(cfg, prefix=prefix, level=len(keys))
 
 # load on import if not already loaded
 gcfg = None
 if not gcfg:
-    gcfg = gconfig( SPEC, upg )
-    gcfg.loadcfg( SITE_FILE, "site config" )
-    gcfg.loadcfg( USER_FILE, "user config" )
+    gcfg = gconfig(SPEC, upg)
+    try:
+        gcfg.loadcfg(SITE_FILE, "site config")
+    except ParsecError as exc:
+        sys.stderr.write(
+            "WARNING: ignoring bad site GUI config %s:\n"
+            "%s\n" % (SITE_FILE, str(exc)))
+
+    if os.access(USER_FILE, os.F_OK | os.R_OK):
+        try:
+            gcfg.loadcfg(USER_FILE, "user config")
+        except ParsecError as exc:
+            sys.stderr.write("ERROR: bad user GUI config %s:\n" % USER_FILE)
+            raise
+
     # check and correct initial view config etc.
     gcfg.check()
     # add spec defaults and do theme inheritance
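
For context, the SPEC and parse_state above correspond to a user gcylc.rc along these lines (all values invented; only style, color and fontcolor are accepted as theme state attributes, and initial views may only contain dot, text and graph):

    initial views = text, dot
    use theme = my-theme
    dot icon size = large

    [themes]
        [[my-theme]]
            inherit = default
            defaults = "color=#bbbbbb", "style=filled"
            running = "color=#00ff00"
            failed = "color=#ff0000", "fontcolor=white"
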
diff --git a/lib/cylc/cfgspec/globalcfg.py b/lib/cylc/cfgspec/globalcfg.py
index 1880011..3499d86 100644
--- a/lib/cylc/cfgspec/globalcfg.py
+++ b/lib/cylc/cfgspec/globalcfg.py
@@ -16,7 +16,9 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, sys, re
+import os
+import sys
+import re
 import atexit
 import shutil
 from tempfile import mkdtemp
@@ -26,6 +28,7 @@ from parsec.validate import (
     coercers, _strip_and_unquote, _strip_and_unquote_list, _expand_list,
     IllegalValueError
 )
+from parsec import ParsecError
 from parsec.util import itemstr
 from parsec.upgrade import upgrader, converter
 from parsec.fileparse import parse
@@ -34,167 +37,298 @@ from cylc.owner import user
 from cylc.envvar import expandvars
 from cylc.mkdir_p import mkdir_p
 import cylc.flags
-from cylc.cfgspec.suite import coerce_interval
-from cylc.cfgspec.suite import coerce_interval_list
+from cylc.cfgspec.utils import coerce_interval
+from cylc.cfgspec.utils import coerce_interval_list
+from cylc.network import PRIVILEGE_LEVELS
 
 
 "Cylc site and user configuration file spec."
 
-coercers['interval_seconds'] = (
-    lambda *args: coerce_interval(*args, check_syntax_version=False))
-coercers['interval_minutes_list'] = (
-    lambda *args: coerce_interval_list(*args, back_comp_unit_factor=60,
-                                       check_syntax_version=False))
+coercers['interval_seconds'] = lambda *args: coerce_interval(
+    *args, check_syntax_version=False)
+coercers['interval_minutes'] = lambda *args: coerce_interval(
+    *args, back_comp_unit_factor=60, check_syntax_version=False)
+coercers['interval_minutes_list'] = lambda *args: coerce_interval_list(
+    *args, back_comp_unit_factor=60, check_syntax_version=False)
+
 
 SPEC = {
-    'process pool size'                   : vdr( vtype='integer', default=None ),
-    'temporary directory'                 : vdr( vtype='string' ),
-    'state dump rolling archive length'   : vdr( vtype='integer', vmin=1, default=10 ),
-    'disable interactive command prompts' : vdr( vtype='boolean', default=True ),
-    'enable run directory housekeeping'   : vdr( vtype='boolean', default=False ),
-    'run directory rolling archive length': vdr( vtype='integer', vmin=0, default=2 ),
-    'submission polling intervals'        : vdr( vtype='interval_minutes_list', default=[]),
-    'execution polling intervals'         : vdr( vtype='interval_minutes_list', default=[]),
-
-    'task host select command timeout'    : vdr( vtype='interval_seconds', default=10),
-    'task messaging' : {
-        'retry interval'                  : vdr( vtype='interval_seconds', default=5),
-        'maximum number of tries'         : vdr( vtype='integer', vmin=1, default=7 ),
-        'connection timeout'              : vdr( vtype='interval_seconds', default=30),
+    'process pool size': vdr(vtype='integer', default=4),
+    'temporary directory': vdr(vtype='string'),
+    'state dump rolling archive length': vdr(
+        vtype='integer', vmin=1, default=10),
+    'disable interactive command prompts': vdr(vtype='boolean', default=True),
+    'enable run directory housekeeping': vdr(vtype='boolean', default=False),
+    'run directory rolling archive length': vdr(
+        vtype='integer', vmin=0, default=2),
+    'submission polling intervals': vdr(
+        vtype='interval_minutes_list', default=[]),
+    'execution polling intervals': vdr(
+        vtype='interval_minutes_list', default=[]),
+
+    'task host select command timeout': vdr(
+        vtype='interval_seconds', default=10),
+    'task messaging': {
+        'retry interval': vdr(vtype='interval_seconds', default=5),
+        'maximum number of tries': vdr(vtype='integer', vmin=1, default=7),
+        'connection timeout': vdr(vtype='interval_seconds', default=30),
 
-        },
+    },
 
-    'suite logging' : {
-        'roll over at start-up'           : vdr( vtype='boolean', default=True ),
-        'rolling archive length'          : vdr( vtype='integer', vmin=1, default=5 ),
-        'maximum size in bytes'           : vdr( vtype='integer', vmin=1000, default=1000000 ),
+    'cylc': {
+        'UTC mode': vdr(vtype='boolean', default=False),
+        'event hooks': {
+            'startup handler': vdr(vtype='string_list', default=[]),
+            'timeout handler': vdr(vtype='string_list', default=[]),
+            'shutdown handler': vdr(vtype='string_list', default=[]),
+            'timeout': vdr(vtype='interval_minutes'),
+            'abort on timeout': vdr(vtype='boolean', default=False),
         },
+    },
 
-    'documentation' : {
-        'files' : {
-            'html index'                  : vdr( vtype='string', default="$CYLC_DIR/doc/index.html" ),
-            'pdf user guide'              : vdr( vtype='string', default="$CYLC_DIR/doc/pdf/cug-pdf.pdf" ),
-            'multi-page html user guide'  : vdr( vtype='string', default="$CYLC_DIR/doc/html/multi/cug-html.html" ),
-            'single-page html user guide' : vdr( vtype='string', default="$CYLC_DIR/doc/html/single/cug-html.html" ),
-            },
-        'urls' : {
-            'internet homepage'           : vdr( vtype='string', default="http://cylc.github.com/cylc/" ),
-            'local index'                 : vdr( vtype='string', default=None ),
-            },
-        },
+    'suite logging': {
+        'roll over at start-up': vdr(vtype='boolean', default=True),
+        'rolling archive length': vdr(vtype='integer', vmin=1, default=5),
+        'maximum size in bytes': vdr(
+            vtype='integer', vmin=1000, default=1000000),
+    },
 
-    'document viewers' : {
-        'pdf'                             : vdr( vtype='string', default="evince" ),
-        'html'                            : vdr( vtype='string', default="firefox" ),
+    'documentation': {
+        'files': {
+            'html index': vdr(
+                vtype='string', default="$CYLC_DIR/doc/index.html"),
+            'pdf user guide': vdr(
+                vtype='string', default="$CYLC_DIR/doc/pdf/cug-pdf.pdf"),
+            'multi-page html user guide': vdr(
+                vtype='string',
+                default="$CYLC_DIR/doc/html/multi/cug-html.html"),
+            'single-page html user guide': vdr(
+                vtype='string',
+                default="$CYLC_DIR/doc/html/single/cug-html.html"),
         },
-    'editors' : {
-        'terminal'                        : vdr( vtype='string', default="vim" ),
-        'gui'                             : vdr( vtype='string', default="gvim -f" ),
+        'urls': {
+            'internet homepage': vdr(
+                vtype='string', default="http://cylc.github.com/cylc/"),
+            'local index': vdr(vtype='string', default=None),
         },
+    },
 
-    'pyro' : {
-        'base port'                       : vdr( vtype='integer', default=7766 ),
-        'maximum number of ports'         : vdr( vtype='integer', default=100 ),
-        'ports directory'                 : vdr( vtype='string', default="$HOME/.cylc/ports/" ),
-        },
+    'document viewers': {
+        'pdf': vdr(vtype='string', default="evince"),
+        'html': vdr(vtype='string', default="firefox"),
+    },
+    'editors': {
+        'terminal': vdr(vtype='string', default="vim"),
+        'gui': vdr(vtype='string', default="gvim -f"),
+    },
+
+    'pyro': {
+        'base port': vdr(vtype='integer', default=7766),
+        'maximum number of ports': vdr(vtype='integer', default=100),
+        'ports directory': vdr(vtype='string', default="$HOME/.cylc/ports/"),
+    },
+
+    'monitor': {
+        'sort order': vdr(vtype='string',
+                          options=["alphanumeric", "definition"],
+                          default="definition"),
+    },
 
-    'hosts' : {
-        'localhost' : {
-            'run directory'               : vdr( vtype='string', default="$HOME/cylc-run" ),
-            'work directory'              : vdr( vtype='string', default="$HOME/cylc-run" ),
-            'task communication method'   : vdr( vtype='string', options=[ "pyro", "ssh", "poll"], default="pyro" ),
-            'remote copy template'        : vdr( vtype='string', default='scp -oBatchMode=yes -oConnectTimeout=10' ),
-            'remote shell template'       : vdr( vtype='string', default='ssh -oBatchMode=yes -oConnectTimeout=10' ),
-            'use login shell'             : vdr( vtype='boolean', default=True ),
-            'cylc executable'             : vdr( vtype='string', default='cylc'  ),
-            'global init-script'          : vdr( vtype='string', default='' ),
-            'copyable environment variables': vdr(vtype='string_list', default=[]),
+    'hosts': {
+        'localhost': {
+            'run directory': vdr(vtype='string', default="$HOME/cylc-run"),
+            'work directory': vdr(vtype='string', default="$HOME/cylc-run"),
+            'task communication method': vdr(
+                vtype='string',
+                options=["pyro", "ssh", "poll"], default="pyro"),
+            'remote copy template': vdr(
+                vtype='string',
+                default='scp -oBatchMode=yes -oConnectTimeout=10'),
+            'remote shell template': vdr(
+                vtype='string',
+                default='ssh -oBatchMode=yes -oConnectTimeout=10'),
+            'use login shell': vdr(vtype='boolean', default=True),
+            'cylc executable': vdr(vtype='string', default='cylc'),
+            'global init-script': vdr(vtype='string', default=''),
+            'copyable environment variables': vdr(
+                vtype='string_list', default=[]),
+            'retrieve job logs': vdr(vtype='boolean', default=False),
+            'retrieve job logs command': vdr(
+                vtype='string', default='rsync -a'),
+            'retrieve job logs max size': vdr(vtype='string'),
+            'retrieve job logs retry delays': vdr(
+                vtype='interval_minutes_list', default=[]),
+            'task event handler retry delays': vdr(
+                vtype='interval_minutes_list', default=[]),
+            'local tail command template': vdr(
+                vtype='string', default="tail -n +1 -F %(filename)s"),
+            'remote tail command template': vdr(
+                vtype='string',
+                default=(
+                    "tail --pid=`ps h -o ppid $$" +
+                    " | sed -e s/[[:space:]]//g` -n +1 -F %(filename)s")),
+            # Template for tail commands on remote files.  On signal to "ssh"
+            # client, a signal is sent to "sshd" on server.  However, "sshd"
+            # cannot send a signal to the "tail" command, because it is not a
+            # terminal. Apparently, we can use "ssh -t" or "ssh -tt", but that
+            # just causes the command to hang here for some reason. The easiest
+            # solution is to use the "--pid=PID" option of the "tail" command,
+            # so it dies as soon as PID dies. Note: if remote login shell is
+            # bash/ksh, we can use $PPID instead of `ps...` command, but we
+            # have to support login shell "tcsh" too.
+            'batch systems': {
+                '__MANY__': {
+                    'err tailer': vdr(vtype='string'),
+                    'out tailer': vdr(vtype='string'),
+                    'err viewer': vdr(vtype='string'),
+                    'out viewer': vdr(vtype='string'),
+                },
             },
-        '__MANY__' : {
-            'run directory'               : vdr( vtype='string'  ),
-            'work directory'              : vdr( vtype='string'  ),
-            'task communication method'   : vdr( vtype='string', options=[ "pyro", "ssh", "poll"] ),
-            'remote copy template'        : vdr( vtype='string'  ),
-            'remote shell template'       : vdr( vtype='string'  ),
-            'use login shell'             : vdr( vtype='boolean' ),
-            'cylc executable'             : vdr( vtype='string'  ),
-            'global init-script'          : vdr( vtype='string'  ),
-            'copyable environment variables': vdr(vtype='string_list', default=[]),
+        },
+        '__MANY__': {
+            'run directory': vdr(vtype='string'),
+            'work directory': vdr(vtype='string'),
+            'task communication method': vdr(
+                vtype='string', options=["pyro", "ssh", "poll"]),
+            'remote copy template': vdr(vtype='string'),
+            'remote shell template': vdr(vtype='string'),
+            'use login shell': vdr(vtype='boolean'),
+            'cylc executable': vdr(vtype='string'),
+            'global init-script': vdr(vtype='string'),
+            'copyable environment variables': vdr(
+                vtype='string_list', default=[]),
+            'retrieve job logs': vdr(vtype='boolean'),
+            'retrieve job logs command': vdr(vtype='string'),
+            'retrieve job logs max size': vdr(vtype='string'),
+            'retrieve job logs retry delays': vdr(
+                vtype='interval_minutes_list'),
+            'task event handler retry delays': vdr(
+                vtype='interval_minutes_list'),
+            'local tail command template': vdr(vtype='string'),
+            'remote tail command template': vdr(vtype='string'),
+            'batch systems': {
+                '__MANY__': {
+                    'err tailer': vdr(vtype='string'),
+                    'out tailer': vdr(vtype='string'),
+                    'out viewer': vdr(vtype='string'),
+                    'err viewer': vdr(vtype='string'),
+                },
             },
         },
+    },
+
+    'task events': {
+        'execution timeout': vdr(vtype='interval_minutes'),
+        'handlers': vdr(vtype='string_list', default=[]),
+        'handler events': vdr(vtype='string_list', default=[]),
+        'handler retry delays': vdr(vtype='interval_minutes_list'),
+        'mail events': vdr(vtype='string_list', default=[]),
+        'mail from': vdr(vtype='string'),
+        'mail retry delays': vdr(vtype='interval_minutes_list', default=[]),
+        'mail smtp': vdr(vtype='string'),
+        'mail to': vdr(vtype='string'),
+        'register job logs retry delays': vdr(
+            vtype='interval_minutes_list', default=[]),
+        'reset timer': vdr(vtype='boolean', default=False),
+        'submission timeout': vdr(vtype='interval_minutes'),
+    },
 
     'test battery': {
         'remote host with shared fs': vdr(vtype='string'),
         'remote host': vdr(vtype='string'),
         'batch systems': {
-            'loadleveler': {
-                'host': vdr(vtype='string'),
-                'directives': {'__MANY__': vdr(vtype='string')},
-            },
-            'lsf': {
-                'host': vdr(vtype='string'),
-                'directives': {'__MANY__': vdr(vtype='string')},
-            },
-            'pbs': {
-                'host': vdr(vtype='string'),
-                'directives': {'__MANY__': vdr(vtype='string')},
-            },
-            'sge': {
-                'host': vdr(vtype='string'),
-                'directives': {'__MANY__': vdr(vtype='string')},
-            },
-            'slurm': {
+            '__MANY__': {
                 'host': vdr(vtype='string'),
+                'out viewer': vdr(vtype='string'),
+                'err viewer': vdr(vtype='string'),
                 'directives': {'__MANY__': vdr(vtype='string')},
             },
         },
     },
 
-    'suite host self-identification' : {
-        'method'                          : vdr( vtype='string', options=["name","address","hardwired"], default="name" ),
-        'target'                          : vdr( vtype='string', default="google.com" ),
-        'host'                            : vdr( vtype='string' ),
-        },
+    'suite host self-identification': {
+        'method': vdr(
+            vtype='string',
+            options=["name", "address", "hardwired"],
+            default="name"),
+        'target': vdr(vtype='string', default="google.com"),
+        'host': vdr(vtype='string'),
+    },
+
+    'suite host scanning': {
+        'hosts': vdr(vtype='string_list', default=["localhost"])
+    },
 
-    'suite host scanning' : {
-        'hosts'                           : vdr( vtype='string_list', default=["localhost"]),
-        }
-    }
-
-
-def upg( cfg, descr ):
-    add_bin_dir = converter( lambda x: x + '/bin', "Added + '/bin' to path" )
-    use_ssh = converter( lambda x: "ssh", "set to 'ssh'" )
-    u = upgrader(cfg, descr )
-    u.deprecate( '5.1.1', ['editors','in-terminal'], ['editors','terminal'] )
-    u.deprecate( '5.1.1', ['task hosts'], ['hosts'] )
-    u.deprecate( '5.1.1', ['hosts','local'], ['hosts','localhost'] )
-    u.deprecate( '5.1.1', ['hosts','__MANY__', 'workspace directory'], ['hosts','__MANY__', 'workdirectory'] )
-    u.deprecate( '5.1.1', ['hosts','__MANY__', 'cylc directory'], ['hosts','__MANY__', 'cylc bin directory'], add_bin_dir )
-    u.obsolete(  '5.2.0', ['hosts','__MANY__', 'cylc bin directory'], ['hosts','__MANY__', 'cylc bin directory'] )
-    u.deprecate( '5.2.0', ['hosts','__MANY__', 'use ssh messaging'], ['hosts','__MANY__', 'task communication method'], use_ssh )
-    u.deprecate( '6.1.2', ['task messaging', 'connection timeout in seconds'], ['task messaging', 'connection timeout'] )
-    u.deprecate( '6.1.2', ['task messaging', 'retry interval in seconds'], ['task messaging', 'retry interval'] )
-    u.deprecate('6.4.0',
+    'authentication': {
+        # Allow owners to grant public shutdown rights at the most, not full
+        # control.
+        'public': vdr(
+            vtype='string',
+            options=PRIVILEGE_LEVELS[:PRIVILEGE_LEVELS.index('shutdown') + 1],
+            default="state-totals")
+    },
+}
+
+
+def upg(cfg, descr):
+    add_bin_dir = converter(lambda x: x + '/bin', "Added + '/bin' to path")
+    use_ssh = converter(lambda x: "ssh", "set to 'ssh'")
+    u = upgrader(cfg, descr)
+    u.deprecate('5.1.1', ['editors', 'in-terminal'], ['editors', 'terminal'])
+    u.deprecate('5.1.1', ['task hosts'], ['hosts'])
+    u.deprecate('5.1.1', ['hosts', 'local'], ['hosts', 'localhost'])
+    u.deprecate(
+        '5.1.1',
+        ['hosts', '__MANY__', 'workspace directory'],
+        ['hosts', '__MANY__', 'workdirectory'])
+    u.deprecate(
+        '5.1.1',
+        ['hosts', '__MANY__', 'cylc directory'],
+        ['hosts', '__MANY__', 'cylc bin directory'],
+        add_bin_dir)
+    u.obsolete(
+        '5.2.0',
+        ['hosts', '__MANY__', 'cylc bin directory'],
+        ['hosts', '__MANY__', 'cylc bin directory'])
+    u.deprecate(
+        '5.2.0',
+        ['hosts', '__MANY__', 'use ssh messaging'],
+        ['hosts', '__MANY__', 'task communication method'],
+        use_ssh)
+    u.deprecate(
+        '6.1.2',
+        ['task messaging', 'connection timeout in seconds'],
+        ['task messaging', 'connection timeout'])
+    u.deprecate(
+        '6.1.2',
+        ['task messaging', 'retry interval in seconds'],
+        ['task messaging', 'retry interval'])
+    u.deprecate(
+        '6.4.0',
         ['runtime', '__MANY__', 'global initial scripting'],
         ['runtime', '__MANY__', 'global init-script'])
     for batch_sys_name in ['loadleveler', 'lsf', 'pbs', 'sge', 'slurm']:
-        u.deprecate('6.4.1',
+        u.deprecate(
+            '6.4.1',
             ['test battery', 'directives', batch_sys_name + ' host'],
             ['test battery', 'batch systems', batch_sys_name, 'host'])
-        u.deprecate('6.4.1',
+        u.deprecate(
+            '6.4.1',
             ['test battery', 'directives', batch_sys_name + ' directives'],
             ['test battery', 'batch systems', batch_sys_name, 'directives'])
     u.obsolete('6.4.1', ['test battery', 'directives'])
     u.upgrade()
 
-class GlobalConfigError( Exception ):
-    def __init__( self, msg ):
+
+class GlobalConfigError(Exception):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
 
-class GlobalConfig( config ):
+
+class GlobalConfig(config):
     """
     Handle global (all suites) site and user configuration for cylc.
     User file values override site file values.
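
The new 'local tail command template' and 'remote tail command template' defaults further up in this hunk read as ordinary Python %-format strings with a single named key, so the substitution presumably looks along these lines (the log path is made up for illustration):

    local_template = "tail -n +1 -F %(filename)s"
    command = local_template % {
        "filename": "$HOME/cylc-run/my.suite/log/job/job.out"}
    # command == "tail -n +1 -F $HOME/cylc-run/my.suite/log/job/job.out"
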
@@ -218,6 +352,7 @@ class GlobalConfig( config ):
                 print "Loading site/user config files"
             cls._DEFAULT = cls(SPEC, upg)
             conf_path_str = os.getenv("CYLC_CONF_PATH")
+            count = 0
             if conf_path_str is None:
                 # CYLC_CONF_PATH not defined, use default locations
                 for old_base, conf_dir in [
@@ -226,55 +361,68 @@ class GlobalConfig( config ):
                     for base in [cls.CONF_BASE, old_base]:
                         file_name = os.path.join(conf_dir, base)
                         if os.access(file_name, os.F_OK | os.R_OK):
-                            cls._DEFAULT.loadcfg(
-                                file_name, "global config", silent=True)
+                            try:
+                                cls._DEFAULT.loadcfg(
+                                    file_name, "global config")
+                            except ParsecError as exc:
+                                if count == 0:
+                                    sys.stderr.write(
+                                        "WARNING: ignoring bad site config %s:"
+                                        "\n%s\n" % (file_name, str(exc)))
+                                else:
+                                    sys.stderr.write(
+                                        "ERROR: bad user config %s:\n" % (
+                                            file_name))
+                                    raise
+                            count += 1
                             break
             elif conf_path_str:
                 # CYLC_CONF_PATH defined with a value
                 for path in conf_path_str.split(os.pathsep):
                     file_name = os.path.join(path, cls.CONF_BASE)
                     if os.access(file_name, os.F_OK | os.R_OK):
-                        cls._DEFAULT.loadcfg(
-                            file_name, "global config", silent=True)
+                        cls._DEFAULT.loadcfg(file_name, "global config")
             cls._DEFAULT.transform()
         return cls._DEFAULT
-        
 
-    def get_derived_host_item( self, suite, item, host=None, owner=None, replace=False ):
+    def get_derived_host_item(
+            self, suite, item, host=None, owner=None, replace=False):
         """Compute hardwired paths relative to the configurable top dirs."""
 
         # suite run dir
-        srdir = os.path.join( self.get_host_item( 'run directory',  host, owner, replace ), suite )
+        srdir = os.path.join(
+            self.get_host_item('run directory', host, owner, replace), suite)
         # suite workspace
-        swdir = os.path.join( self.get_host_item( 'work directory', host, owner, replace ), suite )
+        swdir = os.path.join(
+            self.get_host_item('work directory', host, owner, replace), suite)
 
         if item == 'suite run directory':
             value = srdir
 
         elif item == 'suite log directory':
-            value = os.path.join( srdir, 'log', 'suite' )
+            value = os.path.join(srdir, 'log', 'suite')
 
         elif item == 'suite job log directory':
-            value = os.path.join( srdir, 'log', 'job' )
+            value = os.path.join(srdir, 'log', 'job')
 
         elif item == 'suite config log directory':
-            value = os.path.join( srdir, 'log', 'suiterc' )
+            value = os.path.join(srdir, 'log', 'suiterc')
 
         elif item == 'suite state directory':
-            value = os.path.join( srdir, 'state' )
+            value = os.path.join(srdir, 'state')
 
         elif item == 'suite work directory':
-            value = os.path.join( swdir, 'work' )
+            value = os.path.join(swdir, 'work')
 
         elif item == 'suite share directory':
-            value = os.path.join( swdir, 'share' )
+            value = os.path.join(swdir, 'share')
 
         else:
-            raise GlobalConfigError( "Illegal derived item: " + item )
+            raise GlobalConfigError("Illegal derived item: " + item)
 
         return value
 
-    def get_host_item( self, item, host=None, owner=None, replace=False ):
+    def get_host_item(self, item, host=None, owner=None, replace=False):
         """This allows hosts with no matching entry in the config file
         to default to appropriately modified localhost settings."""
 
@@ -297,7 +445,7 @@ class GlobalConfig( config ):
             else:
                 # try for a pattern match
                 for h in cfg['hosts']:
-                    if re.match( h, host ):
+                    if re.match(h, host):
                         host_key = h
                         break
         modify_dirs = False
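
As the loop above shows, a host section name in the global config is tried first as an exact key and then as a regular expression against the target host, falling back to (modified) localhost settings when nothing matches. A sketch of that lookup, with hypothetical host names and patterns:

    import re

    hosts_cfg = {'localhost': {}, r'hpc-login\d+': {}, 'archive.example.com': {}}

    def match_host_key(host, hosts_cfg):
        if host in hosts_cfg:
            return host
        for pattern in hosts_cfg:
            if re.match(pattern, host):
                return pattern
        return 'localhost'  # no entry: default to modified localhost settings

    assert match_host_key('hpc-login3', hosts_cfg) == r'hpc-login\d+'
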
@@ -310,40 +458,42 @@ class GlobalConfig( config ):
             value = cfg['hosts']['localhost'][item]
             modify_dirs = True
 
-        if value and ( 'directory' in item ) and ( modify_dirs or owner != user or replace ):
+        if value and ('directory' in item) and (
+                modify_dirs or owner != user or replace):
             # replace local home dir with $HOME for evaluation on other host
-            value = value.replace( os.environ['HOME'], '$HOME' )
+            value = value.replace(os.environ['HOME'], '$HOME')
 
         return value
 
-    def roll_directory( self, d, name, archlen=0 ):
+    def roll_directory(self, d, name, archlen=0):
         """
         Create a directory after rolling back any previous instances of it.
         e.g. if archlen = 2 we keep: d, d.1, d.2. If 0 keep no old ones.
         """
-        for n in range( archlen, -1, -1 ): # archlen...0
+        for n in range(archlen, -1, -1):  # archlen...0
             if n > 0:
-                dpath = d+'.'+str(n)
+                dpath = d + '.' + str(n)
             else:
                 dpath = d
-            if os.path.exists( dpath ):
+            if os.path.exists(dpath):
                 if n >= archlen:
                     # remove oldest backup
-                    shutil.rmtree( dpath )
+                    shutil.rmtree(dpath)
                 else:
                     # roll others over
-                    os.rename( dpath, d + '.' + str(n+1) )
-        self.create_directory( d, name )
+                    os.rename(dpath, d + '.' + str(n + 1))
+        self.create_directory(d, name)
 
-    def create_directory( self, d, name ):
+    def create_directory(self, d, name):
         try:
-            mkdir_p( d )
+            mkdir_p(d)
         except Exception, x:
             print >> sys.stderr, str(x)
-            raise GlobalConfigError( 'Failed to create directory "' + name + '"' )
+            raise GlobalConfigError(
+                'Failed to create directory "' + name + '"')
 
-    def create_cylc_run_tree( self, suite ):
-        """Create all top-level cylc-run output directories on the suite host."""
+    def create_cylc_run_tree(self, suite):
+        """Create all top-level cylc-run output dirs on the suite host."""
 
         if cylc.flags.verbose:
             print 'Creating the suite output tree:'
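
roll_directory's docstring above gives the intent ('if archlen = 2 we keep: d, d.1, d.2'); the mechanics are a delete-oldest-then-shift cascade followed by recreating d. A dry-run sketch of the same arithmetic (the real method only touches paths that actually exist):

    def planned_rolls(d, archlen=2):
        # Actions roll_directory would take, oldest backup first.
        plan = []
        for n in range(archlen, -1, -1):              # archlen ... 0
            dpath = d if n == 0 else d + '.' + str(n)
            if n >= archlen:
                plan.append(('remove', dpath))        # drop the oldest backup
            else:
                plan.append(('rename', dpath, d + '.' + str(n + 1)))
        plan.append(('create', d))
        return plan

    # planned_rolls('run') ->
    # [('remove', 'run.2'), ('rename', 'run.1', 'run.2'),
    #  ('rename', 'run', 'run.1'), ('create', 'run')]
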
@@ -353,9 +503,10 @@ class GlobalConfig( config ):
         item = 'suite run directory'
         if cylc.flags.verbose:
             print ' +', item
-        idir = self.get_derived_host_item( suite, item )
+        idir = self.get_derived_host_item(suite, item)
         if cfg['enable run directory housekeeping']:
-            self.roll_directory( idir, item, cfg['run directory rolling archive length'] )
+            self.roll_directory(
+                idir, item, cfg['run directory rolling archive length'])
 
         for item in [
                 'suite log directory',
@@ -366,19 +517,19 @@ class GlobalConfig( config ):
                 'suite share directory']:
             if cylc.flags.verbose:
                 print ' +', item
-            idir = self.get_derived_host_item( suite, item )
-            self.create_directory( idir, item )
+            idir = self.get_derived_host_item(suite, item)
+            self.create_directory(idir, item)
 
         item = 'temporary directory'
         value = cfg[item]
         if value:
-            self.create_directory( value, item )
+            self.create_directory(value, item)
 
         item = '[pyro]ports directory'
         value = cfg['pyro']['ports directory']
-        self.create_directory( value, item )
+        self.create_directory(value, item)
 
-    def get_tmpdir( self ):
+    def get_tmpdir(self):
         """Make a new temporary directory and arrange for it to be
         deleted automatically when we're finished with it. Call this
         explicitly just before use to ensure the directory is not
@@ -391,8 +542,8 @@ class GlobalConfig( config ):
         cfg = self.get()
         tdir = cfg['temporary directory']
         if tdir:
-            tdir = expandvars( tdir )
-            tmpdir = mkdtemp(prefix="cylc-", dir=expandvars(tdir) )
+            tdir = expandvars(tdir)
+            tmpdir = mkdtemp(prefix="cylc-", dir=expandvars(tdir))
         else:
             tmpdir = mkdtemp(prefix="cylc-")
         # self-cleanup
@@ -401,7 +552,7 @@ class GlobalConfig( config ):
         cfg['temporary directory'] = tmpdir
         return tmpdir
 
-    def transform( self ):
+    def transform(self):
         # host item values of None default to modified localhost values
         cfg = self.get()
 
@@ -411,19 +562,21 @@ class GlobalConfig( config ):
             for item, value in cfg['hosts'][host].items():
                 newvalue = value or cfg['hosts']['localhost'][item]
                 if newvalue and 'directory' in item:
-                    # replace local home dir with $HOME for evaluation on other host
-                    newvalue = newvalue.replace( os.environ['HOME'], '$HOME' )
+                    # replace local home dir with $HOME for evaluation on other
+                    # host
+                    newvalue = newvalue.replace(os.environ['HOME'], '$HOME')
                 cfg['hosts'][host][item] = newvalue
 
         # Expand environment variables and ~user in LOCAL file paths.
-        for key,val in cfg['documentation']['files'].items():
-            cfg['documentation']['files'][key] = expandvars( val )
+        for key, val in cfg['documentation']['files'].items():
+            cfg['documentation']['files'][key] = expandvars(val)
 
-        cfg['pyro']['ports directory'] = expandvars( cfg['pyro']['ports directory'] )
+        cfg['pyro']['ports directory'] = expandvars(
+            cfg['pyro']['ports directory'])
 
-        for key,val in cfg['hosts']['localhost'].items():
+        for key, val in cfg['hosts']['localhost'].items():
             if val and 'directory' in key:
-                cfg['hosts']['localhost'][key] = expandvars( val )
+                cfg['hosts']['localhost'][key] = expandvars(val)
 
 
 GLOBAL_CFG = GlobalConfig.default()
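
For reference, the derived items handled by get_derived_host_item above are plain joins under the configurable run and work directories; with the shipped defaults ($HOME/cylc-run for both), a suite called 'my.suite' resolves roughly as in this sketch (the suite name and defaults are only for illustration):

    import os

    def derived_paths(suite, run_dir='$HOME/cylc-run', work_dir='$HOME/cylc-run'):
        srdir = os.path.join(run_dir, suite)    # suite run directory
        swdir = os.path.join(work_dir, suite)   # suite workspace
        return {
            'suite run directory': srdir,
            'suite log directory': os.path.join(srdir, 'log', 'suite'),
            'suite job log directory': os.path.join(srdir, 'log', 'job'),
            'suite config log directory': os.path.join(srdir, 'log', 'suiterc'),
            'suite state directory': os.path.join(srdir, 'state'),
            'suite work directory': os.path.join(swdir, 'work'),
            'suite share directory': os.path.join(swdir, 'share'),
        }

    # derived_paths('my.suite')['suite log directory']
    # == '$HOME/cylc-run/my.suite/log/suite'
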
diff --git a/lib/cylc/cfgspec/suite.py b/lib/cylc/cfgspec/suite.py
index 3826a98..6be43a0 100644
--- a/lib/cylc/cfgspec/suite.py
+++ b/lib/cylc/cfgspec/suite.py
@@ -35,13 +35,17 @@ from isodatetime.data import Calendar, TimePoint
 from isodatetime.parsers import TimePointParser, DurationParser
 from cylc.cycling.integer import REC_INTERVAL as REC_INTEGER_INTERVAL
 
+from cylc.cfgspec.utils import coerce_interval
+from cylc.cfgspec.utils import coerce_interval_list
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.network import PRIVILEGE_LEVELS
+
 "Define all legal items and values for cylc suite definition files."
 
-interval_parser = DurationParser()
 
-def _coerce_cycleinterval( value, keys, args ):
+def _coerce_cycleinterval(value, keys, args):
     """Coerce value to a cycle interval."""
-    value = _strip_and_unquote( keys, value )
+    value = _strip_and_unquote(keys, value)
     if value.isdigit():
         # Old runahead limit format.
         set_syntax_version(VERSION_PREV,
@@ -64,9 +68,13 @@ def _coerce_cycleinterval( value, keys, args ):
                            keys[:-1], keys[-1], value))
     return value
 
-def _coerce_cycletime( value, keys, args ):
+
+def _coerce_cycletime(value, keys, args):
     """Coerce value to a cycle point."""
-    value = _strip_and_unquote( keys, value )
+    if value == "now":
+        # Handle this later in config.py when the suite UTC mode is known.
+        return value
+    value = _strip_and_unquote(keys, value)
     if re.match(r"\d+$", value):
         # Could be an old date-time cycle point format, or integer format.
         return value
@@ -91,9 +99,9 @@ def _coerce_cycletime( value, keys, args ):
     return value
 
 
-def _coerce_cycletime_format( value, keys, args ):
+def _coerce_cycletime_format(value, keys, args):
     """Coerce value to a cycle point format (either CCYYMM... or %Y%m...)."""
-    value = _strip_and_unquote( keys, value )
+    value = _strip_and_unquote(keys, value)
     set_syntax_version(VERSION_NEW,
                        "use of [cylc]cycle point format",
                        exc_class=IllegalValueError,
@@ -101,7 +109,7 @@ def _coerce_cycletime_format( value, keys, args ):
     test_timepoint = TimePoint(year=2001, month_of_year=3, day_of_month=1,
                                hour_of_day=4, minute_of_hour=30,
                                second_of_minute=54)
-    if "/" in value or ":" in value:
+    if "/" in value:
         raise IllegalValueError("cycle point format", keys, value)
     if "%" in value:
         try:
@@ -126,9 +134,9 @@ def _coerce_cycletime_format( value, keys, args ):
     return value
 
 
-def _coerce_cycletime_time_zone( value, keys, args ):
+def _coerce_cycletime_time_zone(value, keys, args):
     """Coerce value to a cycle point time zone format - Z, +13, -0800..."""
-    value = _strip_and_unquote( keys, value )
+    value = _strip_and_unquote(keys, value)
     set_syntax_version(VERSION_NEW,
                        "use of [cylc]cycle point time zone format",
                        exc_class=IllegalValueError,
@@ -147,57 +155,12 @@ def _coerce_cycletime_time_zone( value, keys, args ):
     return value
 
 
-def _coerce_final_cycletime( value, keys, args ):
+def _coerce_final_cycletime(value, keys, args):
     """Coerce final cycle point."""
-    value = _strip_and_unquote( keys, value )
+    value = _strip_and_unquote(keys, value)
     return value
 
 
-def coerce_interval(value, keys, args, back_comp_unit_factor=1,
-                    check_syntax_version=True):
-    """Coerce an ISO 8601 interval (or number: back-comp) into seconds."""
-    value = _strip_and_unquote( keys, value )
-    try:
-        backwards_compat_value = float(value) * back_comp_unit_factor
-    except (TypeError, ValueError):
-        pass
-    else:
-        if check_syntax_version:
-            set_syntax_version(VERSION_PREV,
-                               "integer interval: %s" % itemstr(
-                                   keys[:-1], keys[-1], value))
-        return backwards_compat_value
-    try:
-        interval = interval_parser.parse(value)
-    except ValueError:
-        raise IllegalValueError("ISO 8601 interval", keys, value)
-    if check_syntax_version:
-        try:
-            set_syntax_version(VERSION_NEW,
-                               "ISO 8601 interval: %s" % itemstr(
-                                   keys[:-1], keys[-1], value))
-        except SyntaxVersionError as exc:
-            raise Exception(str(exc))
-    days, seconds = interval.get_days_and_seconds()
-    seconds += days * Calendar.default().SECONDS_IN_DAY
-    return seconds
-
-
-def coerce_interval_list(value, keys, args, back_comp_unit_factor=1,
-                         check_syntax_version=True):
-    """Coerce a list of intervals (or numbers: back-comp) into seconds."""
-    values_list = _strip_and_unquote_list( keys, value )
-    type_converter = (
-        lambda v: coerce_interval(
-            v, keys, args,
-            back_comp_unit_factor=back_comp_unit_factor,
-            check_syntax_version=check_syntax_version,
-        )
-    )
-    seconds_list = _expand_list( values_list, keys, type_converter, True )
-    return seconds_list
-
-
 coercers['cycletime'] = _coerce_cycletime
 coercers['cycletime_format'] = _coerce_cycletime_format
 coercers['cycletime_time_zone'] = _coerce_cycletime_time_zone
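
The coerce_interval helpers removed above (now imported from cylc.cfgspec.utils) reduce every interval setting to seconds: a bare number is scaled by back_comp_unit_factor for backwards compatibility, and an ISO 8601 duration is parsed and converted via its days and seconds. A worked sketch of just that arithmetic, with a couple of hand-converted durations standing in for isodatetime's DurationParser and an assumed 86400-second day:

    SECONDS_IN_DAY = 86400

    def interval_to_seconds(value, back_comp_unit_factor=1):
        # Old-style bare number: interpret in the legacy unit (e.g. 60 for
        # minutes) and return seconds.
        try:
            return float(value) * back_comp_unit_factor
        except (TypeError, ValueError):
            pass
        # ISO 8601 style: hand-converted here for illustration only.
        days, seconds = {'PT30S': (0, 30), 'PT1H': (0, 3600), 'P1D': (1, 0)}[value]
        return seconds + days * SECONDS_IN_DAY

    assert interval_to_seconds('5', back_comp_unit_factor=60) == 300.0
    assert interval_to_seconds('PT1H') == 3600
    assert interval_to_seconds('P1D') == 86400
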
@@ -214,208 +177,297 @@ coercers['interval_seconds_list'] = coerce_interval_list
 
 
 SPEC = {
-    'title'                                   : vdr( vtype='string', default="" ),
-    'description'                             : vdr( vtype='string', default="" ),
-    'URL'                                     : vdr( vtype='string', default="" ),
-    'cylc' : {
-        'UTC mode'                            : vdr( vtype='boolean', default=False),
-        'cycle point format'                  : vdr( vtype='cycletime_format', default=None),
-        'cycle point num expanded year digits': vdr( vtype='integer', default=0),
-        'cycle point time zone'               : vdr( vtype='cycletime_time_zone', default=None),
-        'required run mode'                   : vdr( vtype='string', options=['live','dummy','simulation'] ),
-        'force run mode'                      : vdr( vtype='string', options=['live','dummy','simulation'] ),
-        'abort if any task fails'             : vdr( vtype='boolean', default=False ),
-        'log resolved dependencies'           : vdr( vtype='boolean', default=False ),
-        'environment' : {
-            '__MANY__'                        : vdr( vtype='string' ),
+    'title': vdr(vtype='string', default=""),
+    'description': vdr(vtype='string', default=""),
+    'URL': vdr(vtype='string', default=""),
+    'cylc': {
+        'UTC mode': vdr(
+            vtype='boolean', default=GLOBAL_CFG.get(['cylc', 'UTC mode'])),
+        'cycle point format': vdr(
+            vtype='cycletime_format', default=None),
+        'cycle point num expanded year digits': vdr(
+            vtype='integer', default=0),
+        'cycle point time zone': vdr(
+            vtype='cycletime_time_zone', default=None),
+        'required run mode': vdr(
+            vtype='string', options=['live', 'dummy', 'simulation']),
+        'force run mode': vdr(
+            vtype='string', options=['live', 'dummy', 'simulation']),
+        'abort if any task fails': vdr(vtype='boolean', default=False),
+        'log resolved dependencies': vdr(vtype='boolean', default=False),
+        'environment': {
+            '__MANY__': vdr(vtype='string'),
+        },
+        'event hooks': {
+            'startup handler': vdr(
+                vtype='string_list',
+                default=GLOBAL_CFG.get([
+                    'cylc', 'event hooks', 'startup handler'])),
+            'timeout handler': vdr(
+                vtype='string_list',
+                default=GLOBAL_CFG.get([
+                    'cylc', 'event hooks', 'timeout handler'])),
+            'shutdown handler': vdr(
+                vtype='string_list',
+                default=GLOBAL_CFG.get([
+                    'cylc', 'event hooks', 'shutdown handler'])),
+            'timeout': vdr(
+                vtype='interval_minutes',
+                default=GLOBAL_CFG.get(['cylc', 'event hooks', 'timeout'])),
+            'reset timer': vdr(vtype='boolean', default=True),
+            'abort if startup handler fails': vdr(
+                vtype='boolean', default=False),
+            'abort if shutdown handler fails': vdr(
+                vtype='boolean', default=False),
+            'abort if timeout handler fails': vdr(
+                vtype='boolean', default=False),
+            'abort on timeout': vdr(
+                vtype='boolean',
+                default=GLOBAL_CFG.get([
+                    'cylc', 'event hooks', 'abort on timeout'])),
+        },
+        'simulation mode': {
+            'disable suite event hooks': vdr(vtype='boolean', default=True),
+        },
+        'dummy mode': {
+            'disable suite event hooks': vdr(vtype='boolean', default=True),
+        },
+        'reference test': {
+            'suite shutdown event handler': vdr(
+                vtype='string', default='cylc hook check-triggering'),
+            'required run mode': vdr(
+                vtype='string', options=['live', 'simulation', 'dummy']),
+            'allow task failures': vdr(vtype='boolean', default=False),
+            'expected task failures': vdr(vtype='string_list', default=[]),
+            'live mode suite timeout': vdr(
+                vtype='interval_minutes', default=60),
+            'dummy mode suite timeout': vdr(
+                vtype='interval_minutes', default=60),
+            'simulation mode suite timeout': vdr(
+                vtype='interval_minutes', default=60),
+        },
+        'authentication': {
+            # Allow owners to grant public shutdown rights at the most, not
+            # full control.
+            'public': vdr(
+                vtype='string',
+                options=PRIVILEGE_LEVELS[
+                    :PRIVILEGE_LEVELS.index('shutdown') + 1],
+                default=GLOBAL_CFG.get(['authentication', 'public']))
+        },
+    },
+    'scheduling': {
+        'initial cycle point': vdr(vtype='cycletime'),
+        'final cycle point': vdr(vtype='final_cycletime'),
+        'initial cycle point constraints': vdr(
+            vtype='string_list', default=[]),
+        'final cycle point constraints': vdr(vtype='string_list', default=[]),
+        'hold after point': vdr(vtype='cycletime'),
+        'cycling mode': vdr(
+            vtype='string',
+            default=Calendar.MODE_GREGORIAN,
+            options=(Calendar.MODES.keys() + ["integer"])),
+        'runahead limit': vdr(vtype='cycleinterval'),
+        'max active cycle points': vdr(vtype='integer', default=3),
+        'queues': {
+            'default': {
+                'limit': vdr(vtype='integer', default=0),
+            },
+            '__MANY__': {
+                'limit': vdr(vtype='integer', default=0),
+                'members': vdr(vtype='string_list', default=[]),
+            },
+        },
+        'special tasks': {
+            'clock-trigger': vdr(vtype='string_list', default=[]),
+            'external-trigger': vdr(vtype='string_list', default=[]),
+            'clock-expire': vdr(vtype='string_list', default=[]),
+            'sequential': vdr(vtype='string_list', default=[]),
+            'start-up': vdr(vtype='string_list', default=[]),
+            'cold-start': vdr(vtype='string_list', default=[]),
+            'exclude at start-up': vdr(vtype='string_list', default=[]),
+            'include at start-up': vdr(vtype='string_list', default=[]),
+        },
+        'dependencies': {
+            'graph': vdr(vtype='string'),
+            '__MANY__':
+            {
+                'graph': vdr(vtype='string'),
             },
-        'event hooks' : {
-            'startup handler'                 : vdr( vtype='string_list', default=[] ),
-            'timeout handler'                 : vdr( vtype='string_list', default=[] ),
-            'shutdown handler'                : vdr( vtype='string_list', default=[] ),
-            'timeout'                         : vdr( vtype='interval_minutes'  ),
-            'reset timer'                     : vdr( vtype='boolean', default=True ),
-            'abort if startup handler fails'  : vdr( vtype='boolean', default=False ),
-            'abort if shutdown handler fails' : vdr( vtype='boolean', default=False ),
-            'abort if timeout handler fails'  : vdr( vtype='boolean', default=False ),
-            'abort on timeout'                : vdr( vtype='boolean', default=False ),
+        },
+    },
+    'runtime': {
+        '__MANY__': {
+            'inherit': vdr(vtype='string_list', default=[]),
+            'title': vdr(vtype='string', default=""),
+            'description': vdr(vtype='string', default=""),
+            'URL': vdr(vtype='string', default=""),
+            'init-script': vdr(vtype='string'),
+            'env-script': vdr(vtype='string'),
+            'pre-script': vdr(vtype='string'),
+            'script': vdr(
+                vtype='string',
+                default='echo Dummy task; sleep $(cylc rnd 1 16)'),
+            'post-script': vdr(vtype='string'),
+            'retry delays': vdr(vtype='interval_minutes_list', default=[]),
+            'manual completion': vdr(vtype='boolean', default=False),
+            'extra log files': vdr(vtype='string_list', default=[]),
+            'enable resurrection': vdr(vtype='boolean', default=False),
+            'work sub-directory': vdr(
+                vtype='string',
+                default='$CYLC_TASK_CYCLE_POINT/$CYLC_TASK_NAME'),
+            'submission polling intervals': vdr(
+                vtype='interval_minutes_list', default=[]),
+            'execution polling intervals': vdr(
+                vtype='interval_minutes_list', default=[]),
+            'environment filter': {
+                'include': vdr(vtype='string_list'),
+                'exclude': vdr(vtype='string_list'),
             },
-        'simulation mode' : {
-            'disable suite event hooks'       : vdr( vtype='boolean', default=True ),
+            'simulation mode': {
+                'run time range': vdr(
+                    vtype='interval_seconds_list', default=[1, 16]),
+                'simulate failure': vdr(vtype='boolean', default=False),
+                'disable task event hooks': vdr(vtype='boolean', default=True),
+                'disable retries': vdr(vtype='boolean', default=True),
             },
-        'dummy mode' : {
-            'disable suite event hooks'       : vdr( vtype='boolean', default=True ),
+            'dummy mode': {
+                'script': vdr(
+                    vtype='string',
+                    default='echo Dummy task; sleep $(cylc rnd 1 16)'),
+                'disable pre-script': vdr(vtype='boolean', default=True),
+                'disable post-script': vdr(vtype='boolean', default=True),
+                'disable task event hooks': vdr(vtype='boolean', default=True),
+                'disable retries': vdr(vtype='boolean', default=True),
             },
-        'reference test' : {
-            'suite shutdown event handler'    : vdr( vtype='string', default='cylc hook check-triggering' ),
-            'required run mode'               : vdr( vtype='string', options=[ 'live','simulation','dummy'] ),
-            'allow task failures'             : vdr( vtype='boolean', default=False ),
-            'expected task failures'          : vdr( vtype='string_list', default=[] ),
-            'live mode suite timeout'         : vdr( vtype='interval_minutes', default=60 ),
-            'dummy mode suite timeout'        : vdr( vtype='interval_minutes', default=60 ),
-            'simulation mode suite timeout'   : vdr( vtype='interval_minutes', default=60 ),
+            'job submission': {
+                'method': vdr(vtype='string', default='background'),
+                'command template': vdr(vtype='string'),
+                'shell': vdr(vtype='string', default='/bin/bash'),
+                'retry delays': vdr(vtype='interval_minutes_list', default=[]),
             },
-        },
-    'scheduling' : {
-        'initial cycle point'                 : vdr(vtype='cycletime'),
-        'final cycle point'                   : vdr(vtype='final_cycletime'),
-        'initial cycle point constraints'     : vdr(vtype='string_list', default=[]),
-        'final cycle point constraints'       : vdr(vtype='string_list', default=[]),
-        'cycling mode'                        : vdr(vtype='string', default=Calendar.MODE_GREGORIAN, options=Calendar.MODES.keys() + ["integer"] ),
-        'runahead limit'                      : vdr(vtype='cycleinterval' ),
-        'max active cycle points'             : vdr(vtype='integer', default=3),
-        'queues' : {
-            'default' : {
-                'limit'                       : vdr( vtype='integer', default=0),
-                },
-            '__MANY__' : {
-                'limit'                       : vdr(vtype='integer', default=0 ),
-                'members'                     : vdr(vtype='string_list', default=[]),
-                },
+            'remote': {
+                'host': vdr(vtype='string'),
+                'owner': vdr(vtype='string'),
+                'suite definition directory': vdr(vtype='string'),
+                'retrieve job logs': vdr(vtype='boolean'),
+                'retrieve job logs max size': vdr(vtype='string'),
+                'retrieve job logs retry delays': vdr(
+                    vtype='interval_minutes_list'),
             },
-        'special tasks' : {
-            'clock-triggered'                 : vdr(vtype='string_list', default=[]),
-            'sequential'                      : vdr(vtype='string_list', default=[]),
-            'start-up'                        : vdr(vtype='string_list', default=[]),
-            'cold-start'                      : vdr(vtype='string_list', default=[]),
-            'exclude at start-up'             : vdr(vtype='string_list', default=[]),
-            'include at start-up'             : vdr(vtype='string_list', default=[]),
+            'event hooks': {
+                'expired handler': vdr(vtype='string_list', default=[]),
+                'submitted handler': vdr(vtype='string_list', default=[]),
+                'started handler': vdr(vtype='string_list', default=[]),
+                'succeeded handler': vdr(vtype='string_list', default=[]),
+                'failed handler': vdr(vtype='string_list', default=[]),
+                'submission failed handler': vdr(
+                    vtype='string_list', default=[]),
+                'warning handler': vdr(vtype='string_list', default=[]),
+                'retry handler': vdr(vtype='string_list', default=[]),
+                'submission retry handler': vdr(
+                    vtype='string_list', default=[]),
+                'submission timeout handler': vdr(
+                    vtype='string_list', default=[]),
+                'submission timeout': vdr(vtype='interval_minutes'),
+                'execution timeout handler': vdr(vtype='string_list'),
+                'execution timeout': vdr(vtype='interval_minutes'),
+                'reset timer': vdr(vtype='boolean'),
             },
-        'dependencies' : {
-            'graph'                           : vdr( vtype='string'),
-            '__MANY__' :
-            {
-                'graph'                       : vdr( vtype='string'),
-                },
+            'events': {
+                'execution timeout': vdr(vtype='interval_minutes'),
+                'handlers': vdr(vtype='string_list'),
+                'handler events': vdr(vtype='string_list'),
+                'handler retry delays': vdr(vtype='interval_minutes_list'),
+                'mail events': vdr(vtype='string_list'),
+                'mail from': vdr(vtype='string'),
+                'mail retry delays': vdr(vtype='interval_minutes_list'),
+                'mail smtp': vdr(vtype='string'),
+                'mail to': vdr(vtype='string'),
+                'register job logs retry delays': vdr(
+                    vtype='interval_minutes_list'),
+                'reset timer': vdr(vtype='boolean'),
+                'submission timeout': vdr(vtype='interval_minutes'),
             },
-        },
-    'runtime' : {
-        '__MANY__' : {
-            'inherit'                         : vdr( vtype='string_list', default=[] ),
-            'title'                           : vdr( vtype='string', default="" ),
-            'description'                     : vdr( vtype='string', default="" ),
-            'URL'                             : vdr( vtype='string', default="" ),
-            'init-script'                     : vdr( vtype='string' ),
-            'env-script'                      : vdr( vtype='string' ),
-            'pre-script'                      : vdr( vtype='string' ),
-            'script'                          : vdr( vtype='string', default='echo Dummy task; sleep $(cylc rnd 1 16)'),
-            'post-script'                     : vdr( vtype='string' ),
-            'retry delays'                    : vdr( vtype='interval_minutes_list', default=[] ),
-            'manual completion'               : vdr( vtype='boolean', default=False ),
-            'extra log files'                 : vdr( vtype='string_list', default=[] ),
-            'enable resurrection'             : vdr( vtype='boolean', default=False ),
-            'work sub-directory'              : vdr( vtype='string', default='$CYLC_TASK_CYCLE_POINT/$CYLC_TASK_NAME' ),
-            'submission polling intervals'    : vdr( vtype='interval_minutes_list', default=[] ),
-            'execution polling intervals'     : vdr( vtype='interval_minutes_list', default=[] ),
-            'environment filter' : {
-                'include'                     : vdr( vtype='string_list' ),
-                'exclude'                     : vdr( vtype='string_list' ),
+            'suite state polling': {
+                'user': vdr(vtype='string'),
+                'host': vdr(vtype='string'),
+                'interval': vdr(vtype='interval_seconds'),
+                'max-polls': vdr(vtype='integer'),
+                'run-dir': vdr(vtype='string'),
+                'template': vdr(vtype='string'),
+                'verbose mode': vdr(vtype='boolean'),
             },
-            'simulation mode' :  {
-                'run time range'              : vdr( vtype='interval_seconds_list', default=[1, 16]),
-                'simulate failure'            : vdr( vtype='boolean', default=False ),
-                'disable task event hooks'    : vdr( vtype='boolean', default=True ),
-                'disable retries'             : vdr( vtype='boolean', default=True ),
-                },
-            'dummy mode' : {
-                'script'                      : vdr( vtype='string', default='echo Dummy task; sleep $(cylc rnd 1 16)'),
-                'disable pre-script'          : vdr( vtype='boolean', default=True ),
-                'disable post-script'         : vdr( vtype='boolean', default=True ),
-                'disable task event hooks'       : vdr( vtype='boolean', default=True ),
-                'disable retries'                : vdr( vtype='boolean', default=True ),
-                },
-            'job submission' : {
-                'method'                      : vdr( vtype='string', default='background' ),
-                'command template'            : vdr( vtype='string' ),
-                'shell'                       : vdr( vtype='string',  default='/bin/bash' ),
-                'retry delays'                : vdr( vtype='interval_minutes_list', default=[] ),
-                },
-            'remote' : {
-                'host'                        : vdr( vtype='string' ),
-                'owner'                       : vdr( vtype='string' ),
-                'suite definition directory'  : vdr( vtype='string' ),
-                },
-            'event hooks' : {
-                'submitted handler'           : vdr( vtype='string_list', default=[] ),
-                'started handler'             : vdr( vtype='string_list', default=[] ),
-                'succeeded handler'           : vdr( vtype='string_list', default=[] ),
-                'failed handler'              : vdr( vtype='string_list', default=[] ),
-                'submission failed handler'   : vdr( vtype='string_list', default=[] ),
-                'warning handler'             : vdr( vtype='string_list', default=[] ),
-                'retry handler'               : vdr( vtype='string_list', default=[] ),
-                'submission retry handler'    : vdr( vtype='string_list', default=[] ),
-                'submission timeout handler'  : vdr( vtype='string_list', default=[] ),
-                'submission timeout'          : vdr( vtype='interval_minutes' ),
-                'execution timeout handler'   : vdr( vtype='string_list', default=[] ),
-                'execution timeout'           : vdr( vtype='interval_minutes'),
-                'reset timer'                 : vdr( vtype='boolean', default=False ),
-                },
-            'suite state polling' : {
-                'user'                        : vdr( vtype='string' ),
-                'host'                        : vdr( vtype='string' ),
-                'interval'                    : vdr( vtype='interval_seconds' ),
-                'max-polls'                   : vdr( vtype='integer' ),
-                'run-dir'                     : vdr( vtype='string' ),
-                'verbose mode'                : vdr( vtype='boolean' ),
-                },
-            'environment' : {
-                '__MANY__'                    : vdr( vtype='string' ),
-                },
-            'directives' : {
-                '__MANY__'                    : vdr( vtype='string' ),
-                },
-            'outputs' : {
-                '__MANY__'                    : vdr( vtype='string' ),
-                },
+            'environment': {
+                '__MANY__': vdr(vtype='string'),
             },
-        },
-    'visualization' : {
-        'initial cycle point'                 : vdr( vtype='cycletime' ),
-        'final cycle point'                   : vdr( vtype='final_cycletime' ),
-        'number of cycle points'              : vdr( vtype='integer', default=3 ),
-        'collapsed families'                  : vdr( vtype='string_list', default=[] ),
-        'use node color for edges'            : vdr( vtype='boolean', default=True ),
-        'use node color for labels'           : vdr( vtype='boolean', default=False ),
-        'default node attributes'             : vdr( vtype='string_list', default=['style=unfilled', 'color=black', 'shape=box']),
-        'default edge attributes'             : vdr( vtype='string_list', default=['color=black']),
-        'node groups' : {
-            '__MANY__'                        : vdr( vtype='string_list', default=[] ),
+            'directives': {
+                '__MANY__': vdr(vtype='string'),
             },
-        'node attributes' : {
-            '__MANY__'                        : vdr( vtype='string_list', default=[] ),
+            'outputs': {
+                '__MANY__': vdr(vtype='string'),
             },
         },
-    }
+    },
+    'visualization': {
+        'initial cycle point': vdr(vtype='cycletime'),
+        'final cycle point': vdr(vtype='final_cycletime'),
+        'number of cycle points': vdr(vtype='integer', default=3),
+        'collapsed families': vdr(vtype='string_list', default=[]),
+        'use node color for edges': vdr(vtype='boolean', default=True),
+        'use node color for labels': vdr(vtype='boolean', default=False),
+        'default node attributes': vdr(
+            vtype='string_list',
+            default=['style=unfilled', 'color=black', 'shape=box']),
+        'default edge attributes': vdr(
+            vtype='string_list', default=['color=black']),
+        'node groups': {
+            '__MANY__': vdr(vtype='string_list', default=[]),
+        },
+        'node attributes': {
+            '__MANY__': vdr(vtype='string_list', default=[]),
+        },
+    },
+}
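
    For reference, each leaf in the SPEC tree above is a parsec validator
    ("vdr") recording a value type and an optional default; parsec walks this
    spec over the sparse user settings to produce the dense configuration.
    A toy sketch of that walk, illustrative only, with (type, default) tuples
    standing in for vdr entries and not parsec's actual code:

        TOY_SPEC = {
            'visualization': {
                'number of cycle points': (int, 3),
                'collapsed families': (list, []),
            },
        }

        def expand(spec, sparse):
            """Fill a sparse user config with defaults from the toy spec."""
            dense = {}
            for key, sub in spec.items():
                if isinstance(sub, dict):
                    dense[key] = expand(sub, sparse.get(key, {}))
                else:
                    vtype, default = sub
                    dense[key] = vtype(sparse.get(key, default))
            return dense

        print(expand(TOY_SPEC, {'visualization': {'number of cycle points': '5'}}))
        # {'visualization': {'number of cycle points': 5, 'collapsed families': []}}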
 
-def upg( cfg, descr ):
-    u = upgrader( cfg, descr )
-    u.deprecate( '5.2.0', ['cylc','event handler execution'], ['cylc','event handler submission'] )
+
+def upg(cfg, descr):
+    u = upgrader(cfg, descr)
+    u.deprecate(
+        '5.2.0',
+        ['cylc', 'event handler execution'],
+        ['cylc', 'event handler submission'])
     # TODO - should abort if obsoleted items are encountered
-    u.obsolete( '5.4.7', ['scheduling','special tasks','explicit restart outputs'] )
-    u.obsolete( '5.4.11', ['cylc', 'accelerated clock'] )
-    u.obsolete( '6.0.0', ['visualization', 'runtime graph'] )
+    u.obsolete(
+        '5.4.7', ['scheduling', 'special tasks', 'explicit restart outputs'])
+    u.obsolete('5.4.11', ['cylc', 'accelerated clock'])
+    u.obsolete('6.0.0', ['visualization', 'runtime graph'])
     u.obsolete('6.1.3', ['visualization', 'enable live graph movie'])
-    u.obsolete( '6.0.0', ['development'] )
+    u.obsolete('6.0.0', ['development'])
     u.deprecate(
         '6.0.0',
-        ['scheduling', 'initial cycle time'], ['scheduling', 'initial cycle point'],
-        converter( lambda x: x, 'changed naming to reflect non-date-time cycling' )
-    )
+        ['scheduling', 'initial cycle time'],
+        ['scheduling', 'initial cycle point'],
+        converter(
+            lambda x: x, 'changed naming to reflect non-date-time cycling'))
     u.deprecate(
         '6.0.0',
-        ['scheduling', 'final cycle time'], ['scheduling', 'final cycle point'],
-        converter( lambda x: x, 'changed naming to reflect non-date-time cycling' )
-    )
+        ['scheduling', 'final cycle time'],
+        ['scheduling', 'final cycle point'],
+        converter(
+            lambda x: x, 'changed naming to reflect non-date-time cycling'))
     u.deprecate(
         '6.0.0',
-        ['visualization', 'initial cycle time'], ['visualization', 'initial cycle point'],
-        converter( lambda x: x, 'changed naming to reflect non-date-time cycling' )
-    )
+        ['visualization', 'initial cycle time'],
+        ['visualization', 'initial cycle point'],
+        converter(
+            lambda x: x, 'changed naming to reflect non-date-time cycling'))
     u.deprecate(
         '6.0.0',
-        ['visualization', 'final cycle time'], ['visualization', 'final cycle point'],
-        converter( lambda x: x, 'changed naming to reflect non-date-time cycling' )
-    )
+        ['visualization', 'final cycle time'],
+        ['visualization', 'final cycle point'],
+        converter(
+            lambda x: x, 'changed naming to reflect non-date-time cycling'))
     u.obsolete('6.0.0', ['cylc', 'job submission'])
     u.obsolete('6.0.0', ['cylc', 'event handler submission'])
     u.obsolete('6.0.0', ['cylc', 'poll and kill command submission'])
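
    The upg() function above, and in the next hunk, maps obsolete and renamed
    settings onto the current spec, for example the 6.5.0 rename of
    'clock-triggered' to 'clock-trigger'. A rough sketch of what such a
    nested-key rename has to do (a toy illustration, not parsec.upgrade's
    implementation):

        def rename_nested(cfg, old_keys, new_keys):
            """Move cfg[old_keys...] to cfg[new_keys...] if present (toy)."""
            node = cfg
            for key in old_keys[:-1]:
                if key not in node:
                    return  # nothing to upgrade
                node = node[key]
            if old_keys[-1] not in node:
                return
            value = node.pop(old_keys[-1])
            target = cfg
            for key in new_keys[:-1]:
                target = target.setdefault(key, {})
            target[new_keys[-1]] = value

        cfg = {'scheduling': {'special tasks': {'clock-triggered': ['foo(PT1H)']}}}
        rename_nested(cfg,
                      ['scheduling', 'special tasks', 'clock-triggered'],
                      ['scheduling', 'special tasks', 'clock-trigger'])
        # cfg now holds the value under 'clock-trigger'.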
@@ -428,26 +480,42 @@ def upg( cfg, descr ):
         'initial scripting': 'init-script'
     }
     for old, new in dep.items():
-        u.deprecate('6.4.0',
+        u.deprecate(
+            '6.4.0',
             ['runtime', '__MANY__', old],
             ['runtime', '__MANY__', new],
             silent=True)
+        u.deprecate(
+            '6.4.0',
+            ['runtime', '__MANY__', 'dummy mode', old],
+            ['runtime', '__MANY__', 'dummy mode', new],
+            silent=True)
+    u.deprecate(
+        '6.5.0',
+        ['scheduling', 'special tasks', 'clock-triggered'],
+        ['scheduling', 'special tasks', 'clock-trigger'],
+    )
+    u.deprecate(
+        '6.5.0',
+        ['scheduling', 'special tasks', 'external-triggered'],
+        ['scheduling', 'special tasks', 'external-trigger'],
+    )
     u.upgrade()
 
     # Force pre cylc-6 "cycling = Yearly" type suites to the explicit
     # dependency heading form for which backward compatibility is provided:
-    #____________________________
+    # ___________________________
     # [scheduling]
     #    cycling = Yearly
     #    [[dependencies]]
     #        [[[2014,2]]]
-    #----------------------------
+    # ---------------------------
     # Same as (for auto upgrade):
-    #----------------------------
+    # ---------------------------
     # [scheduling]
     #    [[dependencies]]
     #        [[[Yearly(2014,2)]]]
-    #____________________________
+    # ___________________________
     try:
         old_cycling_mode = cfg['scheduling']['cycling']
     except:
@@ -465,7 +533,7 @@ def upg( cfg, descr ):
             pass
 
 
-class sconfig( config ):
+class sconfig(config):
     pass
 
 
@@ -473,11 +541,14 @@ suitecfg = None
 cfpath = None
 
 
-def get_suitecfg( fpath, force=False, tvars=[], tvars_file=None, write_proc=False ):
+def get_suitecfg(
+        fpath, force=False, tvars=[], tvars_file=None, write_proc=False):
     global suitecfg, cfpath
     if not suitecfg or fpath != cfpath or force:
         cfpath = fpath
         # TODO - write_proc should be in loadcfg
-        suitecfg = sconfig( SPEC, upg, tvars=tvars, tvars_file=tvars_file, write_proc=write_proc )
-        suitecfg.loadcfg( fpath, "suite definition", strict=True )
-        return suitecfg
+        suitecfg = sconfig(
+            SPEC, upg, tvars=tvars, tvars_file=tvars_file,
+            write_proc=write_proc)
+        suitecfg.loadcfg(fpath, "suite definition")
+    return suitecfg
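
    Note the behavioural fix in this hunk: previously "return suitecfg" sat
    inside the if block, so a second call with an unchanged fpath returned
    None; the return is now unconditional (and loadcfg() no longer passes
    strict=True). The cache-or-rebuild pattern, sketched with hypothetical
    names:

        _cached = None
        _cached_path = None

        def parse_file(path):
            # hypothetical stand-in for sconfig(SPEC, upg, ...).loadcfg(path, ...)
            return {'path': path}

        def get_config(path, force=False):
            global _cached, _cached_path
            if _cached is None or path != _cached_path or force:
                _cached_path = path
                _cached = parse_file(path)
            return _cached  # returned on every call, not only after a re-parse

        assert get_config('suite.rc') is get_config('suite.rc')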
diff --git a/lib/cylc/cfgspec/utils.py b/lib/cylc/cfgspec/utils.py
new file mode 100644
index 0000000..4220103
--- /dev/null
+++ b/lib/cylc/cfgspec/utils.py
@@ -0,0 +1,65 @@
+import re
+
+from parsec.validate import validator as vdr
+from parsec.validate import (
+    coercers, _strip_and_unquote, _strip_and_unquote_list, _expand_list,
+    IllegalValueError
+)
+from parsec.util import itemstr
+from parsec.upgrade import upgrader, converter
+from parsec.fileparse import parse
+from parsec.config import config
+from cylc.syntax_flags import (
+    set_syntax_version, VERSION_PREV, VERSION_NEW, SyntaxVersionError
+)
+from isodatetime.dumpers import TimePointDumper
+from isodatetime.data import Calendar, TimePoint
+from isodatetime.parsers import TimePointParser, DurationParser
+from cylc.cycling.integer import REC_INTERVAL as REC_INTEGER_INTERVAL
+
+interval_parser = DurationParser()
+
+
+def coerce_interval(value, keys, args, back_comp_unit_factor=1,
+                    check_syntax_version=True):
+    """Coerce an ISO 8601 interval (or number: back-comp) into seconds."""
+    value = _strip_and_unquote(keys, value)
+    try:
+        backwards_compat_value = float(value) * back_comp_unit_factor
+    except (TypeError, ValueError):
+        pass
+    else:
+        if check_syntax_version:
+            set_syntax_version(VERSION_PREV,
+                               "integer interval: %s" % itemstr(
+                                   keys[:-1], keys[-1], value))
+        return backwards_compat_value
+    try:
+        interval = interval_parser.parse(value)
+    except ValueError:
+        raise IllegalValueError("ISO 8601 interval", keys, value)
+    if check_syntax_version:
+        try:
+            set_syntax_version(VERSION_NEW,
+                               "ISO 8601 interval: %s" % itemstr(
+                                   keys[:-1], keys[-1], value))
+        except SyntaxVersionError as exc:
+            raise Exception(str(exc))
+    days, seconds = interval.get_days_and_seconds()
+    seconds += days * Calendar.default().SECONDS_IN_DAY
+    return seconds
+
+
+def coerce_interval_list(value, keys, args, back_comp_unit_factor=1,
+                         check_syntax_version=True):
+    """Coerce a list of intervals (or numbers: back-comp) into seconds."""
+    values_list = _strip_and_unquote_list(keys, value)
+    type_converter = (
+        lambda v: coerce_interval(
+            v, keys, args,
+            back_comp_unit_factor=back_comp_unit_factor,
+            check_syntax_version=check_syntax_version,
+        )
+    )
+    seconds_list = _expand_list(values_list, keys, type_converter, True)
+    return seconds_list
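
    The new cfgspec/utils.py module centralises interval coercion: a bare
    number is treated as the old pre-ISO-8601 syntax and scaled by
    back_comp_unit_factor, otherwise the string is parsed as an ISO 8601
    duration (via isodatetime) and converted to seconds. A simplified,
    self-contained stand-in, handling only the basic PTnHnMnS form unlike the
    real DurationParser, behaves roughly like this:

        import re

        def toy_coerce_interval(value, back_comp_unit_factor=1):
            try:
                return float(value) * back_comp_unit_factor  # old numeric syntax
            except ValueError:
                pass
            m = re.match(r'PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?$', value)
            if not m:
                raise ValueError("not a supported interval: %s" % value)
            hours, minutes, seconds = (int(g or 0) for g in m.groups())
            return hours * 3600 + minutes * 60 + seconds

        toy_coerce_interval('PT1H30M')                        # -> 5400
        toy_coerce_interval('30', back_comp_unit_factor=60)   # -> 1800.0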
diff --git a/lib/cylc/command_polling.py b/lib/cylc/command_polling.py
index 4385ace..942ee70 100644
--- a/lib/cylc/command_polling.py
+++ b/lib/cylc/command_polling.py
@@ -19,23 +19,35 @@
 import sys
 from time import sleep, time
 
-class poller( object ):
+
+class poller(object):
     """Encapsulates polling activity for cylc commands. Derived classes
     must override the check() method to test the polling condition."""
 
     @classmethod
-    def add_to_cmd_options( cls, parser, d_interval=60, d_max_polls=10 ):
+    def add_to_cmd_options(cls, parser, d_interval=60, d_max_polls=10):
         # add command line options for polling
-        parser.add_option( "--max-polls",
+        parser.add_option(
+            "--max-polls",
             help="Maximum number of polls (default " + str(d_max_polls) + ").",
-            metavar="INT", action="store", dest="max_polls", default=d_max_polls )
-        parser.add_option( "--interval",
-            help="Polling interval in seconds (default " + str(d_interval) + ").",
-            metavar="SECS", action="store", dest="interval", default=d_interval )
+            metavar="INT",
+            action="store",
+            dest="max_polls",
+            default=d_max_polls)
+        parser.add_option(
+            "--interval",
+            help=(
+                "Polling interval in seconds (default " + str(d_interval) +
+                ")."
+            ),
+            metavar="SECS",
+            action="store",
+            dest="interval",
+            default=d_interval)
 
-    def __init__( self, condition, interval, max_polls, args={} ):
+    def __init__(self, condition, interval, max_polls, args={}):
 
-        self.condition = condition # e.g. "suite stopped"
+        self.condition = condition  # e.g. "suite stopped"
 
         """check max_polls is an int"""
         try:
@@ -52,32 +64,32 @@ class poller( object ):
             sys.exit(1)
 
         self.n_polls = 0
-        self.args = args # any extra parameters needed by check()
+        self.args = args  # any extra parameters needed by check()
 
-    def poll( self ):
+    def poll(self):
         """Poll for the condition embodied by self.check().
         Return True if condition met, or False if polling exhausted."""
 
         if self.max_polls == 0:
             # exit 1 as we can't know if the condition is satisfied
-            sys.exit( "WARNING: nothing to do (--max-polls=0)" )
+            sys.exit("WARNING: nothing to do (--max-polls=0)")
         elif self.max_polls == 1:
-            sys.stdout.write( "checking " )
+            sys.stdout.write("checking ")
         else:
-            sys.stdout.write( "polling " )
-        sys.stdout.write( "for '" + self.condition + "'" )
+            sys.stdout.write("polling ")
+        sys.stdout.write("for '" + self.condition + "'")
 
         done = False
-        while ( not done and self.n_polls < self.max_polls ):
+        while (not done and self.n_polls < self.max_polls):
             self.n_polls += 1
             if self.check():
                 done = True
             else:
                 if self.max_polls > 1:
                     sys.stdout.write('.')
-                    sleep( self.interval )
+                    sleep(self.interval)
         if done:
-            sys.stdout.write( ": satisfied\n" )
+            sys.stdout.write(": satisfied\n")
             return True
         else:
             print
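
    command_polling.poller remains abstract here: as the docstring says,
    derived classes must supply check(). A minimal subclass might look like
    the following (illustrative only; it assumes cylc's lib directory is on
    PYTHONPATH, and the 'path' argument is invented for the example):

        import os
        from cylc.command_polling import poller

        class file_poller(poller):
            """Poll until a file exists."""
            def check(self):
                return os.path.exists(self.args['path'])

        p = file_poller("file exists", interval=5, max_polls=12,
                        args={'path': '/tmp/ready.flag'})
        if not p.poll():
            raise SystemExit(1)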
diff --git a/lib/cylc/command_prep.py b/lib/cylc/command_prep.py
deleted file mode 100644
index cb7b661..0000000
--- a/lib/cylc/command_prep.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import os
-from cylc.passphrase import passphrase
-from cylc.registration import localdb
-from cylc.suite_host import is_remote_host
-from cylc.owner import is_remote_user
-import cylc.flags
-
-"""This module used to handle pseudo-backward-compatibility command
-re-invocation. That's been dropped, so the module doesn't do much now;
-the remaining functionality could be used more sensibly."""
-
-class prep( object ):
-    def __init__( self, suite, options ):
-        self.options = options
-        self.suite = suite
-        self.suiterc = None
-        self.suitedir = None
-        if not is_remote_host( options.host ) and not is_remote_user( options.owner ):
-            self.db = localdb(file=options.db )
-            try:
-                self.suiterc = self.db.get_suiterc( suite )
-                self.suitedir = os.path.dirname( self.suiterc )
-            except Exception, x:
-                if cylc.flags.debug:
-                    raise
-                raise SystemExit(x)
-
-    def execute( self ):
-        # This did once execute the command re-invocation. Now the
-        # method name is misleading!
-        return self.get_suite()
-
-class prep_pyro( prep ):
-    def __init__( self, suite, options ):
-        prep.__init__( self, suite, options )
-        # get the suite passphrase
-        try:
-            self.pphrase = passphrase( self.suite,
-                    self.options.owner, self.options.host ).get( None, self.suitedir )
-        except Exception, x:
-            if cylc.flags.debug:
-                raise
-            raise SystemExit(x)
-
-    def get_suite( self ):
-        return self.suite, self.pphrase
-
-class prep_file( prep ):
-    def __init__( self, suite, options ):
-        prep.__init__( self, suite, options )
-
-    def get_suite( self ):
-        return self.suite, self.suiterc
-
-    def get_rcfiles( self ):
-        return self.db.get_rcfiles( self.suite )
diff --git a/lib/cylc/prerequisites/simplify.py b/lib/cylc/conditional_simplifier.py
similarity index 66%
rename from lib/cylc/prerequisites/simplify.py
rename to lib/cylc/conditional_simplifier.py
index f40dfa4..6a63b33 100644
--- a/lib/cylc/prerequisites/simplify.py
+++ b/lib/cylc/conditional_simplifier.py
@@ -19,24 +19,25 @@
 import re
 import ast
 import copy
+import sys
 
 
-class conditional_simplifier( object ):
+class ConditionalSimplifier(object):
     """A class to simplify logical expressions"""
 
-    def __init__( self, expr, clean ):
+    def __init__(self, expr, clean):
         self.raw_expression = expr
         self.clean_list = clean
-        self.nested_expr = self.format_expr( self.raw_expression )
+        self.nested_expr = self.format_expr(self.raw_expression)
 
-    def listify( self, message ):
+    def listify(self, message):
         """Convert a string containing a logical expression to a list"""
-        message = message.replace("'","\"")
+        message = message.replace("'", "\"")
         RE_CONDITIONALS = "(&|\||\(|\))"
         tokenised = re.split("(&|\||\(|\))", message)
         listified = ["["]
         for item in tokenised:
-            if item.strip() != "" and item.strip() not in ["(",")"]:
+            if item.strip() != "" and item.strip() not in ["(", ")"]:
                 listified.append("'" + item.strip() + "',")
             elif item.strip() == "(":
                 listified.append("[")
@@ -51,7 +52,7 @@ class conditional_simplifier( object ):
         listified = ast.literal_eval(listified)
         return listified
 
-    def get_bracketed( self, nest_me ):
+    def get_bracketed(self, nest_me):
         """Nest a list according to any brackets in it"""
         start = 0
         finish = len(nest_me)
@@ -67,23 +68,23 @@ class conditional_simplifier( object ):
             if nest_me[i] == ")":
                 finish = i
                 break
-        bracket_nested = nest_me[0:start+1]
-        bracket_nested.append(self.get_bracketed(nest_me[start+1:finish]))
+        bracket_nested = nest_me[0:start + 1]
+        bracket_nested.append(self.get_bracketed(nest_me[start + 1:finish]))
         bracket_nested.extend(nest_me[finish:len(nest_me)])
         return bracket_nested
 
-    def get_cleaned( self ):
+    def get_cleaned(self):
         """Return the simplified logical expression"""
         cleaned = self.nested_expr
         for item in self.clean_list:
-            cleaned = self.clean_expr( cleaned, item )
-        cleaned = self.flatten_nested_expr( cleaned )
+            cleaned = self.clean_expr(cleaned, item)
+        cleaned = self.flatten_nested_expr(cleaned)
         return cleaned
 
-    def nest_by_oper( self, nest_me, oper ):
+    def nest_by_oper(self, nest_me, oper):
         """Nest a list based on a specified logical operation"""
         found = False
-        for i in range(0,len(nest_me)):
+        for i in range(len(nest_me)):
             if isinstance(nest_me[i], list):
                 nest_me[i] = self.nest_by_oper(nest_me[i], oper)
             if nest_me[i] == oper:
@@ -92,57 +93,66 @@ class conditional_simplifier( object ):
         if len(nest_me) <= 3:
             return nest_me
         if found:
-            nested = nest_me[0:found-1]
-            nested += [nest_me[found-1:found+2]]
-            if (found+2) < len(nest_me):
-                nested += nest_me[found+2:]
+            nested = nest_me[0:found - 1]
+            nested += [nest_me[found - 1:found + 2]]
+            if (found + 2) < len(nest_me):
+                nested += nest_me[found + 2:]
             return self.nest_by_oper(nested, oper)
         else:
             return nest_me
 
-    def clean_expr( self, nested_list, criteria ):
-        """Return a list with entries specified by 'critria' removed"""
-        cleaned = copy.deepcopy( nested_list )
+    def clean_expr(self, nested_list, criterion):
+        """Return a list with entries specified by 'criterion' removed"""
+        cleaned = copy.deepcopy(nested_list)
+
         # Make sure that we don't have extraneous nesting.
         while (isinstance(cleaned, list) and len(cleaned) == 1 and
                isinstance(cleaned[0], list)):
             cleaned = cleaned[0]
 
-        # Recurse through the nested list and remove criteria.
-        found = None
-        if isinstance(cleaned, str) or len(cleaned)==1:
-            if cleaned == criteria:
+        if len(cleaned) == 1:
+            cleaned = cleaned[0]
+
+        if isinstance(cleaned, str):
+            if cleaned == criterion:
                 return ""
             else:
                 return cleaned
+
+        # Recurse through the nested list and remove criterion.
+        found = None
         for i in range(0, len(cleaned)):
             if isinstance(cleaned[i], list):
-                cleaned[i] = self.clean_expr(cleaned[i], criteria)
-            if cleaned[i] == criteria:
+                cleaned[i] = self.clean_expr(cleaned[i], criterion)
+            if cleaned[i] in [criterion, '']:
                 found = i
+                break
+
         if found is not None:
+            # e.g. [ 'foo', '|', 'bar', '|']
             if found == 0:
-                return self.clean_expr(cleaned[2], criteria)
-            elif found == 2:
-                return self.clean_expr(cleaned[0], criteria)
+                cleaned = cleaned[2:]
+            else:
+                del cleaned[found - 1:found + 1]
+            return self.clean_expr(cleaned, criterion)
         else:
             return cleaned
 
-    def format_expr( self, expr ):
+    def format_expr(self, expr):
         """Carry out list conversion and nesting of a logical expression in
         the correct order."""
-        listified = self.listify( expr )
-        bracketed = self.get_bracketed( listified )
-        nested_by_and = self.nest_by_oper( bracketed, "&" )
-        nested_by_or = self.nest_by_oper( nested_by_and, "|" )
+        listified = self.listify(expr)
+        bracketed = self.get_bracketed(listified)
+        nested_by_and = self.nest_by_oper(bracketed, "&")
+        nested_by_or = self.nest_by_oper(nested_by_and, "|")
         return nested_by_or
 
-    def flatten_nested_expr( self, expr ):
+    def flatten_nested_expr(self, expr):
         """Convert a logical expression in a nested list back to a string"""
-        flattened = copy.deepcopy( expr )
-        for i in range(0,len(flattened)):
+        flattened = copy.deepcopy(expr)
+        for i in range(len(flattened)):
             if isinstance(flattened[i], list):
-                flattened[i] = self.flatten_nested_expr( flattened[i] )
+                flattened[i] = self.flatten_nested_expr(flattened[i])
         if isinstance(flattened, list):
             flattened = (" ").join(flattened)
         flattened = "(" + flattened
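
    The renamed ConditionalSimplifier takes a trigger expression and a list of
    items to strip out (e.g. prerequisites that are already satisfied) and
    returns the simplified expression. A hedged usage sketch; the exact output
    formatting is an assumption based on flatten_nested_expr() above:

        from cylc.conditional_simplifier import ConditionalSimplifier

        expr = "(a | b) & c"
        # Remove the already-satisfied prerequisite "c" from the expression.
        simplified = ConditionalSimplifier(expr, ["c"]).get_cleaned()
        # expected: something equivalent to "(a | b)"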
diff --git a/lib/cylc/config.py b/lib/cylc/config.py
index c82d5ca..f0420dc 100644
--- a/lib/cylc/config.py
+++ b/lib/cylc/config.py
@@ -16,7 +16,9 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import re, os, sys
+import re
+import os
+import sys
 import traceback
 from cylc.taskdef import TaskDef, TaskDefError
 from cylc.cfgspec.suite import get_suitecfg
@@ -26,23 +28,23 @@ from cylc.cycling.loader import (get_point, get_point_relative,
                                  init_cyclers, INTEGER_CYCLING_TYPE,
                                  ISO8601_CYCLING_TYPE)
 from cylc.cycling import IntervalParsingError
+from cylc.wallclock import get_current_time_string
 from isodatetime.data import Calendar
 from envvar import check_varnames, expandvars
 from copy import deepcopy, copy
 from output import output
 from graphnode import graphnode, GraphNodeError
 from print_tree import print_tree
-from prerequisites.conditionals import TriggerExpressionError
+from cylc.prerequisite import TriggerExpressionError
 from regpath import RegPath
 from trigger import trigger
 from parsec.util import replicate
 from cylc.task_id import TaskID
 from C3MRO import C3
-from parsec.OrderedDict import OrderedDict
+from parsec.OrderedDict import OrderedDictWithDefaults
 import flags
 from syntax_flags import (
     SyntaxVersion, set_syntax_version, VERSION_PREV, VERSION_NEW)
-from cylc.task_proxy import TaskProxy
 
 """
 Parse and validate the suite definition file, do some consistency
@@ -52,10 +54,11 @@ checking, then construct task proxy objects and graph structures.
 RE_SUITE_NAME_VAR = re.compile('\${?CYLC_SUITE_(REG_)?NAME}?')
 RE_TASK_NAME_VAR = re.compile('\${?CYLC_TASK_NAME}?')
 CLOCK_OFFSET_RE = re.compile(r'(' + TaskID.NAME_RE + r')(?:\(\s*(.+)\s*\))?')
+EXT_TRIGGER_RE = re.compile('(.*)\s*\(\s*(.+)\s*\)\s*')
 NUM_RUNAHEAD_SEQ_POINTS = 5  # Number of cycle points to look at per sequence.
 
 # TODO - unify this with task_state.py:
-TRIGGER_TYPES = [ 'submit', 'submit-fail', 'start', 'succeed', 'fail', 'finish' ]
+TRIGGER_TYPES = ['submit', 'submit-fail', 'start', 'succeed', 'fail', 'finish']
 FAM_TRIGGER_TYPES = (
     [trig_type + "-any" for trig_type in TRIGGER_TYPES] +
     [trig_type + "-all" for trig_type in TRIGGER_TYPES])
@@ -89,22 +92,23 @@ class Replacement(object):
         matched = match.group(0)
         replaced = match.expand(self.replacement)
         self.substitutions.append((matched, replaced))
-        self.match_groups.append( match.groups() )
+        self.match_groups.append(match.groups())
         return replaced
 
 
-class SuiteConfigError( Exception ):
+class SuiteConfigError(Exception):
     """
     Attributes:
         message - what the problem is.
         TODO - element - config element causing the problem
     """
-    def __init__( self, msg ):
+    def __init__(self, msg):
         self.msg = msg
 
-    def __str__( self ):
+    def __str__(self):
         return repr(self.msg)
 
+
 class TaskNotDefinedError(SuiteConfigError):
     """A named task not defined."""
 
@@ -113,17 +117,53 @@ class TaskNotDefinedError(SuiteConfigError):
 
 # TODO: separate config for run and non-run purposes?
 
-class config( object ):
+
+class SuiteConfig(object):
+    """Class for suite configuration items and derived quantities."""
+
+    _INSTANCE = None
+    _FORCE = False  # Override singleton behaviour (only used by "cylc diff"!)
+
+    @classmethod
+    def get_inst(cls, suite=None, fpath=None,
+                 template_vars=[], template_vars_file=None,
+                 owner=None, run_mode='live', validation=False, strict=False,
+                 collapsed=[], cli_initial_point_string=None,
+                 cli_start_point_string=None, cli_final_point_string=None,
+                 is_restart=False, is_reload=False, write_proc=True,
+                 vis_start_string=None, vis_stop_string=None,
+                 mem_log_func=None):
+        """Return a singleton instance.
+
+        On 1st call, instantiate the singleton.
+        Argument list is only relevant on 1st call.
+
+        """
+        if cls._INSTANCE is None or cls._FORCE:
+            cls._FORCE = False
+            cls._INSTANCE = cls(
+                suite, fpath, template_vars, template_vars_file, owner,
+                run_mode, validation, strict, collapsed,
+                cli_initial_point_string, cli_start_point_string,
+                cli_final_point_string, is_restart, is_reload, write_proc,
+                vis_start_string, vis_stop_string, mem_log_func)
+        return cls._INSTANCE
+
     def __init__(self, suite, fpath, template_vars=[], template_vars_file=None,
                  owner=None, run_mode='live', validation=False, strict=False,
                  collapsed=[], cli_initial_point_string=None,
                  cli_start_point_string=None, cli_final_point_string=None,
                  is_restart=False, is_reload=False, write_proc=True,
-                 vis_start_string=None, vis_stop_string=None):
+                 vis_start_string=None, vis_stop_string=None,
+                 mem_log_func=None):
 
+        self.mem_log = mem_log_func
+        if mem_log_func is None:
+            self.mem_log = lambda *a: False
+        self.mem_log("config.py:config.py: start init config")
         self.suite = suite  # suite name
         self.fpath = fpath  # suite definition
-        self.fdir  = os.path.dirname(fpath)
+        self.fdir = os.path.dirname(fpath)
         self.owner = owner
         self.run_mode = run_mode
         self.strict = strict
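
    The get_inst() classmethod above introduces a singleton for SuiteConfig:
    the first call constructs the instance, later calls return it unchanged,
    and _FORCE lets a caller ("cylc diff") rebuild it. A toy version of the
    same pattern, not cylc's class itself:

        class Singleton(object):
            _INSTANCE = None
            _FORCE = False

            @classmethod
            def get_inst(cls, *args, **kwargs):
                if cls._INSTANCE is None or cls._FORCE:
                    cls._FORCE = False
                    cls._INSTANCE = cls(*args, **kwargs)
                return cls._INSTANCE

            def __init__(self, name=None):
                self.name = name

        a = Singleton.get_inst(name="suite.rc")
        b = Singleton.get_inst(name="ignored on second call")
        assert a is b and a.name == "suite.rc"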
@@ -133,15 +173,17 @@ class config( object ):
         self.validation = validation
         self.initial_point = None
         self.start_point = None
-        self._cli_initial_point_string = cli_initial_point_string
-        self._cli_start_point_string = cli_start_point_string
         self.is_restart = is_restart
         self.first_graph = True
         self.clock_offsets = {}
+        self.expiration_offsets = {}
+        self.ext_triggers = {}
         self.suite_polling_tasks = {}
         self.triggering_families = []
         self.vis_start_point_string = vis_start_string
         self.vis_stop_point_string = vis_stop_string
+        self._last_graph_raw_id = None
+        self._last_graph_raw_edges = []
 
         self.sequences = []
         self.actual_first_point = None
@@ -152,52 +194,60 @@ class config( object ):
 
         # runtime hierarchy dicts keyed by namespace name:
         self.runtime = {
-                # lists of parent namespaces
-                'parents' : {},
-                # lists of C3-linearized ancestor namespaces
-                'linearized ancestors' : {},
-                # lists of first-parent ancestor namepaces
-                'first-parent ancestors' : {},
-                # lists of all descendant namespaces
-                # (not including the final tasks)
-                'descendants' : {},
-                # lists of all descendant namespaces from the first-parent hierarchy
-                # (first parents are collapsible in suite visualization)
-                'first-parent descendants' : {},
-                }
+            # lists of parent namespaces
+            'parents': {},
+            # lists of C3-linearized ancestor namespaces
+            'linearized ancestors': {},
+            # lists of first-parent ancestor namespaces
+            'first-parent ancestors': {},
+            # lists of all descendant namespaces
+            # (not including the final tasks)
+            'descendants': {},
+            # lists of all descendant namespaces from the first-parent
+            # hierarchy (first parents are collapsible in suite
+            # visualization)
+            'first-parent descendants': {},
+        }
         # tasks
         self.leaves = []
         # one up from root
         self.feet = []
 
-        # parse, upgrade, validate the suite, but don't expand with default items
-        self.pcfg = get_suitecfg( fpath, force=is_reload,
-                tvars=template_vars, tvars_file=template_vars_file,
-                write_proc=write_proc )
+        # parse, upgrade, validate the suite, but don't expand with default
+        # items
+        self.mem_log("config.py: before get_suitecfg")
+        self.pcfg = get_suitecfg(
+            fpath, force=is_reload, tvars=template_vars,
+            tvars_file=template_vars_file, write_proc=write_proc)
+        self.mem_log("config.py: after get_suitecfg")
+        self.mem_log("config.py: before get(sparse=True")
         self.cfg = self.pcfg.get(sparse=True)
+        self.mem_log("config.py: after get(sparse=True)")
 
-        if self._cli_initial_point_string is not None:
+        # First check for the essential scheduling section.
+        if 'scheduling' not in self.cfg:
+            raise SuiteConfigError("ERROR: missing [scheduling] section.")
+        if 'dependencies' not in self.cfg['scheduling']:
+            raise SuiteConfigError(
+                "ERROR: missing [scheduling][[dependencies]] section.")
+        # (The check that 'graph' is defined is below.)
+        # The two runahead limiting schemes are mutually exclusive.
+        rlim = self.cfg['scheduling'].get('runahead limit', None)
+        mact = self.cfg['scheduling'].get('max active cycle points', None)
+        if rlim is not None and mact is not None:
+            raise SuiteConfigError(
+                "ERROR: use 'runahead limit' OR "
+                "'max active cycle points', not both")
+
+        # Override the suite defn with an initial point from the CLI.
+        if cli_initial_point_string is not None:
             self.cfg['scheduling']['initial cycle point'] = (
-                self._cli_initial_point_string)
+                cli_initial_point_string)
 
         dependency_map = self.cfg.get('scheduling', {}).get(
             'dependencies', {})
 
-        graph_found = False
-        for item, value in dependency_map.items():
-            if item == 'graph':
-                for line in value.split('\n'):
-                    m = re.search(r"(&&)|(\|\|)", line)
-                    if m:
-                        linemsg = line.strip()
-                        raise SuiteConfigError(
-                            "ERROR: Illegal '%s' in '%s' at %s" 
-                            % (m.group(0), item, linemsg)
-                        )
-            if item == 'graph' or value.get('graph'):
-                graph_found = True
-                break
-        if not graph_found:
+        if not self.is_graph_defined(dependency_map):
             raise SuiteConfigError('No suite dependency graph defined.')
 
         if 'cycling mode' not in self.cfg.get('scheduling', {}):
@@ -216,7 +266,8 @@ class config( object ):
                 fcp = self.cfg['scheduling'].get('final cycle point')
                 if just_has_async_graph and not (
                         icp in [None, "1"] and fcp in [None, icp]):
-                    raise SuiteConfigError('Conflicting syntax: integer vs ' +
+                    raise SuiteConfigError(
+                        'Conflicting syntax: integer vs ' +
                         'cycling suite, are you missing an [[R1]] section in' +
                         ' your graph?')
                 if just_has_async_graph:
@@ -231,10 +282,10 @@ class config( object ):
 
         # allow test suites with no [runtime]:
         if 'runtime' not in self.cfg:
-            self.cfg['runtime'] = {}
+            self.cfg['runtime'] = OrderedDictWithDefaults()
 
         if 'root' not in self.cfg['runtime']:
-            self.cfg['runtime']['root'] = {}
+            self.cfg['runtime']['root'] = OrderedDictWithDefaults()
 
         # Replace [runtime][name1,name2,...] with separate namespaces.
         if flags.verbose:
@@ -242,73 +293,94 @@ class config( object ):
         # This requires expansion into a new OrderedDict to preserve the
         # correct order of the final list of namespaces (add-or-override
         # by repeated namespace depends on this).
-        newruntime = OrderedDict()
+        newruntime = OrderedDictWithDefaults()
         for key, val in self.cfg['runtime'].items():
             if ',' in key:
                 for name in re.split(' *, *', key.rstrip(', ')):
                     if name not in newruntime:
-                        newruntime[name] = OrderedDict()
+                        newruntime[name] = OrderedDictWithDefaults()
                     replicate(newruntime[name], val)
             else:
                 if key not in newruntime:
-                    newruntime[key] = OrderedDict()
+                    newruntime[key] = OrderedDictWithDefaults()
                 replicate(newruntime[key], val)
         self.cfg['runtime'] = newruntime
+
         self.ns_defn_order = newruntime.keys()
 
         # check var names before inheritance to avoid repetition
         self.check_env_names()
 
+        self.mem_log("config.py: before compute_family_tree")
         # do sparse inheritance
         self.compute_family_tree()
+        self.mem_log("config.py: after compute_family_tree")
+        self.mem_log("config.py: before inheritance")
         self.compute_inheritance()
+        self.mem_log("config.py: after inheritance")
 
-        #self.print_inheritance() # (debugging)
+        # self.print_inheritance() # (debugging)
 
         # filter task environment variables after inheritance
         self.filter_env()
 
         # now expand with defaults
-        self.cfg = self.pcfg.get( sparse=False )
+        self.mem_log("config.py: before get(sparse=False)")
+        self.cfg = self.pcfg.get(sparse=False)
+        self.mem_log("config.py: after get(sparse=False)")
 
         # after the call to init_cyclers, we can start getting proper points.
         init_cyclers(self.cfg)
 
-        initial_point = None
-        if self.cfg['scheduling']['initial cycle point'] is not None:
-            initial_point = get_point(
-                self.cfg['scheduling']['initial cycle point']).standardise()
-            self.cfg['scheduling']['initial cycle point'] = str(initial_point)
+        # Running in UTC time? (else just use the system clock)
+        flags.utc = self.cfg['cylc']['UTC mode']
+        # Capture cycling mode
+        flags.cycling_mode = self.cfg['scheduling']['cycling mode']
 
-        self.cli_initial_point = get_point(self._cli_initial_point_string)
-        if self.cli_initial_point is not None:
-            self.cli_initial_point.standardise()
-
-        self.initial_point = self.cli_initial_point or initial_point
-        if self.initial_point is None:
+        # Initial point from suite definition (or CLI override above).
+        icp = self.cfg['scheduling']['initial cycle point']
+        if icp is None:
             raise SuiteConfigError(
                 "This suite requires an initial cycle point.")
+        if icp == "now":
+            icp = get_current_time_string()
+        self.initial_point = get_point(icp).standardise()
+        self.cfg['scheduling']['initial cycle point'] = str(self.initial_point)
+        if cli_start_point_string:
+            # Warm start from a point later than initial point.
+            if cli_start_point_string == "now":
+                cli_start_point_string = get_current_time_string()
+            cli_start_point = get_point(cli_start_point_string).standardise()
+            self.start_point = cli_start_point
         else:
-            self.initial_point.standardise()
+            # Cold start.
+            self.start_point = self.initial_point
 
         # Validate initial cycle point against any constraints
         if self.cfg['scheduling']['initial cycle point constraints']:
             valid_icp = False
-            for entry in self.cfg['scheduling']['initial cycle point constraints']:
-                possible_pt = get_point_relative(entry, initial_point).standardise()
+            for entry in (
+                    self.cfg['scheduling']['initial cycle point constraints']):
+                possible_pt = get_point_relative(
+                    entry, self.initial_point
+                ).standardise()
                 if self.initial_point == possible_pt:
                     valid_icp = True
                     break
             if not valid_icp:
+                constraints_str = str(
+                    self.cfg['scheduling']['initial cycle point constraints'])
                 raise SuiteConfigError(
-                    "Initial cycle point %s does not meet the constraints %s"%(
-                    str(self.initial_point),
-                    str(self.cfg['scheduling']['initial cycle point constraints']))
+                    ("Initial cycle point %s does not meet the constraints " +
+                     "%s") % (
+                        str(self.initial_point),
+                        constraints_str
                     )
+                )
 
-        if (self.cfg['scheduling']['final cycle point'] is not None and 
-            self.cfg['scheduling']['final cycle point'].strip() is ""):
-                self.cfg['scheduling']['final cycle point'] = None
+        if (self.cfg['scheduling']['final cycle point'] is not None and
+                self.cfg['scheduling']['final cycle point'].strip() is ""):
+            self.cfg['scheduling']['final cycle point'] = None
         final_point_string = (cli_final_point_string or
                               self.cfg['scheduling']['final cycle point'])
         final_point = None
@@ -318,8 +390,9 @@ class config( object ):
                 if "P" in final_point_string:
                     # Relative, integer cycling.
                     final_point = get_point_relative(
-                            self.cfg['scheduling']['final cycle point'],
-                        self.initial_point).standardise()
+                        self.cfg['scheduling']['final cycle point'],
+                        self.initial_point
+                    ).standardise()
             else:
                 try:
                     # Relative, ISO8601 cycling.
@@ -334,32 +407,30 @@ class config( object ):
             self.cfg['scheduling']['final cycle point'] = str(final_point)
 
         if final_point is not None and self.initial_point > final_point:
-            raise SuiteConfigError("The initial cycle point:" +
+            raise SuiteConfigError(
+                "The initial cycle point:" +
                 str(self.initial_point) + " is after the final cycle point:" +
                 str(final_point) + ".")
 
         # Validate final cycle point against any constraints
         if (final_point is not None and
-            self.cfg['scheduling']['final cycle point constraints']):
+                self.cfg['scheduling']['final cycle point constraints']):
             valid_fcp = False
-            for entry in self.cfg['scheduling']['final cycle point constraints']:
-                possible_pt = get_point_relative(entry, final_point).standardise()
+            for entry in (
+                    self.cfg['scheduling']['final cycle point constraints']):
+                possible_pt = get_point_relative(
+                    entry, final_point).standardise()
                 if final_point == possible_pt:
                     valid_fcp = True
                     break
             if not valid_fcp:
+                constraints_str = str(
+                    self.cfg['scheduling']['final cycle point constraints'])
                 raise SuiteConfigError(
-                    "Final cycle point %s does not meet the constraints %s"%(
-                    str(final_point),
-                    str(self.cfg['scheduling']['final cycle point constraints']))
-                    )
+                    "Final cycle point %s does not meet the constraints %s" % (
+                        str(final_point), constraints_str))
 
-        self.start_point = (
-            get_point(self._cli_start_point_string) or self.initial_point)
-        if self.start_point is not None:
-            self.start_point.standardise()
-
-        # [special tasks]: parse clock-offsets, and replace families with members
+        # Parse special task cycle point offsets, and replace family names.
         if flags.verbose:
             print "Parsing [special tasks]"
         for type in self.cfg['scheduling']['special tasks']:
@@ -367,23 +438,36 @@ class config( object ):
             extn = ''
             for item in self.cfg['scheduling']['special tasks'][type]:
                 name = item
-                # Get clock-trigger offsets.
-                if type == 'clock-triggered':
-                    m = re.match( CLOCK_OFFSET_RE, item )
+                if type == 'external-trigger':
+                    m = re.match(EXT_TRIGGER_RE, item)
+                    if m is None:
+                        raise SuiteConfigError(
+                            "ERROR: Illegal %s spec: %s" % (type, item)
+                        )
+                    name, ext_trigger_msg = m.groups()
+                    extn = "(" + ext_trigger_msg + ")"
+
+                elif type in ['clock-trigger', 'clock-expire']:
+                    m = re.match(CLOCK_OFFSET_RE, item)
                     if m is None:
                         raise SuiteConfigError(
-                            "ERROR: Illegal clock-trigger spec: %s" % item
+                            "ERROR: Illegal %s spec: %s" % (type, item)
                         )
                     if (self.cfg['scheduling']['cycling mode'] !=
                             Calendar.MODE_GREGORIAN):
                         raise SuiteConfigError(
-                            "ERROR: clock-triggered tasks require " +
-                            "[scheduling]cycling mode=%s" %
-                            Calendar.MODE_GREGORIAN
+                            "ERROR: %s tasks require "
+                            "[scheduling]cycling mode=%s" % (
+                                type, Calendar.MODE_GREGORIAN)
                         )
                     name, offset_string = m.groups()
                     if not offset_string:
                         offset_string = "PT0M"
+                    if flags.verbose:
+                        if offset_string.startswith("-"):
+                                print >> sys.stderr, (
+                                    "WARNING: %s offsets are "
+                                    "normally positive: %s" % (type, item))
                     offset_converted_from_prev = False
                     try:
                         float(offset_string)
@@ -391,45 +475,57 @@ class config( object ):
                         # So the offset should be an ISO8601 interval.
                         pass
                     else:
-                        # Backward-compatibility for a raw float number of hours.
+                        # Backward-compatibility for a raw float number of
+                        # hours.
                         set_syntax_version(
                             VERSION_PREV,
-                            "clock-triggered=%s: integer offset" % item
+                            "%s=%s: integer offset" % (type, item)
                         )
-                        if get_interval_cls().get_null().TYPE == ISO8601_CYCLING_TYPE:
-                            seconds = int(float(offset_string)*3600)
+                        if (get_interval_cls().get_null().TYPE ==
+                                ISO8601_CYCLING_TYPE):
+                            seconds = int(float(offset_string) * 3600)
                             offset_string = "PT%sS" % seconds
                         offset_converted_from_prev = True
                     try:
-                        offset_interval = get_interval(offset_string).standardise()
+                        offset_interval = (
+                            get_interval(offset_string).standardise())
                     except IntervalParsingError as exc:
                         raise SuiteConfigError(
-                            "ERROR: Illegal clock-trigger spec: %s" % offset_string
-                        )
+                            "ERROR: Illegal %s spec: %s" % (
+                                type, offset_string))
                     else:
                         if not offset_converted_from_prev:
                             set_syntax_version(
                                 VERSION_NEW,
-                                "clock-triggered=%s: ISO 8601 offset" % item
+                                "%s=%s: ISO 8601 offset" % (type, item)
                             )
                     extn = "(" + offset_string + ")"
 
                 # Replace family names with members.
                 if name in self.runtime['descendants']:
-                    result.remove( item )
+                    result.remove(item)
                     for member in self.runtime['descendants'][name]:
                         if member in self.runtime['descendants']:
                             # (sub-family)
                             continue
                         result.append(member + extn)
-                        if type == 'clock-triggered':
+                        if type == 'clock-trigger':
                             self.clock_offsets[member] = offset_interval
-                elif type == 'clock-triggered':
+                        if type == 'clock-expire':
+                            self.expiration_offsets[member] = offset_interval
+                        if type == 'external-trigger':
+                            self.ext_triggers[member] = ext_trigger_msg
+                elif type == 'clock-trigger':
                     self.clock_offsets[name] = offset_interval
+                elif type == 'clock-expire':
+                    self.expiration_offsets[name] = offset_interval
+                elif type == 'external-trigger':
+                    self.ext_triggers[name] = self.dequote(ext_trigger_msg)
 
             self.cfg['scheduling']['special tasks'][type] = result
 
-        self.collapsed_families_rc = self.cfg['visualization']['collapsed families']
+        self.collapsed_families_rc = (
+            self.cfg['visualization']['collapsed families'])
         if is_reload:
             # on suite reload retain an existing state of collapse
             # (used by the "cylc graph" viewer)
@@ -440,9 +536,11 @@ class config( object ):
             fromrc = True
         for cfam in self.closed_families:
             if cfam not in self.runtime['descendants']:
-                self.closed_families.remove( cfam )
+                self.closed_families.remove(cfam)
                 if fromrc and flags.verbose:
-                    print >> sys.stderr, 'WARNING, [visualization][collapsed families]: family ' + cfam + ' not defined'
+                    print >> sys.stderr, (
+                        'WARNING, [visualization][collapsed families]: ' +
+                        'family ' + cfam + ' not defined')
 
         # check for run mode override at suite level
         if self.cfg['cylc']['force run mode']:
@@ -450,7 +548,9 @@ class config( object ):
 
         self.process_directories()
 
+        self.mem_log("config.py: before load_graph()")
         self.load_graph()
+        self.mem_log("config.py: after load_graph()")
 
         self.compute_runahead_limits()
 
@@ -458,17 +558,35 @@ class config( object ):
 
         # Warn or abort (if --strict) if naked dummy tasks (no runtime
         # section) are found in graph or queue config.
-        if len( self.naked_dummy_tasks ) > 0:
+        if len(self.naked_dummy_tasks) > 0:
             if self.strict or flags.verbose:
-                print >> sys.stderr, 'WARNING: naked dummy tasks detected (no entry under [runtime]):'
+                print >> sys.stderr, (
+                    'WARNING: naked dummy tasks detected (no entry under ' +
+                    '[runtime]):')
                 for ndt in self.naked_dummy_tasks:
                     print >> sys.stderr, '  +', ndt
             if self.strict:
-                raise SuiteConfigError, 'ERROR: strict validation fails naked dummy tasks'
+                raise SuiteConfigError(
+                    'ERROR: strict validation fails naked dummy tasks')
 
         if self.validation:
             self.check_tasks()
 
+        # Check that external trigger messages are only used once (they have to
+        # be discarded immediately to avoid triggering the next instance of the
+        # just-triggered task).
+        seen = {}
+        for name, tdef in self.taskdefs.items():
+            for msg in tdef.external_triggers:
+                if msg not in seen:
+                    seen[msg] = name
+                else:
+                    print >> sys.stderr, (
+                        "External trigger '%s'\n  used in tasks %s and %s." % (
+                            msg, name, seen[msg]))
+                    raise SuiteConfigError(
+                        "ERROR: external triggers must be used only once.")
+
         ngs = self.cfg['visualization']['node groups']
         # If a node group member is a family, include its descendants too.
         replace = {}
@@ -490,10 +608,11 @@ class config( object ):
 
         if flags.verbose:
             print "Checking [visualization] node attributes"
+            # TODO - these should probably be done in non-verbose mode too.
             # 1. node groups should contain valid namespace names
             nspaces = self.cfg['runtime'].keys()
             bad = {}
-            for ng,mems in ngs.items():
+            for ng, mems in ngs.items():
                 n_bad = []
                 for m in mems:
                     if m not in nspaces:
@@ -502,7 +621,7 @@ class config( object ):
                     bad[ng] = n_bad
             if bad:
                 print >> sys.stderr, "  WARNING: undefined node group members"
-                for ng,mems in bad.items():
+                for ng, mems in bad.items():
                     print >> sys.stderr, " + " + ng + ":", ','.join(mems)
 
             # 2. node attributes must refer to node groups or namespaces
@@ -511,10 +630,27 @@ class config( object ):
                 if na not in ngs and na not in nspaces:
                     bad.append(na)
             if bad:
-                print >> sys.stderr, "  WARNING: undefined node attribute targets"
+                print >> sys.stderr, (
+                    "  WARNING: undefined node attribute targets")
                 for na in bad:
                     print >> sys.stderr, " + " + na
 
+        # 3. node attributes must be lists of quoted "key=value" pairs.
+        fail = False
+        for node, attrs in (
+                self.cfg['visualization']['node attributes'].items()):
+            for attr in attrs:
+                try:
+                    key, value = re.split('\s*=\s*', attr)
+                except ValueError as exc:
+                    fail = True
+                    print >> sys.stderr, (
+                        "ERROR: [visualization][node attributes]%s = %s" % (
+                            node, attr))
+        if fail:
+            raise SuiteConfigError("Node attributes must be of the form "
+                                   "'key1=value1', 'key2=value2', etc.")
+
         # (Note that we're retaining 'default node attributes' even
         # though this could now be achieved by styling the root family,
         # because putting default attributes for root in the suite.rc spec
@@ -526,7 +662,7 @@ class config( object ):
         self.leaves = self.get_task_name_list()
         for ns, ancestors in self.runtime['first-parent ancestors'].items():
             try:
-                foot = ancestors[-2] # one back from 'root'
+                foot = ancestors[-2]  # one back from 'root'
             except IndexError:
                 pass
             else:
@@ -535,15 +671,17 @@ class config( object ):
 
         # CLI override for visualization settings.
         if self.vis_start_point_string:
-            self.cfg['visualization']['initial cycle point'] = self.vis_start_point_string
+            self.cfg['visualization']['initial cycle point'] = (
+                self.vis_start_point_string)
         if self.vis_stop_point_string:
-            self.cfg['visualization']['final cycle point'] = self.vis_stop_point_string
+            self.cfg['visualization']['final cycle point'] = (
+                self.vis_stop_point_string)
 
         # For static visualization, start point defaults to suite initial
         # point; stop point must be explicit with initial point, or None.
         if self.cfg['visualization']['initial cycle point'] is None:
             self.cfg['visualization']['initial cycle point'] = (
-                    self.cfg['scheduling']['initial cycle point'])
+                self.cfg['scheduling']['initial cycle point'])
             # If viz initial point is None don't accept a final point.
             if self.cfg['visualization']['final cycle point'] is not None:
                 if flags.verbose:
@@ -552,20 +690,22 @@ class config( object ):
                         "  (it must be defined with an initial cycle point)")
                 self.cfg['visualization']['final cycle point'] = None
 
-
         vfcp = self.cfg['visualization']['final cycle point']
         if vfcp:
             try:
                 vfcp = get_point_relative(
                     self.cfg['visualization']['final cycle point'],
-                    initial_point).standardise()
+                    self.initial_point).standardise()
             except ValueError:
                 vfcp = get_point(
-                    self.cfg['visualization']['final cycle point']).standardise()
+                    self.cfg['visualization']['final cycle point']
+                ).standardise()
 
+        # A viz final point can't be beyond the suite final point.
         if vfcp is not None and final_point is not None:
             if vfcp > final_point:
-                self.cfg['visualization']['final cycle point'] = str(final_point)
+                self.cfg['visualization']['final cycle point'] = str(
+                    final_point)
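
The clamp above simply caps the visualization final cycle point at the suite final cycle point. With plain comparable values standing in for cycle point objects (a simplification; the helper below is illustrative, not part of cylc):

    def clamp_viz_final_point(viz_final, suite_final):
        """Return viz_final, capped at suite_final when both are set."""
        if (viz_final is not None and suite_final is not None
                and viz_final > suite_final):
            return suite_final
        return viz_final

    # clamp_viz_final_point(20150110, 20150105) -> 20150105
    # clamp_viz_final_point(20150101, None)     -> 20150101
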
 
         # Replace suite name in suite URL.
         url = self.cfg['URL']
@@ -578,24 +718,79 @@ class config( object ):
                 cfg['URL'] = re.sub(RE_TASK_NAME_VAR, name, cfg['URL'])
                 cfg['URL'] = re.sub(RE_SUITE_NAME_VAR, self.suite, cfg['URL'])
 
-    def check_env_names( self ):
+        if self.validation:
+            if graphing_disabled:
+                print >> sys.stderr, (
+                    "WARNING: skipping cyclic dependence check"
+                    "  (could not import graphviz library)")
+            else:
+                # Detect cyclic dependence.
+                # (ignore suicide triggers as they look like cyclic dependence:
+                #    "foo:fail => bar => !foo" looks like "foo => bar => foo").
+                graph = self.get_graph(ungroup_all=True, ignore_suicide=True)
+                # Original edges.
+                o_edges = graph.edges()
+                # Reverse any back edges using graphviz 'acyclic'.
+                # (Note: use of acyclic(copy=True) reveals our CGraph class
+                # init should have the same arg list as its parent,
+                # pygraphviz.AGraph).
+                graph.acyclic()
+                # Look for reversed edges (note this does not detect
+                # self-edges).
+                n_edges = graph.edges()
+                back_edges = []
+                for e in o_edges:
+                    if e not in n_edges:
+                        back_edges.append(e)
+                if len(back_edges) > 0:
+                    print >> sys.stderr, "Back-edges:"
+                    for e in back_edges:
+                        print >> sys.stderr, '  %s => %s' % e
+                    raise SuiteConfigError(
+                        'ERROR: cyclic dependence detected '
+                        '(graph the suite to see back-edges).')
+
+        self.mem_log("config.py: end init config")
+
+    def is_graph_defined(self, dependency_map):
+        for item, value in dependency_map.items():
+            if item == 'graph':
+                # Async graph.
+                if value != '':
+                    return True
+            else:
+                # Cycling section.
+                for subitem, subvalue in value.items():
+                    if subitem == 'graph':
+                        if subvalue != '':
+                            return True
+        return False
+
+    def dequote(self, s):
+        """Strip quotes off a string."""
+        if (s[0] == s[-1]) and s.startswith(("'", '"')):
+            return s[1:-1]
+        return s
+
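
For reference, the behaviour of the new dequote() helper above can be shown with a standalone copy:

    def dequote(s):
        """Strip matching outer quotes off a non-empty string."""
        if (s[0] == s[-1]) and s.startswith(("'", '"')):
            return s[1:-1]
        return s

    assert dequote('"filled"') == 'filled'
    assert dequote("'red'") == 'red'
    assert dequote('plain') == 'plain'
    assert dequote('"half') == '"half'   # mismatched quotes are left alone
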
+    def check_env_names(self):
         # check for illegal environment variable names
-         bad = {}
-         for label in self.cfg['runtime']:
-             res = []
-             if 'environment' in self.cfg['runtime'][label]:
-                 res = check_varnames( self.cfg['runtime'][label]['environment'] )
-             if res:
-                 bad[label] = res
-         if bad:
-             print >> sys.stderr, "ERROR, bad env variable names:"
-             for label, vars in bad.items():
-                 print >> sys.stderr, 'Namespace:', label
-                 for var in vars:
-                     print >> sys.stderr, "  ", var
-             raise SuiteConfigError("Illegal env variable name(s) detected" )
-
-    def filter_env( self ):
+        bad = {}
+        for label in self.cfg['runtime']:
+            res = []
+            if 'environment' in self.cfg['runtime'][label]:
+                res = check_varnames(self.cfg['runtime'][label]['environment'])
+            if res:
+                bad[label] = res
+        if bad:
+            print >> sys.stderr, "ERROR, bad env variable names:"
+            for label, vars in bad.items():
+                print >> sys.stderr, 'Namespace:', label
+                for var in vars:
+                    print >> sys.stderr, "  ", var
+            raise SuiteConfigError(
+                "Illegal environment variable name(s) detected")
+
+    def filter_env(self):
         # filter environment variables after sparse inheritance
         for name, ns in self.cfg['runtime'].items():
             try:
@@ -620,13 +815,13 @@ class config( object ):
                 # no filtering to do
                 continue
 
-            nenv = OrderedDict()
+            nenv = OrderedDictWithDefaults()
             for key, val in oenv.items():
-                if ( not fincl or key in fincl ) and key not in fexcl:
+                if (not fincl or key in fincl) and key not in fexcl:
                     nenv[key] = val
             ns['environment'] = nenv
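
The include/exclude filtering above keeps an environment variable only if it appears in the (optional) include list and is not in the exclude list. A minimal standalone sketch using a plain dict rather than OrderedDictWithDefaults:

    def filter_env(env, fincl=None, fexcl=None):
        """Apply include/exclude filters to an environment mapping."""
        fincl = fincl or []
        fexcl = fexcl or []
        return dict(
            (key, val) for key, val in env.items()
            if (not fincl or key in fincl) and key not in fexcl)

    # filter_env({'A': '1', 'B': '2', 'C': '3'}, fincl=['A', 'B'], fexcl=['B'])
    # -> {'A': '1'}
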
 
-    def compute_family_tree( self ):
+    def compute_family_tree(self):
         first_parents = {}
         demoted = {}
         for name in self.cfg['runtime']:
@@ -635,45 +830,47 @@ class config( object ):
                 first_parents[name] = []
                 continue
             # get declared parents, with implicit inheritance from root.
-            pts = self.cfg['runtime'][name].get( 'inherit', ['root'] )
+            pts = self.cfg['runtime'][name].get('inherit', ['root'])
             for p in pts:
                 if p == "None":
                     # see just below
                     continue
                 if p not in self.cfg['runtime']:
-                    raise SuiteConfigError, "ERROR, undefined parent for " + name +": " + p
+                    raise SuiteConfigError(
+                        "ERROR, undefined parent for " + name + ": " + p)
             if pts[0] == "None":
                 if len(pts) == 1:
-                    raise SuiteConfigError, "ERROR: null parentage for " + name
+                    raise SuiteConfigError(
+                        "ERROR: null parentage for " + name)
                 demoted[name] = pts[1]
                 pts = pts[1:]
                 first_parents[name] = ['root']
             else:
-                first_parents[name] = [ pts[0] ]
+                first_parents[name] = [pts[0]]
             self.runtime['parents'][name] = pts
 
         if flags.verbose and demoted:
             print "First parent(s) demoted to secondary:"
-            for n,p in demoted.items():
+            for n, p in demoted.items():
                 print " +", p, "as parent of '" + n + "'"
 
-        c3 = C3( self.runtime['parents'] )
-        c3_single = C3( first_parents )
+        c3 = C3(self.runtime['parents'])
+        c3_single = C3(first_parents)
 
         for name in self.cfg['runtime']:
             try:
                 self.runtime['linearized ancestors'][name] = c3.mro(name)
-                self.runtime['first-parent ancestors'][name] = c3_single.mro(name)
+                self.runtime['first-parent ancestors'][name] = (
+                    c3_single.mro(name))
             except RuntimeError as exc:
                 if flags.debug:
                     raise
-                exc_lines =  traceback.format_exc().splitlines()
+                exc_lines = traceback.format_exc().splitlines()
                 if exc_lines[-1].startswith(
-                    "RuntimeError: maximum recursion depth exceeded"):
-                    sys.stderr.write("ERROR: circular [runtime] inheritance?\n")
-                else:
-                    sys.stderr.write("ERROR: %s\n" % str(exc))
-                sys.exit(1)
+                        "RuntimeError: maximum recursion depth exceeded"):
+                    raise SuiteConfigError(
+                        "ERROR: circular [runtime] inheritance?")
+                raise
 
         for name in self.cfg['runtime']:
             ancestors = self.runtime['linearized ancestors'][name]
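
The C3 class used above applies the same linearisation Python uses for new-style classes, so a small mirror hierarchy (illustrative names only) shows the kind of ordering that ends up in 'linearized ancestors':

    class root(object):
        pass

    class FAM(root):
        pass

    class OPS(root):
        pass

    class foo(FAM, OPS):
        pass

    print([cls.__name__ for cls in foo.__mro__])
    # ['foo', 'FAM', 'OPS', 'root', 'object']
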
@@ -689,31 +886,34 @@ class config( object ):
                 if name not in self.runtime['first-parent descendants'][p]:
                     self.runtime['first-parent descendants'][p].append(name)
 
-        #for name in self.cfg['runtime']:
-        #    print name, self.runtime['linearized ancestors'][name]
+        # for name in self.cfg['runtime']:
+        #     print name, self.runtime['linearized ancestors'][name]
 
-    def compute_inheritance( self, use_simple_method=True ):
+    def compute_inheritance(self, use_simple_method=True):
         if flags.verbose:
             print "Parsing the runtime namespace hierarchy"
 
-        results = {}
+        results = OrderedDictWithDefaults()
         n_reps = 0
 
-        already_done = {} # to store already computed namespaces by mro
+        already_done = {}  # to store already computed namespaces by mro
 
-        for ns in self.cfg['runtime']:
+        # Loop through runtime members, 'root' first.
+        nses = self.cfg['runtime'].keys()
+        nses.sort(key=lambda ns: ns != 'root')
+        for ns in nses:
             # for each namespace ...
 
             hierarchy = copy(self.runtime['linearized ancestors'][ns])
             hierarchy.reverse()
 
-            result = {}
+            result = OrderedDictWithDefaults()
 
             if use_simple_method:
                 # Go up the linearized MRO from root, replicating or
                 # overriding each namespace element as we go.
                 for name in hierarchy:
-                    replicate( result, self.cfg['runtime'][name] )
+                    replicate(result, self.cfg['runtime'][name])
                     n_reps += 1
 
             else:
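
In the 'simple method' above, each namespace is built by walking its linearized ancestry from root downwards and letting more specific namespaces override earlier ones. A dict-merge sketch of the same idea (shallow merge only; cylc's replicate() merges nested sections recursively, and the names below are made up):

    def flatten_namespace(mro_root_first, runtime):
        """Merge runtime sections along an MRO, most specific last."""
        result = {}
        for name in mro_root_first:
            result.update(runtime[name])
        return result

    runtime = {
        'root': {'script': 'true', 'retries': '0'},
        'FAM': {'retries': '2'},
        'foo': {'script': 'run-foo.sh'},
    }
    print(sorted(flatten_namespace(['root', 'FAM', 'foo'], runtime).items()))
    # [('retries', '2'), ('script', 'run-foo.sh')]
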
@@ -734,11 +934,11 @@ class config( object ):
                         if prev_shortcut:
                             prev_shortcut = False
                             # copy ad_result (to avoid altering already_done)
-                            result = {}
-                            replicate(result,ad_result) # ...and use stored
+                            result = OrderedDictWithDefaults()
+                            replicate(result, ad_result)  # ...and use stored
                             n_reps += 1
                         # override name content into tmp
-                        replicate( result, self.cfg['runtime'][name] )
+                        replicate(result, self.cfg['runtime'][name])
                         n_reps += 1
                         # record this mro as already done
                         already_done[i_mro] = result
@@ -758,13 +958,13 @@ class config( object ):
             for item, val in self.runtime[foo].items():
                 print '  ', '  ', item, val
 
-    def compute_runahead_limits( self ):
+    def compute_runahead_limits(self):
         """Extract the runahead limits information."""
         max_cycles = self.cfg['scheduling']['max active cycle points']
         if max_cycles == 0:
             raise SuiteConfigError(
-                "ERROR: max cycle points must be greater than %s"
-                 % (max_cycles)
+                "ERROR: max cycle points must be greater than %s" %
+                (max_cycles)
             )
         self.max_num_active_cycle_points = self.cfg['scheduling'][
             'max active cycle points']
@@ -778,18 +978,18 @@ class config( object ):
         # The custom runahead limit is None if not user-configured.
         self.custom_runahead_limit = get_interval(limit)
 
-    def get_custom_runahead_limit( self ):
+    def get_custom_runahead_limit(self):
         """Return the custom runahead limit (may be None)."""
         return self.custom_runahead_limit
 
-    def get_max_num_active_cycle_points( self ):
+    def get_max_num_active_cycle_points(self):
         """Return the maximum allowed number of pool cycle points."""
         return self.max_num_active_cycle_points
 
-    def get_config( self, args, sparse=False ):
-        return self.pcfg.get( args, sparse )
+    def get_config(self, args, sparse=False):
+        return self.pcfg.get(args, sparse)
 
-    def adopt_orphans( self, orphans ):
+    def adopt_orphans(self, orphans):
         # Called by the scheduler after reloading the suite definition
         # at run time and finding any live task proxies whose
         # definitions have been removed from the suite. Keep them
@@ -797,10 +997,10 @@ class config( object ):
         # run their course and disappear.
         queues = self.cfg['scheduling']['queues']
         for orphan in orphans:
-            self.runtime['linearized ancestors'][orphan] = [ orphan, 'root' ]
-            queues['default']['members'].append( orphan )
+            self.runtime['linearized ancestors'][orphan] = [orphan, 'root']
+            queues['default']['members'].append(orphan)
 
-    def configure_queues( self ):
+    def configure_queues(self):
         """Assign tasks to internal queues."""
         # Note this modifies the parsed config dict.
         queues = self.cfg['scheduling']['queues']
@@ -831,8 +1031,9 @@ class config( object ):
                                 queues['default']['members'].remove(fmem)
                             except ValueError:
                                 if fmem in requeued:
-                                    msg = "%s: ignoring %s from %s (already assigned to a queue)" % (
-                                            queue, fmem, qmember)
+                                    msg = "%s: ignoring %s from %s (%s)" % (
+                                        queue, fmem, qmember,
+                                        'already assigned to a queue')
                                     warnings.append(msg)
                                 else:
                                     # Ignore: task not used in the graph.
@@ -847,12 +1048,12 @@ class config( object ):
                             queues['default']['members'].remove(qmember)
                         except ValueError:
                             if qmember in requeued:
-                                msg = "%s: ignoring '%s' (task already assigned)" % (
-                                        queue, qmember)
+                                msg = "%s: ignoring '%s' (%s)" % (
+                                    queue, qmember, 'task already assigned')
                                 warnings.append(msg)
                             elif qmember not in all_task_names:
-                                msg = "%s: ignoring '%s' (task not defined)" % (
-                                        queue, qmember)
+                                msg = "%s: ignoring '%s' (%s)" % (
+                                    queue, qmember, 'task not defined')
                                 warnings.append(msg)
                             else:
                                 # Ignore: task not used in the graph.
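
Both loops above do the same bookkeeping: a task (or expanded family member) named in a queue is removed from the 'default' queue, and a warning is collected if it was already claimed by another queue or is not defined at all. A trimmed-down sketch with hypothetical names, not the real data structures:

    def assign_to_queue(queues, qname, names, all_tasks, requeued, warnings):
        """Move each named task out of the default queue into queue qname."""
        for name in names:
            try:
                queues['default']['members'].remove(name)
            except ValueError:
                if name in requeued:
                    warnings.append("%s: ignoring '%s' (task already assigned)"
                                    % (qname, name))
                elif name not in all_tasks:
                    warnings.append("%s: ignoring '%s' (task not defined)"
                                    % (qname, name))
                # else: task not used in the graph - ignore it.
            else:
                queues[qname]['members'].append(name)
                requeued.append(name)
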
@@ -877,16 +1078,16 @@ class config( object ):
                 if queue == 'default':
                     continue
                 print "  + %s: %s" % (
-                        queue, ', '.join(queues[queue]['members']))
+                    queue, ', '.join(queues[queue]['members']))
 
-    def get_parent_lists( self ):
+    def get_parent_lists(self):
         return self.runtime['parents']
 
-    def get_first_parent_ancestors( self, pruned=False ):
+    def get_first_parent_ancestors(self, pruned=False):
         if pruned:
             # prune non-task namespaces from ancestors dict
             pruned_ancestors = {}
-            for key,val in self.runtime['first-parent ancestors'].items():
+            for key, val in self.runtime['first-parent ancestors'].items():
                 if key not in self.taskdefs:
                     continue
                 pruned_ancestors[key] = val
@@ -894,13 +1095,13 @@ class config( object ):
         else:
             return self.runtime['first-parent ancestors']
 
-    def get_linearized_ancestors( self ):
+    def get_linearized_ancestors(self):
         return self.runtime['linearized ancestors']
 
-    def get_first_parent_descendants( self ):
+    def get_first_parent_descendants(self):
         return self.runtime['first-parent descendants']
 
-    def define_inheritance_tree( self, tree, hierarchy, titles=False ):
+    def define_inheritance_tree(self, tree, hierarchy, titles=False):
         # combine inheritance hierarchies into a tree structure.
         for rt in hierarchy:
             hier = copy(hierarchy[rt])
@@ -911,17 +1112,17 @@ class config( object ):
                     foo[item] = {}
                 foo = foo[item]
 
-    def add_tree_titles( self, tree ):
-        for key,val in tree.items():
+    def add_tree_titles(self, tree):
+        for key, val in tree.items():
             if val == {}:
                 if 'title' in self.cfg['runtime'][key]:
                     tree[key] = self.cfg['runtime'][key]['title']
                 else:
                     tree[key] = 'No title provided'
             elif isinstance(val, dict):
-                self.add_tree_titles( val )
+                self.add_tree_titles(val)
 
-    def get_namespace_list( self, which ):
+    def get_namespace_list(self, which):
         names = []
         if which == 'graphed tasks':
             # tasks used only in the graph
@@ -933,7 +1134,7 @@ class config( object ):
             for ns in self.cfg['runtime']:
                 if ns not in self.runtime['descendants']:
                     # tasks have no descendants
-                    names.append( ns )
+                    names.append(ns)
         result = {}
         for ns in names:
             if 'title' in self.cfg['runtime'][ns]:
@@ -945,23 +1146,23 @@ class config( object ):
 
         return result
 
-    def get_mro( self, ns ):
+    def get_mro(self, ns):
         try:
             mro = self.runtime['linearized ancestors'][ns]
         except KeyError:
-            mro = ["ERROR: no such namespace: " + ns ]
+            mro = ["ERROR: no such namespace: " + ns]
         return mro
 
-    def print_first_parent_tree( self, pretty=False, titles=False ):
+    def print_first_parent_tree(self, pretty=False, titles=False):
         # find task namespaces (no descendants)
         tasks = []
         for ns in self.cfg['runtime']:
             if ns not in self.runtime['descendants']:
                 tasks.append(ns)
 
-        pruned_ancestors = self.get_first_parent_ancestors( pruned=True )
+        pruned_ancestors = self.get_first_parent_ancestors(pruned=True)
         tree = {}
-        self.define_inheritance_tree( tree, pruned_ancestors, titles=titles )
+        self.define_inheritance_tree(tree, pruned_ancestors, titles=titles)
         padding = ''
         if titles:
             self.add_tree_titles(tree)
@@ -970,22 +1171,22 @@ class config( object ):
             for ns in pruned_ancestors:
                 items = copy(pruned_ancestors[ns])
                 items.reverse()
-                for i in range(0,len(items)):
-                    tmp = 2*i + 1 + len(items[i])
+                for i in range(len(items)):
+                    tmp = 2 * i + 1 + len(items[i])
                     if i == 0:
                         tmp -= 1
                     if tmp > maxlen:
                         maxlen = tmp
             padding = maxlen * ' '
 
-        print_tree( tree, padding=padding, use_unicode=pretty )
+        print_tree(tree, padding=padding, use_unicode=pretty)
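
define_inheritance_tree() above folds each namespace's first-parent ancestor list (root last) into a nested dict that print_tree() can render. The same folding as a standalone sketch:

    def build_tree(hierarchy):
        """hierarchy: namespace -> ancestor list, e.g. ['foo', 'FAM', 'root']."""
        tree = {}
        for ancestors in hierarchy.values():
            branch = tree
            for item in reversed(ancestors):   # walk from root down to the leaf
                branch = branch.setdefault(item, {})
        return tree

    print(build_tree({'foo': ['foo', 'FAM', 'root'],
                      'bar': ['bar', 'FAM', 'root']}))
    # {'root': {'FAM': {'foo': {}, 'bar': {}}}}
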
 
     def process_directories(self):
         os.environ['CYLC_SUITE_NAME'] = self.suite
-        os.environ['CYLC_SUITE_REG_PATH'] = RegPath( self.suite ).get_fpath()
+        os.environ['CYLC_SUITE_REG_PATH'] = RegPath(self.suite).get_fpath()
         os.environ['CYLC_SUITE_DEF_PATH'] = self.fdir
 
-    def check_tasks( self ):
+    def check_tasks(self):
         # Call after all tasks are defined.
         # ONLY IF VALIDATING THE SUITE
         # because checking conditional triggers below may be slow for
@@ -995,99 +1196,65 @@ class config( object ):
         #       contains the task definition sections of the suite.rc file.
         #   (b) self.taskdefs[name]
         #       contains tasks that will be used, defined by the graph.
-        # Tasks (a) may be defined but not used (e.g. commented out of the graph)
-        # Tasks (b) may not be defined in (a), in which case they are dummied out.
+        # Tasks (a) may be defined but not used (e.g. commented out of the
+        # graph)
+        # Tasks (b) may not be defined in (a), in which case they are dummied
+        # out.
 
         for taskdef in self.taskdefs.values():
             try:
                 taskdef.check_for_explicit_cycling()
             except TaskDefError as exc:
                 raise SuiteConfigError(str(exc))
- 
+
         if flags.verbose:
             print "Checking for defined tasks not used in the graph"
             for name in self.cfg['runtime']:
                 if name not in self.taskdefs:
                     if name not in self.runtime['descendants']:
-                        # any family triggers have have been replaced with members by now.
-                        print >> sys.stderr, '  WARNING: task "' + name + '" is not used in the graph.'
-
-        # warn if listed special tasks are not defined
-        for type in self.cfg['scheduling']['special tasks']:
-            for name in self.cfg['scheduling']['special tasks'][type]:
-                if type == 'clock-triggered':
-                    name = re.sub('\(.*\)','',name)
-                if re.search( '[^0-9a-zA-Z_]', name ):
-                    raise SuiteConfigError, 'ERROR: Illegal ' + type + ' task name: ' + name
-                if name not in self.taskdefs and name not in self.cfg['runtime']:
-                    raise SuiteConfigError, 'ERROR: special task "' + name + '" is not defined.'
-
-        try:
-            import Pyro.constants
-        except:
-            print >> sys.stderr, "WARNING, INCOMPLETE VALIDATION: Pyro is not installed"
-            return
-
-        # Instantiate tasks and force evaluation of trigger expressions.
-        # TODO - This is not exhaustive, it only uses the initial cycle point.
-        if flags.verbose:
-            print "Instantiating tasks to check trigger expressions"
-        for name in self.taskdefs.keys():
-            try:
-                itask = TaskProxy(
-                    self.taskdefs[name],
-                    self.start_point,
-                    'waiting',
-                    is_startup=True,
-                    validate_mode=True)
-            except Exception, x:
-                raise SuiteConfigError(
-                    'ERROR, failed to instantiate task %s: %s' % (name, x))
-            if itask.point is None:
-                if flags.verbose:
-                    print " + Task out of bounds for " + str(self.start_point) + ": " + itask.name
-                continue
-
-            # warn for purely-implicit-cycling tasks (these are deprecated).
-            if itask.tdef.sequences == itask.tdef.implicit_sequences:
-                print >> sys.stderr, (
-                    "WARNING, " + name + ": not explicitly defined in " +
-                    "dependency graphs (deprecated)"
-                )
-
-            # force trigger evaluation now
-            try:
-                itask.prerequisites.eval_all()
-            except TriggerExpressionError, x:
-                print >> sys.stderr, x
-                raise SuiteConfigError, "ERROR, " + name + ": invalid trigger expression."
-            except Exception, x:
-                print >> sys.stderr, x
-                raise SuiteConfigError, 'ERROR, ' + name + ': failed to evaluate triggers.'
-            if flags.verbose:
-                print "  + " + itask.identity + " ok"
+                        # Family triggers have been replaced with members.
+                        print >> sys.stderr, (
+                            '  WARNING: task "%s" not used in the graph.' % (
+                                name))
+        # Check declared special tasks are valid.
+        for task_type in self.cfg['scheduling']['special tasks']:
+            for name in self.cfg['scheduling']['special tasks'][task_type]:
+                if task_type in ['clock-trigger', 'clock-expire',
+                                 'external-trigger']:
+                    name = re.sub('\(.*\)', '', name)
+                if not TaskID.is_valid_name(name):
+                    raise SuiteConfigError(
+                        'ERROR: Illegal %s task name: %s' % (task_type, name))
+                if (name not in self.taskdefs and
+                        name not in self.cfg['runtime']):
+                    msg = '%s task "%s" is not defined.' % (task_type, name)
+                    if self.strict:
+                        raise SuiteConfigError("ERROR: " + msg)
+                    else:
+                        print >> sys.stderr, "WARNING: " + msg
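
Entries under the clock-trigger, clock-expire and external-trigger special task types may carry a parenthesised argument (an offset or trigger message), which the re.sub above strips before the name is validated. For example:

    import re

    for entry in ['foo(PT1H)', 'bar(file ready)', 'baz']:
        print(re.sub(r'\(.*\)', '', entry))
    # foo
    # bar
    # baz
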
 
         # Check custom script is not defined for automatic suite polling tasks
         for l_task in self.suite_polling_tasks:
             try:
-                cs = self.pcfg.getcfg( sparse=True )['runtime'][l_task]['script']
+                cs = self.pcfg.getcfg(sparse=True)['runtime'][l_task]['script']
             except:
                 pass
             else:
                 if cs:
                     print cs
                     # (allow explicit blanking of inherited script)
-                    raise SuiteConfigError( "ERROR: script cannot be defined for automatic suite polling task " + l_task )
-
+                    raise SuiteConfigError(
+                        "ERROR: script cannot be defined for automatic" +
+                        " suite polling task " + l_task)
 
-    def get_coldstart_task_list( self ):
+    def get_coldstart_task_list(self):
         return self.cfg['scheduling']['special tasks']['cold-start']
 
-    def get_task_name_list( self ):
+    def get_task_name_list(self):
         # return a list of all tasks used in the dependency graph
         return self.taskdefs.keys()
 
-    def replace_family_triggers( self, line_in, fam, members, orig='' ):
+    def replace_family_triggers(self, line_in, fam, members, orig=''):
         # Replace family trigger expressions with member trigger expressions.
         # The replacements below handle optional [T-n] cycle offsets.
 
@@ -1097,32 +1264,48 @@ class config( object ):
         paren_open = ''
         paren_close = ''
         connector = ' & '
-        if orig.endswith( '-all' ):
+        if orig.endswith('-all'):
             pass
-        elif orig.endswith( '-any' ):
+        elif orig.endswith('-any'):
             connector = ' | '
-            paren_open = '( '
-            paren_close = ' )'
+            paren_open = '('
+            paren_close = ')'
         elif orig != '':
             print >> sys.stderr, line
-            raise SuiteConfigError, 'ERROR, illegal family trigger type: ' + orig
+            raise SuiteConfigError(
+                'ERROR, illegal family trigger type: ' + orig)
         repl = orig[:-4]
 
         # TODO - can we use Replacement here instead of findall and sub:
-        m = re.findall( "(!){0,1}" + r"\b" + fam + r"\b(\[.*?]){0,1}" + orig, line )
-        m.sort() # put empty offset '' first ...
-        m.reverse() # ... then last
+        m = re.findall(
+            "(!){0,1}" + r"\b" + fam + r"\b(\[.*?]){0,1}" + orig, line)
+        m.sort()  # put empty offset '' first ...
+        m.reverse()  # ... then last
         for grp in m:
             exclam, foffset = grp
             if fam not in self.triggering_families:
                 self.triggering_families.append(fam)
-            mems = paren_open + connector.join( [ exclam + i + foffset + repl for i in members ] ) + paren_close
-            line = re.sub( exclam + r"\b" + fam + r"\b" + re.escape(foffset) + orig, mems, line )
+            mems = paren_open + connector.join(
+                [exclam + i + foffset + repl for i in members]) + paren_close
+            line = re.sub(exclam + r"\b" + fam + r"\b" + re.escape(foffset) +
+                          orig, mems, line)
         return line
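
replace_family_triggers() rewrites a qualified family trigger into member triggers joined by '&' (for '-all') or '|' (for '-any'). A simplified sketch of the expansion, ignoring the '[T-n]' offsets and '!' negation that the real code also handles:

    def expand_family_trigger(members, qualifier):
        """qualifier: e.g. 'succeed-all' or 'fail-any'."""
        trig, mode = qualifier.rsplit('-', 1)
        joiner = ' & ' if mode == 'all' else ' | '
        return '(' + joiner.join(m + ':' + trig for m in members) + ')'

    print(expand_family_trigger(['m1', 'm2'], 'succeed-all'))
    # (m1:succeed & m2:succeed)
    print(expand_family_trigger(['m1', 'm2'], 'fail-any'))
    # (m1:fail | m2:fail)
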
 
-    def process_graph_line( self, line, section, seq, offset_seq_map,
-                            tasks_to_prune=None,
-                            return_all_dependencies=False ):
+    def prune_expression(self, expression, pruned):
+        """Remove pruned nodes from a graph string left-side.
+
+        Used for pruning back-compat (cylc-5) start-up tasks from non-R1
+        sections.
+        """
+        # TODO - MAKE THIS TIDIER AND MORE GENERAL? (e.g. 'OR' EXPRESSIONS?)
+        for node in pruned:
+            expression = re.sub(node + ' *&', '', expression)
+            expression = re.sub('& *' + node, '', expression)
+        return expression
+
+    def process_graph_line(self, line, section, seq, offset_seq_map,
+                           tasks_to_prune=None,
+                           return_all_dependencies=False):
         """Extract dependent pairs from the suite.rc dependency text.
 
         Extract dependent pairs from the suite.rc textual dependency
@@ -1170,27 +1353,32 @@ class config( object ):
 
         base_interval = seq.get_interval()
 
-        ## SYNONYMS FOR TRIGGER-TYPES, e.g. 'fail' = 'failure' = 'failed' (NOT USED)
-        ## we can replace synonyms here with the standard type designator:
-        # line = re.sub( r':succe(ss|ed|eded){0,1}\b', '', line )
-        # line = re.sub( r':fail(ed|ure){0,1}\b', ':fail', line )
-        # line = re.sub( r':start(ed){0,1}\b', ':start', line )
-        # Replace "foo:finish(ed)" or "foo:complete(ed)" with "( foo | foo:fail )"
-        # line = re.sub(  r'\b(\w+(\[.*?]){0,1}):(complete(d){0,1}|finish(ed){0,1})\b', r'( \1 | \1:fail )', line )
+        # SYNONYMS FOR TRIGGER-TYPES, e.g. 'fail' = 'failure' = 'failed'
+        # (NOT USED)
+        # we can replace synonyms here with the standard type designator:
+        # line = re.sub(r':succe(ss|ed|eded){0,1}\b', '', line)
+        # line = re.sub(r':fail(ed|ure){0,1}\b', ':fail', line)
+        # line = re.sub(r':start(ed){0,1}\b', ':start', line)
+        # Replace "foo:finish(ed)" or "foo:complete(ed)"
+        # with "(foo | foo:fail)"
+        # line = re.sub(
+        #     r'\b(\w+(\[.*?]){0,1}):(complete(d){0,1}|finish(ed){0,1})\b',
+        #     r'(\1 | \1:fail)', line)
 
         # Find any dependence on other suites, record the polling target
         # info and replace with just the local task name, e.g.:
         # "foo<SUITE::TASK:fail> => bar"  becomes "foo => bar"
         # (and record that foo must automatically poll for TASK in SUITE)
-        repl = Replacement( '\\1' )
-        line = re.sub( '(\w+)(<([\w\.\-]+)::(\w+)(:\w+)?>)', repl, line )
+        repl = Replacement('\\1')
+        line = re.sub('(\w+)(<([\w\.\-]+)::(\w+)(:\w+)?>)', repl, line)
         for item in repl.match_groups:
             l_task, r_all, r_suite, r_task, r_status = item
             if r_status:
                 r_status = r_status[1:]
-            else: # default
+            else:  # default
                 r_status = 'succeed'
-            self.suite_polling_tasks[ l_task ] = ( r_suite, r_task, r_status, r_all )
+            self.suite_polling_tasks[l_task] = (
+                r_suite, r_task, r_status, r_all)
 
         # REPLACE FAMILY NAMES WITH MEMBER DEPENDENCIES
         # Sort so that longer family names get expanded first.
@@ -1198,9 +1386,9 @@ class config( object ):
         for fam in reversed(sorted(self.runtime['descendants'])):
             members = copy(self.runtime['descendants'][fam])
             for member in copy(members):
-                # (another copy here: don't remove items from the iterating list)
-                # remove family names from the member list, leave just tasks
-                # (allows using higher-level family names in the graph)
+                # (Another copy here: don't remove items from the list being
+                # iterated.) Remove family names from the member list, leaving
+                # just tasks (this allows higher-level family names to be used
+                # in the graph).
                 if member in self.runtime['descendants']:
                     members.remove(member)
             # Note, in the regular expressions below, the word boundary
@@ -1220,27 +1408,34 @@ class config( object ):
                 line = self.replace_family_triggers(
                     line, fam, members, ':' + trig_type)
 
-            if re.search( r"\b" + fam + r"\b:", line ):
+            if re.search(r"\b" + fam + r"\b:", line):
                 # fam:illegal
                 print >> sys.stderr, line
-                raise SuiteConfigError, 'ERROR, illegal family trigger detected'
+                raise SuiteConfigError(
+                    'ERROR, illegal family trigger detected')
 
-            if re.search( r"\b" + fam + r"\b[^:].*=>", line ) or re.search( r"\b" + fam + "\s*=>$", line ):
+            if (re.search(r"\b" + fam + r"\b[^:].*=>", line) or
+                    re.search(r"\b" + fam + "\s*=>$", line)):
                 # plain family names are not allowed on the left of a trigger
                 print >> sys.stderr, line
-                raise SuiteConfigError, 'ERROR, family triggers must be qualified, e.g. ' + fam + ':succeed-all'
+                raise SuiteConfigError(
+                    'ERROR, family triggers must be qualified, e.g. ' +
+                    fam + ':succeed-all')
 
             # finally replace plain family names on the right of a trigger
-            line = self.replace_family_triggers( line, fam, members )
+            line = self.replace_family_triggers(line, fam, members)
 
         # any remaining use of '-all' or '-any' implies a family trigger
         # on a non-family task, which is illegal.
         if any([":" + trig_type in line for trig_type in FAM_TRIGGER_TYPES]):
             print >> sys.stderr, line
-            raise SuiteConfigError, "ERROR: family triggers cannot be used on non-family namespaces"
+            raise SuiteConfigError(
+                "ERROR: family triggers cannot be used on non-family" +
+                " namespaces")
 
-        # Replace "foo:finish" with "( foo:succeed | foo:fail )"
-        line = re.sub(  r'\b(\w+(\[.*?]){0,1}):finish\b', r'( \1:succeed | \1:fail )', line )
+        # Replace "foo:finish" with "(foo:succeed | foo:fail)"
+        line = re.sub(
+            r'\b(\w+(\[.*?]){0,1}):finish\b', r'(\1:succeed | \1:fail)', line)
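
The ':finish' pseudo-output expands to 'succeed or fail', preserving any inter-cycle offset on the node; for example:

    import re

    line = 'foo[-P1D]:finish => bar'
    print(re.sub(r'\b(\w+(\[.*?]){0,1}):finish\b',
                 r'(\1:succeed | \1:fail)', line))
    # (foo[-P1D]:succeed | foo[-P1D]:fail) => bar
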
 
         if flags.verbose and line != orig_line:
             print 'Graph line substitutions occurred:'
@@ -1248,10 +1443,10 @@ class config( object ):
             print '  OUT:', line
 
         # Split line on dependency arrows.
-        tasks = re.split( '\s*=>\s*', line )
+        tasks = re.split('\s*=>\s*', line)
         # NOTE:  we currently use only one kind of arrow, but to use
         # several kinds we can split the string like this:
-        #     tokens = re.split( '\s*(=[>x])\s*', line ) # a => b =x c
+        #     tokens = re.split('\s*(=[>x])\s*', line) # a => b =x c
         #     tasks = tokens[0::2]                       # [a, b, c]
         #     arrow = tokens[1::2]                       # [=>, =x]
 
@@ -1259,40 +1454,46 @@ class config( object ):
         # results in empty or blank strings in the list of task names.
         arrowerr = False
         for task in tasks:
-            if re.match( '^\s*$', task ):
+            if re.match('^\s*$', task):
                 arrowerr = True
                 break
         if arrowerr:
             print >> sys.stderr, orig_line
-            raise SuiteConfigError, "ERROR: missing task name in graph line?"
+            raise SuiteConfigError("ERROR: missing task name in graph line?")
 
         # get list of pairs
         special_dependencies = []
-        for i in [0] + range( 1, len(tasks)-1 ):
+        for i in [0] + range(1, len(tasks) - 1):
             lexpression = tasks[i]
 
             if len(tasks) == 1:
                 # single node: no rhs group
                 rgroup = None
-                if re.search( '\|', lexpression ):
+                if re.search('\|', lexpression):
                     print >> sys.stderr, orig_line
-                    raise SuiteConfigError, "ERROR: Lone node groups cannot contain OR conditionals: " + lexpression
+                    raise SuiteConfigError(
+                        "ERROR: Lone node groups cannot contain OR" +
+                        " conditionals: " + lexpression)
             else:
-                rgroup = tasks[i+1]
+                rgroup = tasks[i + 1]
 
             if rgroup:
                 # '|' (OR) is not allowed on the right side
-                if re.search( '\|', rgroup ):
+                if re.search('\|', rgroup):
                     print >> sys.stderr, orig_line
-                    raise SuiteConfigError, "ERROR: OR '|' is not legal on the right side of dependencies: " + rgroup
+                    raise SuiteConfigError(
+                        "ERROR: OR '|' is not legal on the right side of" +
+                        " dependencies: " + rgroup)
 
                 # (T+/-N) offsets not allowed on the right side (as yet)
-                if re.search( '\[\s*T\s*[+-]\s*\w+\s*\]', rgroup ):
+                if re.search('\[\s*T\s*[+-]\s*\w+\s*\]', rgroup):
                     print >> sys.stderr, orig_line
-                    raise SuiteConfigError, "ERROR: time offsets are not legal on the right side of dependencies: " + rgroup
+                    raise SuiteConfigError(
+                        "ERROR: time offsets are not legal on the right" +
+                        " side of dependencies: " + rgroup)
 
                 # now split on '&' (AND) and generate corresponding pairs
-                right_nodes = re.split( '\s*&\s*', rgroup )
+                right_nodes = re.split('\s*&\s*', rgroup)
             else:
                 right_nodes = [None]
 
@@ -1301,10 +1502,10 @@ class config( object ):
                 if right_node:
                     # ignore output labels on the right (for chained
                     # tasks they are only meaningful on the left)
-                    new_right_nodes.append( re.sub( ':\w+', '', right_node ))
+                    new_right_nodes.append(re.sub(':\w+', '', right_node))
                 else:
                     # retain None's in order to handle lone nodes on the left
-                    new_right_nodes.append( None )
+                    new_right_nodes.append(None)
 
             right_nodes = new_right_nodes
 
@@ -1312,20 +1513,17 @@ class config( object ):
             n_open_brackets = lexpression.count("(")
             n_close_brackets = lexpression.count(")")
             if n_open_brackets != n_close_brackets:
-                raise SuiteConfigError, (
-                    "ERROR: missing bracket in: \"" + lexpression + "\"") 
-            nstr = re.sub( '[(|&)]', ' ', lexpression )
+                raise SuiteConfigError(
+                    "ERROR: missing bracket in: \"" + lexpression + "\"")
+            nstr = re.sub('[(|&)]', ' ', lexpression)
             nstr = nstr.strip()
-            left_nodes = re.split( ' +', nstr )
-
-            # detect and fail and self-dependence loops (foo => foo)
-            for right_node in right_nodes:
-                if right_node in left_nodes:
-                    print >> sys.stderr, (
-                        "Self-dependence detected in '" + right_node + "':")
-                    print >> sys.stderr, "  line:", line
-                    print >> sys.stderr, "  from:", orig_line
-                    raise SuiteConfigError, "ERROR: self-dependence loop detected"
+            left_nodes = re.split(' +', nstr)
+            for lnode in left_nodes:
+                if lnode.startswith('!'):
+                    print >> sys.stderr, line
+                    raise SuiteConfigError(
+                        "ERROR: suicide must be on the right of a trigger"
+                        " (%s)" % lnode)
 
             for right_node in right_nodes:
                 # foo => '!bar' means task bar should suicide if foo succeeds.
@@ -1336,14 +1534,15 @@ class config( object ):
                 else:
                     right_name = right_node
 
-                pruned_left_nodes = list(left_nodes)  # Create copy of LHS tasks.
+                # Create copy of LHS tasks.
+                pruned_left_nodes = list(left_nodes)
 
                 for left_node in left_nodes:
                     try:
                         left_graph_node = graphnode(left_node, base_interval)
                     except GraphNodeError, x:
                         print >> sys.stderr, orig_line
-                        raise SuiteConfigError, str(x)
+                        raise SuiteConfigError(str(x))
                     left_name = left_graph_node.name
                     left_output = left_graph_node.output
                     if (left_name in tasks_to_prune or
@@ -1361,12 +1560,12 @@ class config( object ):
                             special_dependencies.append(special_dep)
                     if left_name in tasks_to_prune:
                         pruned_left_nodes.remove(left_node)
-
+                        lexpression = self.prune_expression(lexpression,
+                                                            tasks_to_prune)
                 if right_name in tasks_to_prune:
                     continue
 
-                if not self.validation and not graphing_disabled:
-                    # edges not needed for validation
+                if not graphing_disabled:
                     left_edge_nodes = pruned_left_nodes
                     right_edge_node = right_name
                     if not left_edge_nodes and left_nodes:
@@ -1376,28 +1575,51 @@ class config( object ):
                     self.generate_edges(lexpression, left_edge_nodes,
                                         right_edge_node, seq, suicide)
                 self.generate_taskdefs(orig_line, pruned_left_nodes,
-                                        right_name, section,
-                                        seq, offset_seq_map,
-                                        base_interval)
+                                       right_name, section,
+                                       seq, offset_seq_map,
+                                       base_interval)
                 self.generate_triggers(lexpression, pruned_left_nodes,
-                                        right_name, seq, suicide)
+                                       right_name, seq, suicide)
         return special_dependencies
 
+    def generate_edges(self, lexpression, left_nodes, right, seq,
+                       suicide=False):
+        """Generate edges.
 
-    def generate_edges( self, lexpression, left_nodes, right, seq, suicide=False ):
-        """Add nodes from this graph section to the abstract graph edges structure."""
+        Add nodes from this graph section to the abstract graph edges
+        structure.
+        """
         conditional = False
-        if re.search( '\|', lexpression ):
+        if re.search('\|', lexpression):
             # plot conditional triggers differently
             conditional = True
 
         for left in left_nodes:
-            if left is not None:
-                e = graphing.edge( left, right, seq, suicide, conditional )
-                self.edges.append(e)
+            if left is None:
+                continue
+            if right is not None:
+                # Check for self-suicide and self-edges.
+                if left == right or left.startswith(right + ':'):
+                    # (This passes inter-cycle offsets: left[-P1D] => left)
+                    # (TODO - but not explicit null offsets like [-P0D]!)
+                    if suicide:
+                        # Self-suicide may be OK.
+                        print >> sys.stderr, (
+                            'WARNING: self-suicide is not recommended: '
+                            '%s => !%s.' % (left, right))
+                    else:
+                        # Self-edge.
+                        if left != lexpression:
+                            print >> sys.stderr, (
+                                "%s => %s" % (lexpression, right))
+                        raise SuiteConfigError(
+                            "ERROR, self-edge detected: %s => %s" % (
+                                left, right))
+            e = graphing.edge(left, right, seq, suicide, conditional)
+            self.edges.append(e)
 
-    def generate_taskdefs( self, line, left_nodes, right, section, seq,
-                           offset_seq_map, base_interval ):
+    def generate_taskdefs(self, line, left_nodes, right, section, seq,
+                          offset_seq_map, base_interval):
         """Generate task definitions for nodes on a given line."""
         for node in left_nodes + [right]:
             if not node:
@@ -1405,21 +1627,22 @@ class config( object ):
                 # for which we still define the taskdefs
                 continue
             try:
-                my_taskdef_node = graphnode( node, base_interval=base_interval )
+                my_taskdef_node = graphnode(node, base_interval=base_interval)
             except GraphNodeError, x:
                 print >> sys.stderr, line
-                raise SuiteConfigError, str(x)
+                raise SuiteConfigError(str(x))
 
             name = my_taskdef_node.name
             offset_string = my_taskdef_node.offset_string
 
             if name not in self.cfg['runtime']:
                 # naked dummy task, implicit inheritance from root
-                self.naked_dummy_tasks.append( name )
+                self.naked_dummy_tasks.append(name)
                 # These can't just be a reference to root runtime as we have to
                 # make some items task-specific: e.g. subst task name in URLs.
-                self.cfg['runtime'][name] = OrderedDict()
-                replicate(self.cfg['runtime'][name], self.cfg['runtime']['root'])
+                self.cfg['runtime'][name] = OrderedDictWithDefaults()
+                replicate(self.cfg['runtime'][name],
+                          self.cfg['runtime']['root'])
                 if 'root' not in self.runtime['descendants']:
                     # (happens when no runtimes are defined in the suite.rc)
                     self.runtime['descendants']['root'] = []
@@ -1436,22 +1659,23 @@ class config( object ):
             # check task name legality and create the taskdef
             if name not in self.taskdefs:
                 try:
-                    self.taskdefs[ name ] = self.get_taskdef( name )
+                    self.taskdefs[name] = self.get_taskdef(name)
                 except TaskDefError as exc:
                     print >> sys.stderr, line
                     raise SuiteConfigError(str(exc))
 
             if name in self.suite_polling_tasks:
                 self.taskdefs[name].suite_polling_cfg = {
-                        'suite'  : self.suite_polling_tasks[name][0],
-                        'task'   : self.suite_polling_tasks[name][1],
-                        'status' : self.suite_polling_tasks[name][2] }
+                    'suite': self.suite_polling_tasks[name][0],
+                    'task': self.suite_polling_tasks[name][1],
+                    'status': self.suite_polling_tasks[name][2]}
 
             if not my_taskdef_node.is_absolute:
                 if offset_string:
                     self.taskdefs[name].used_in_offset_trigger = True
                     if SyntaxVersion.VERSION == VERSION_PREV:
-                        # Implicit cycling means foo[T+6] generates a +6 sequence.
+                        # Implicit cycling means foo[T+6] generates a +6
+                        # sequence.
                         if offset_string in offset_seq_map:
                             seq_offset = offset_seq_map[offset_string]
                         else:
@@ -1469,33 +1693,29 @@ class config( object ):
                             self.sequences.append(seq_offset)
                     # We don't handle implicit cycling in new-style cycling.
                 else:
-                    self.taskdefs[ name ].add_sequence(seq)
+                    self.taskdefs[name].add_sequence(seq)
 
             if self.run_mode == 'live':
-                # register any explicit internal outputs
-                if 'outputs' in self.cfg['runtime'][name]:
-                    for lbl,msg in self.cfg['runtime'][name]['outputs'].items():
-                        outp = output(msg, base_interval)
-                        self.taskdefs[name].outputs.append(outp)
-
-    def generate_triggers( self, lexpression, left_nodes, right, seq, suicide ):
-        if not right:
-            # lefts are lone nodes; no more triggers to define.
-            return
-        
-        if not left_nodes:
-            # Nothing actually remains to trigger right.
+                # Record message outputs.
+                for lbl, msg in self.cfg['runtime'][name]['outputs'].items():
+                    outp = output(msg, base_interval)
+                    # Check for a cycle offset placeholder.
+                    if not re.search(r'\[[^\]]*\]', msg):
+                        print >> sys.stderr, (
+                            "Message outputs require an "
+                            "offset placeholder (e.g. '[]' or '[-P2M]'):")
+                        print >> sys.stderr, "  %s = %s" % (lbl, msg)
+                        raise SuiteConfigError(
+                            'ERROR: bad message output string')
+                    self.taskdefs[name].outputs.append(outp)
+
+    def generate_triggers(self, lexpression, left_nodes, right, seq, suicide):
+        if not right or not left_nodes:
+            # Lone nodes have no triggers.
             return
 
         base_interval = seq.get_interval()
 
-        conditional = False
-        if re.search( '\|', lexpression ):
-            conditional = True
-            # For single triggers or '&'-only ones, which will be the
-            # vast majority, we needn't use conditional prerequisites
-            # (they may be less efficient due to python eval at run time).
-
         ctrig = {}
         cname = {}
         for left in left_nodes:
@@ -1524,34 +1744,20 @@ class config( object ):
                 ltaskdef.intercycle_offsets.append(offset_tuple)
 
             trig = trigger(
-                    lnode.name, lnode.output, lnode.offset_string,
-                    cycle_point, suicide,
-                    self.cfg['runtime'][lnode.name]['outputs'],
-                    base_interval
-            )
-
-            if self.run_mode != 'live' and not trig.is_standard():
-                # Dummy tasks do not report message outputs.
-                continue
+                lnode.name, lnode.output, lnode.offset_string, cycle_point,
+                suicide, self.cfg['runtime'][lnode.name]['outputs'],
+                base_interval)
 
-            if not conditional:
-                self.taskdefs[right].add_trigger( trig, seq )
-                continue
-
-            # CONDITIONAL TRIGGERS
-            # Use fully qualified name for the expression label
-            # (task name is not unique, e.g.: "F | F:fail => G")
+            # Use fully qualified name for trigger expression label
+            # (task name is not unique, e.g.: "F | F:fail => G").
             label = self.get_conditional_label(left)
             ctrig[label] = trig
             cname[label] = lnode.name
 
-        if not conditional:
-            return
-
         expr = self.get_conditional_label(lexpression)
-        self.taskdefs[right].add_conditional_trigger( ctrig, expr, seq )
+        self.taskdefs[right].add_trigger(ctrig, expr, seq)
 
-    def get_actual_first_point( self, start_point ):
+    def get_actual_first_point(self, start_point):
         # Get actual first cycle point for the suite (get all
         # sequences to adjust the putative start time upward)
         if (self._start_point_for_actual_first_point is not None and
@@ -1561,17 +1767,17 @@ class config( object ):
         self._start_point_for_actual_first_point = start_point
         adjusted = []
         for seq in self.sequences:
-            foo = seq.get_first_point( start_point )
+            foo = seq.get_first_point(start_point)
             if foo:
-                adjusted.append( foo )
-        if len( adjusted ) > 0:
+                adjusted.append(foo)
+        if len(adjusted) > 0:
             adjusted.sort()
             self.actual_first_point = adjusted[0]
         else:
             self.actual_first_point = start_point
         return self.actual_first_point
 
-    def get_conditional_label( self, expression ):
+    def get_conditional_label(self, expression):
         """Return a label to ID the expression.
 
         Special characters such as [, or ^ are replaced with
@@ -1595,9 +1801,10 @@ class config( object ):
             label = re.sub(regex, replacement, label)
         return label
 
-    def get_graph_raw( self, start_point_string, stop_point_string,
-            group_nodes=[], ungroup_nodes=[], ungroup_recursive=False,
-            group_all=False, ungroup_all=False ):
+    def get_graph_raw(self, start_point_string, stop_point_string,
+                      group_nodes=[], ungroup_nodes=[],
+                      ungroup_recursive=False, group_all=False,
+                      ungroup_all=False):
         """Convert the abstract graph edges held in self.edges (etc.) to
         actual edges for a concrete range of cycle points."""
 
@@ -1619,7 +1826,7 @@ class config( object ):
                 for fam in members:
                     if fam != 'root':
                         if fam not in self.closed_families:
-                            self.closed_families.append( fam )
+                            self.closed_families.append(fam)
         elif ungroup_all:
             # Ungroup all family nodes
             self.closed_families = []
@@ -1629,7 +1836,7 @@ class config( object ):
                 parent = hierarchy[node][1]
                 if parent not in self.closed_families:
                     if parent != 'root':
-                        self.closed_families.append( parent )
+                        self.closed_families.append(parent)
         elif len(ungroup_nodes) > 0:
             # Ungroup chosen family nodes
             for node in ungroup_nodes:
@@ -1643,14 +1850,25 @@ class config( object ):
                         if fam in members[node]:
                             self.closed_families.remove(fam)
 
+        n_points = self.cfg['visualization']['number of cycle points']
+
+        graph_id = (start_point_string, stop_point_string, set(group_nodes),
+                    set(ungroup_nodes), ungroup_recursive, group_all,
+                    ungroup_all, set(self.closed_families),
+                    set(self.edges), n_points)
+        if graph_id == self._last_graph_raw_id:
+            return self._last_graph_raw_edges
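
The graph_id tuple gives get_graph_raw() a single-entry cache: when a request repeats the previous one exactly, the previously computed edges are returned unchanged. The pattern in isolation (hypothetical class, not part of cylc):

    class LastCallCache(object):
        """Remember only the most recent (key, result) pair."""

        def __init__(self):
            self._key = None
            self._result = None

        def get(self, key, compute):
            if key == self._key:
                return self._result
            self._result = compute()
            self._key = key
            return self._result

    calls = []

    def compute_edges():
        calls.append(1)
        return ['edge1', 'edge2']

    cache = LastCallCache()
    cache.get(('2015', '2016'), compute_edges)
    cache.get(('2015', '2016'), compute_edges)
    print(len(calls))   # 1 - the second request was served from the cache
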
+
         # Now define the concrete graph edges (pairs of nodes) for plotting.
         gr_edges = {}
         start_point = get_point(start_point_string)
         actual_first_point = self.get_actual_first_point(start_point)
 
+        suite_final_point = get_point(
+            self.cfg['scheduling']['final cycle point'])
+
         # For the computed stop point, we store n_points of each sequence,
         # and then cull later to the first n_points over all sequences.
-        n_points = self.cfg['visualization']['number of cycle points']
         if stop_point_string is not None:
             stop_point = get_point(stop_point_string)
         else:
@@ -1674,23 +1892,26 @@ class config( object ):
                 if stop_point is not None and point > stop_point:
                     # Beyond requested final cycle point.
                     break
+                if suite_final_point is not None and point > suite_final_point:
+                    # Beyond suite final cycle point.
+                    break
                 if stop_point is None and len(new_points) > n_points:
                     # Take n_points cycles from each sequence.
                     break
-                not_initial_cycle = (point != i_point)
 
                 r_id = e.get_right(point, start_point)
-                l_id = e.get_left(point, start_point, e.sequence.get_interval())
+                l_id = e.get_left(
+                    point, start_point, e.sequence.get_interval())
 
                 action = True
-                if l_id == None and r_id == None:
+                if l_id is None and r_id is None:
                     # Nothing to add to the graph.
                     action = False
-                if l_id != None:
+                if l_id is not None:
                     # Check that l_id is not earlier than start time.
                     tmp, lpoint_string = TaskID.split(l_id)
-                    ## NOTE BUG GITHUB #919
-                    ##sct = start_point
+                    # NOTE BUG GITHUB #919
+                    # sct = start_point
                     sct = actual_first_point
                     lct = get_point(lpoint_string)
                     if sct > lct:
@@ -1707,7 +1928,8 @@ class config( object ):
                     nl, nr = self.close_families(l_id, r_id)
                     if point not in gr_edges:
                         gr_edges[point] = []
-                    gr_edges[point].append((nl, nr, None, e.suicide, e.conditional))
+                    gr_edges[point].append(
+                        (nl, nr, None, e.suicide, e.conditional))
                 # Increment the cycle point.
                 point = e.sequence.get_next_point_on_sequence(point)
 
@@ -1722,16 +1944,19 @@ class config( object ):
             # Flatten nested list.
             edges = [i for sublist in values for i in sublist]
 
+        self._last_graph_raw_id = graph_id
+        self._last_graph_raw_edges = edges
         return edges
 
     def get_graph(self, start_point_string=None, stop_point_string=None,
-            group_nodes=[], ungroup_nodes=[], ungroup_recursive=False,
-            group_all=False, ungroup_all=False, ignore_suicide=False,
-            subgraphs_on=False):
+                  group_nodes=[], ungroup_nodes=[], ungroup_recursive=False,
+                  group_all=False, ungroup_all=False, ignore_suicide=False,
+                  subgraphs_on=False):
 
         # If graph extent is not given, use visualization settings.
         if start_point_string is None:
-            start_point_string = self.cfg['visualization']['initial cycle point']
+            start_point_string = (
+                self.cfg['visualization']['initial cycle point'])
 
         if stop_point_string is None:
             vfcp = self.cfg['visualization']['final cycle point']
@@ -1755,18 +1980,18 @@ class config( object ):
             group_all, ungroup_all
         )
         graph = graphing.CGraph(
-                self.suite, self.suite_polling_tasks, self.cfg['visualization'])
-        graph.add_edges( gr_edges, ignore_suicide )
+            self.suite, self.suite_polling_tasks, self.cfg['visualization'])
+        graph.add_edges(gr_edges, ignore_suicide)
         if subgraphs_on:
-            graph.add_cycle_point_subgraphs( gr_edges )
+            graph.add_cycle_point_subgraphs(gr_edges)
         return graph
 
-    def get_node_labels( self, start_point_string, stop_point_string):
-        graph = self.get_graph( start_point_string, stop_point_string,
-                                ungroup_all=True )
-        return [ i.attr['label'].replace('\\n','.') for i in graph.nodes() ]
+    def get_node_labels(self, start_point_string, stop_point_string):
+        graph = self.get_graph(start_point_string, stop_point_string,
+                               ungroup_all=True)
+        return [i.attr['label'].replace('\\n', '.') for i in graph.nodes()]
 
-    def close_families( self, nlid, nrid ):
+    def close_families(self, nlid, nrid):
         # Generate final node names, replacing family members with
         # family nodes if requested.
 
@@ -1787,18 +2012,19 @@ class config( object ):
             nr = nrid
 
         # for nested families, only consider the outermost one
-        clf = copy( self.closed_families )
+        clf = copy(self.closed_families)
         for i in self.closed_families:
             for j in self.closed_families:
                 if i in members[j]:
                     # i is a member of j
                     if i in clf:
-                        clf.remove( i )
+                        clf.remove(i)
 
         for fam in clf:
             if lname in members[fam] and rname in members[fam]:
                 # l and r are both members of fam
-                #nl, nr = None, None  # this makes 'the graph disappear if grouping 'root'
+                # nl, nr = None, None
+                # this makes the graph disappear if grouping 'root'
                 nl = TaskID.get(fam, lpoint_string)
                 nr = TaskID.get(fam, rpoint_string)
                 break
@@ -1823,8 +2049,6 @@ class config( object ):
             )
         back_comp_initial_tasks = list(start_up_tasks)
 
-        has_non_async_graphs = False
-
         section_seq_map = {}
 
         # Set up our backwards-compatibility handling of async graphs.
@@ -1846,7 +2070,6 @@ class config( object ):
         for item, value in self.cfg['scheduling']['dependencies'].items():
             if item == 'graph':
                 continue
-            has_non_async_graphs = True
             items.append((item, value, back_comp_initial_tasks))
 
         back_comp_initial_dep_points = {}
@@ -1983,6 +2206,17 @@ class config( object ):
             # ignore blank lines
             if not line:
                 continue
+            # Check for illegal double-char conditional operators.
+            m = re.search(r"(&&)|(\|\|)", line)
+            if m:
+                bad_opr = m.groups()[0]
+                if bad_opr == '&&':
+                    raise SuiteConfigError(
+                        "ERROR: the graph AND operator is '&': %s" % line)
+                else:
+                    raise SuiteConfigError(
+                        "ERROR: the graph OR operator is '|': %s" % line)
+
             # generate pygraphviz graph nodes and edges, and task definitions
             special_dependencies.extend(self.process_graph_line(
                 line, section, seq, offset_seq_map,
@@ -2009,9 +2243,12 @@ class config( object ):
         if name in self.cfg['scheduling']['special tasks']['cold-start']:
             taskd.is_coldstart = True
 
-        # Set clock-triggered tasks.
         if name in self.clock_offsets:
             taskd.clocktrigger_offset = self.clock_offsets[name]
+        if name in self.expiration_offsets:
+            taskd.expiration_offset = self.expiration_offsets[name]
+        if name in self.ext_triggers:
+            taskd.external_triggers.append(self.ext_triggers[name])
 
         taskd.sequential = (
             name in self.cfg['scheduling']['special tasks']['sequential'])
@@ -2022,14 +2259,6 @@ class config( object ):
 
         return taskd
 
-    def get_task_proxy(self, name, *args, **kwargs):
-        """Return a task proxy for a named task."""
-        try:
-            tdef = self.taskdefs[name]
-        except KeyError:
-            raise TaskNotDefinedError(name)
-        return TaskProxy(tdef, *args, **kwargs)
-
     def describe(self, name):
         """Return title and description of the named task."""
         return self.taskdefs[name].describe()
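
For reference, the '&&'/'||' check added to the graph-line parsing in this
file can be read in isolation as the sketch below; the helper name is
hypothetical and a plain ValueError stands in for SuiteConfigError, since
cylc graph strings only accept the single-character '&' (AND) and '|' (OR)
operators.

    import re

    def check_graph_operators(line):
        """Reject the illegal '&&' and '||' graph operators."""
        match = re.search(r"(&&)|(\|\|)", line)
        if match:
            if match.groups()[0] == '&&':
                raise ValueError(
                    "ERROR: the graph AND operator is '&': %s" % line)
            raise ValueError(
                "ERROR: the graph OR operator is '|': %s" % line)

    check_graph_operators("foo & bar => baz")     # accepted
    # check_graph_operators("foo || bar => baz")  # raises ValueError
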
diff --git a/lib/cylc/cycling/__init__.py b/lib/cylc/cycling/__init__.py
index 47513be..e1d89a3 100644
--- a/lib/cylc/cycling/__init__.py
+++ b/lib/cylc/cycling/__init__.py
@@ -49,6 +49,17 @@ class IntervalParsingError(ValueError):
         return self.ERROR_MESSAGE.format(*self.args)
 
 
+class SequenceDegenerateError(Exception):
+
+    """An error raised when adjacent points on a sequence are equal."""
+
+    ERROR_MESSAGE = (
+        "Sequence {0}, point format {1}: equal adjacent points: {2} => {3}.")
+
+    def __str__(self):
+        return self.ERROR_MESSAGE.format(*self.args)
+
+
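
The new SequenceDegenerateError simply formats whatever positional arguments
it is raised with; the iso8601.py changes further down pass the recurrence,
the dump format and the two equal points. A self-contained sketch with
made-up argument values:

    class SequenceDegenerateError(Exception):

        """An error raised when adjacent points on a sequence are equal."""

        ERROR_MESSAGE = (
            "Sequence {0}, point format {1}: equal adjacent points: "
            "{2} => {3}.")

        def __str__(self):
            return self.ERROR_MESSAGE.format(*self.args)

    try:
        raise SequenceDegenerateError("R/2015/P0Y", "CCYY", "2015", "2015")
    except SequenceDegenerateError as exc:
        print str(exc)
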
 class PointBase(object):
 
     """The base class for single points in a cycler sequence.
diff --git a/lib/cylc/cycling/integer.py b/lib/cylc/cycling/integer.py
index 65071b4..8f5dbe3 100755
--- a/lib/cylc/cycling/integer.py
+++ b/lib/cylc/cycling/integer.py
@@ -41,7 +41,7 @@ CYCLER_TYPE_SORT_KEY_INTEGER = "a"
 # can use absolute integer points such as "5" or "10". We also can't
 # extrapolate intervals from the date-time truncation information -
 # e.g. assuming 'T00/P1D' from 'T00'.
-# 
+#
 # We can also use relative point notation in a similar way to the
 # date-time offset notation. For example, we can write "5 after the
 # initial cycle point" as '+P5'.
@@ -68,7 +68,7 @@ RE_COMPONENTS = {
 
 RECURRENCE_FORMAT_RECS = [
     (re.compile(regex % RE_COMPONENTS), format_num)
-     for (regex, format_num) in [
+    for (regex, format_num) in [
         # START (not supported)
         # (r"^%(start)s$", 3),
         # Rn/START/END
@@ -99,7 +99,7 @@ RECURRENCE_FORMAT_RECS = [
         # e.g. R5/P2, R7/P1
         (r"^%(reps)s?/%(intv)s/?$", 4),
         # Rn//END (not supported)
-        #(r"^%(reps_1)s//%(end)s$", 4),
+        # (r"^%(reps_1)s//%(end)s$", 4),
         # R1, repeat once at INITIAL
         # e.g. R1, R1/
         (r"^%(reps_1)s/?(?P<start>$)", 3),
@@ -490,7 +490,7 @@ class IntegerSequence(SequenceBase):
             point = self.get_next_point(point)
         return point
 
-    def get_start_point( self ):
+    def get_start_point(self):
         """Return the first point in this sequence, or None."""
         return self.p_start
 
@@ -539,10 +539,10 @@ def get_point_from_expression(point_expr, context_point, is_required=False):
 def test():
     """Run some simple tests for integer cycling."""
     sequence = IntegerSequence('R/1/P3', 1, 10)
-    #sequence = IntegerSequence('R/c2/P2', 1, 10)
-    #sequence = IntegerSequence('R2/c2/P2', 1, 10)
-    #sequence = IntegerSequence('R2/c4/c6', 1, 10)
-    #sequence = IntegerSequence('R2/P2/c6', 1, 10)
+    # sequence = IntegerSequence('R/c2/P2', 1, 10)
+    # sequence = IntegerSequence('R2/c2/P2', 1, 10)
+    # sequence = IntegerSequence('R2/c4/c6', 1, 10)
+    # sequence = IntegerSequence('R2/P2/c6', 1, 10)
 
     sequence.set_offset(IntegerInterval('P4'))
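
For context, test() above shows minimal use of the integer cycling API; a
comparable snippet, assuming the constructor and accessor signatures used in
this module, would be:

    from cylc.cycling.integer import IntegerSequence, IntegerInterval

    # 'R/1/P3': repeat every 3 points starting at 1, within context 1..10.
    sequence = IntegerSequence('R/1/P3', 1, 10)
    sequence.set_offset(IntegerInterval('P4'))
    start = sequence.get_start_point()
    print start, sequence.get_next_point(start)
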
 
diff --git a/lib/cylc/cycling/iso8601.py b/lib/cylc/cycling/iso8601.py
index 8559a75..9121bcb 100755
--- a/lib/cylc/cycling/iso8601.py
+++ b/lib/cylc/cycling/iso8601.py
@@ -28,7 +28,7 @@ from cylc.syntax_flags import set_syntax_version, VERSION_PREV, VERSION_NEW
 from cylc.time_parser import CylcTimeParser
 from cylc.cycling import (
     PointBase, IntervalBase, SequenceBase, PointParsingError,
-    IntervalParsingError)
+    IntervalParsingError, SequenceDegenerateError)
 from parsec.validate import IllegalValueError
 
 CYCLER_TYPE_ISO8601 = "iso8601"
@@ -389,6 +389,10 @@ class ISO8601Sequence(SequenceBase):
         prev_point = self.recurrence.get_prev(point_parse(point.value))
         if prev_point:
             res = ISO8601Point(str(prev_point))
+            if res == point:
+                raise SequenceDegenerateError(self.recurrence,
+                                              SuiteSpecifics.DUMP_FORMAT,
+                                              res, point)
         return res
 
     def get_nearest_prev_point(self, point):
@@ -404,7 +408,13 @@ class ISO8601Sequence(SequenceBase):
             prev_iso_point = recurrence_iso_point
         if prev_iso_point is None:
             return None
-        return ISO8601Point(str(prev_iso_point))
+        nearest_point = ISO8601Point(str(prev_iso_point))
+        if nearest_point == point:
+            raise SequenceDegenerateError(
+                self.recurrence, SuiteSpecifics.DUMP_FORMAT,
+                nearest_point, point
+            )
+        return nearest_point
 
     def get_next_point(self, point):
         """Return the next point > p, or None if out of bounds."""
@@ -420,7 +430,13 @@ class ISO8601Sequence(SequenceBase):
                         self._MAX_CACHED_POINTS):
                     self._cached_next_point_values.popitem()
                 self._cached_next_point_values[point.value] = next_point_value
-                return ISO8601Point(next_point_value)
+                next_point = ISO8601Point(next_point_value)
+                if next_point == point:
+                    raise SequenceDegenerateError(
+                        self.recurrence, SuiteSpecifics.DUMP_FORMAT,
+                        next_point, point
+                    )
+                return next_point
         return None
 
     def get_next_point_on_sequence(self, point):
@@ -430,6 +446,11 @@ class ISO8601Sequence(SequenceBase):
         next_point = self.recurrence.get_next(point_parse(point.value))
         if next_point:
             result = ISO8601Point(str(next_point))
+            if result == point:
+                raise SequenceDegenerateError(
+                    self.recurrence, SuiteSpecifics.DUMP_FORMAT,
+                    point, result
+                )
         return result
 
     def get_first_point(self, point):
@@ -450,7 +471,7 @@ class ISO8601Sequence(SequenceBase):
                 return ISO8601Point(first_point_value)
         return None
 
-    def get_start_point( self ):
+    def get_start_point(self):
         """Return the first point in this sequence, or None."""
         for recurrence_iso_point in self.recurrence:
             return ISO8601Point(str(recurrence_iso_point))
@@ -561,8 +582,8 @@ def init_from_cfg(cfg):
     while dep_sections:
         dep_section = dep_sections.pop(0)
         if re.search("(?![^(]+\)),", dep_section):
-            dep_sections.extend([i.strip() for i in
-                                    re.split("(?![^(]+\)),", dep_section)])
+            dep_sections.extend(
+                [i.strip() for i in re.split("(?![^(]+\)),", dep_section)])
             continue
         if dep_section == "graph":
             if cfg['scheduling']['dependencies']['graph']:
@@ -627,7 +648,6 @@ def init(num_expanded_year_digits=0, custom_dump_format=None, time_zone=None,
             SuiteSpecifics.DUMP_FORMAT = EXPANDED_DATE_TIME_FORMAT + time_zone
         else:
             SuiteSpecifics.DUMP_FORMAT = DATE_TIME_FORMAT + time_zone
-        
     else:
         SuiteSpecifics.DUMP_FORMAT = custom_dump_format
         if u"+X" not in custom_dump_format and num_expanded_year_digits:
@@ -667,8 +687,7 @@ def get_point_relative(offset_string, base_point):
     return ISO8601Point(str(
         SuiteSpecifics.abbrev_util.parse_timepoint(
             offset_string, context_point=_point_parse(base_point.value))
-        )
-    )
+    ))
 
 
 def interval_parse(interval_string):
diff --git a/lib/cylc/cylc_mode.py b/lib/cylc/cylc_mode.py
index 894fec9..9097d11 100644
--- a/lib/cylc/cylc_mode.py
+++ b/lib/cylc/cylc_mode.py
@@ -18,19 +18,19 @@
 
 import os
 
-class mode( object ):
-    def __init__( self ):
 
+class mode(object):
+    def __init__(self):
         self.mode = 'raw'
         if 'CYLC_MODE' in os.environ:
-            self.mode = os.environ[ 'CYLC_MODE' ]
+            self.mode = os.environ['CYLC_MODE']
             # 'scheduler' or 'submit'
 
-    def is_raw( self ):
+    def is_raw(self):
         return self.mode == 'raw'
 
-    def is_scheduler( self ):
+    def is_scheduler(self):
         return self.mode == 'scheduler'
 
-    def is_submit( self ):
+    def is_submit(self):
         return self.mode == 'submit'
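
The reformatted mode class is just a thin wrapper around the CYLC_MODE
environment variable set by the scheduler or by "cylc submit"; roughly:

    import os
    from cylc.cylc_mode import mode

    os.environ['CYLC_MODE'] = 'scheduler'
    m = mode()
    print m.is_scheduler(), m.is_submit(), m.is_raw()  # True False False
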
diff --git a/lib/cylc/cylc_pyro_client.py b/lib/cylc/cylc_pyro_client.py
deleted file mode 100644
index d2d8cfb..0000000
--- a/lib/cylc/cylc_pyro_client.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-try:
-    import Pyro.core
-except ImportError, x:
-    raise SystemExit("ERROR: Pyro is not installed")
-
-import sys
-from optparse import OptionParser
-from suite_host import get_hostname
-from time import sleep
-from passphrase import passphrase
-from owner import user
-from port_file import port_retriever
-import flags
-
-class client( object ):
-    def __init__( self, suite, pphrase=None, owner=user, host=get_hostname(),
-            pyro_timeout=None, port=None ):
-        self.suite = suite
-        self.owner = owner
-        self.host = host
-        self.port = port
-        if pyro_timeout:
-            self.pyro_timeout = float(pyro_timeout)
-        else:
-            self.pyro_timeout = None
-        self.pphrase = pphrase
-
-    def get_proxy( self, target ):
-        if self.port:
-            if flags.verbose:
-                print "Port number given:", self.port
-        else:
-            self.port = port_retriever( self.suite, self.host, self.owner ).get()
-
-        # get a pyro proxy for the target object
-        objname = self.owner + '.' + self.suite + '.' + target
-
-        uri = 'PYROLOC://' + self.host + ':' + str(self.port) + '/' + objname
-        # callers need to check for Pyro.NamingError if target object not found:
-        proxy = Pyro.core.getProxyForURI(uri)
-
-        proxy._setTimeout(self.pyro_timeout)
-
-        if self.pphrase:
-            proxy._setIdentification( self.pphrase )
-
-        return proxy
diff --git a/lib/cylc/cylc_xdot.py b/lib/cylc/cylc_xdot.py
index 25b04ed..62eb31d 100644
--- a/lib/cylc/cylc_xdot.py
+++ b/lib/cylc/cylc_xdot.py
@@ -22,8 +22,9 @@ import subprocess
 import gtk
 import time
 import gobject
-import config
-import os, sys
+from cylc.config import SuiteConfig
+import os
+import sys
 import re
 from graphing import CGraphPlain
 from cylc.task_id import TaskID
@@ -33,6 +34,7 @@ Cylc-modified xdot windows for the "cylc graph" command.
 TODO - factor more commonality out of MyDotWindow, MyDotWindow2
 """
 
+
 class CylcDotViewerCommon(xdot.DotWindow):
     def load_config(self):
         if self.suiterc:
@@ -42,12 +44,13 @@ class CylcDotViewerCommon(xdot.DotWindow):
             is_reload = False
             collapsed = []
         try:
-            self.suiterc = config.config(self.suite, self.file,
-                    template_vars=self.template_vars,
-                    template_vars_file=self.template_vars_file,
-                    is_reload=is_reload, collapsed=collapsed,
-                    vis_start_string=self.start_point_string,
-                    vis_stop_string=self.stop_point_string)
+            self.suiterc = SuiteConfig(
+                self.suite, self.file,
+                template_vars=self.template_vars,
+                template_vars_file=self.template_vars_file,
+                is_reload=is_reload, collapsed=collapsed,
+                vis_start_string=self.start_point_string,
+                vis_stop_string=self.stop_point_string)
         except Exception, x:
             print >> sys.stderr, "Failed - parsing error?"
             print >> sys.stderr, x
@@ -72,7 +75,7 @@ class CylcDotViewerCommon(xdot.DotWindow):
 
 
 class MyDotWindow2(CylcDotViewerCommon):
-    """Override xdot to get rid of some buttons and parse graph from suite.rc"""
+    """Override xdot to get rid of some buttons + parse graph from suite.rc"""
     # used by "cylc graph" to plot runtime namespace graphs
 
     ui = '''
@@ -90,9 +93,9 @@ class MyDotWindow2(CylcDotViewerCommon):
         </toolbar>
     </ui>
     '''
+
     def __init__(self, suite, suiterc, template_vars,
-            template_vars_file, orientation="TB",
-            should_hide=False):
+                 template_vars_file, orientation="TB", should_hide=False):
         self.outfile = None
         self.disable_output_image = False
         self.suite = suite
@@ -115,7 +118,7 @@ class MyDotWindow2(CylcDotViewerCommon):
 
         window.set_title('Cylc Suite Runtime Inheritance Graph Viewer')
         window.set_default_size(512, 512)
-        window.set_icon( util.get_icon() )
+        window.set_icon(util.get_icon())
 
         vbox = gtk.VBox()
         window.add(vbox)
@@ -173,17 +176,17 @@ class MyDotWindow2(CylcDotViewerCommon):
             self.show_all()
         self.load_config()
 
-    def get_graph( self ):
+    def get_graph(self):
         title = self.suite + ': runtime inheritance graph'
-        graph = CGraphPlain( title )
+        graph = CGraphPlain(title)
         graph.graph_attr['rankdir'] = self.orientation
         for ns in self.inherit:
             for p in self.inherit[ns]:
                 attr = {}
                 attr['color'] = 'royalblue'
-                graph.add_edge( p, ns, **attr )
-                nl = graph.get_node( p )
-                nr = graph.get_node( ns )
+                graph.add_edge(p, ns, **attr)
+                nl = graph.get_node(p)
+                nr = graph.get_node(ns)
                 for n in nl, nr:
                     n.attr['shape'] = 'box'
                     n.attr['style'] = 'filled'
@@ -192,15 +195,15 @@ class MyDotWindow2(CylcDotViewerCommon):
 
         self.graph = graph
         self.filter_graph()
-        self.set_dotcode( graph.string() )
+        self.set_dotcode(graph.string())
 
-    def on_left_to_right( self, toolitem ):
+    def on_left_to_right(self, toolitem):
         if toolitem.get_active():
-            self.set_orientation( "LR" )  # Left to right ordering of nodes
+            self.set_orientation("LR")  # Left to right ordering of nodes
         else:
-            self.set_orientation( "TB" )  # Top to bottom (default) ordering
+            self.set_orientation("TB")  # Top to bottom (default) ordering
 
-    def save_action( self, toolitem ):
+    def save_action(self, toolitem):
         chooser = gtk.FileChooserDialog(title="Save Graph",
                                         action=gtk.FILE_CHOOSER_ACTION_SAVE,
                                         buttons=(gtk.STOCK_CANCEL,
@@ -215,7 +218,7 @@ class MyDotWindow2(CylcDotViewerCommon):
             self.outfile = chooser.get_filename()
             if self.outfile:
                 try:
-                    self.graph.draw( self.outfile, prog='dot' )
+                    self.graph.draw(self.outfile, prog='dot')
                 except IOError, x:
                     msg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                                             buttons=gtk.BUTTONS_OK,
@@ -226,7 +229,7 @@ class MyDotWindow2(CylcDotViewerCommon):
         else:
             chooser.destroy()
 
-    def set_orientation( self, orientation="TB" ):
+    def set_orientation(self, orientation="TB"):
         """Set the orientation of the graph node ordering."""
         if orientation == self.orientation:
             return False
@@ -239,8 +242,8 @@ class MyDotWindow2(CylcDotViewerCommon):
         return True
 
 
-class MyDotWindow( CylcDotViewerCommon ):
-    """Override xdot to get rid of some buttons and parse graph from suite.rc"""
+class MyDotWindow(CylcDotViewerCommon):
+    """Override xdot to get rid of some buttons + parse graph from suite.rc"""
     # used by "cylc graph" to plot dependency graphs
 
     ui = '''
@@ -262,9 +265,10 @@ class MyDotWindow( CylcDotViewerCommon ):
         </toolbar>
     </ui>
     '''
+
     def __init__(self, suite, suiterc, start_point_string, stop_point_string,
-            template_vars, template_vars_file, orientation="TB",
-            subgraphs_on=False, should_hide=False):
+                 template_vars, template_vars_file, orientation="TB",
+                 subgraphs_on=False, ignore_suicide=True, should_hide=False):
         self.outfile = None
         self.disable_output_image = False
         self.suite = suite
@@ -274,7 +278,7 @@ class MyDotWindow( CylcDotViewerCommon ):
         self.subgraphs_on = subgraphs_on
         self.template_vars = template_vars
         self.template_vars_file = template_vars_file
-        self.ignore_suicide = False
+        self.ignore_suicide = ignore_suicide
         self.start_point_string = start_point_string
         self.stop_point_string = stop_point_string
         self.filter_recs = []
@@ -289,7 +293,7 @@ class MyDotWindow( CylcDotViewerCommon ):
 
         window.set_title('Cylc Suite Dependency Graph Viewer')
         window.set_default_size(512, 512)
-        window.set_icon( util.get_icon() )
+        window.set_icon(util.get_icon())
         vbox = gtk.VBox()
         window.add(vbox)
 
@@ -307,12 +311,12 @@ class MyDotWindow( CylcDotViewerCommon ):
         self.actiongroup = actiongroup
 
         # create new stock icons for group and ungroup actions
-        imagedir = os.environ[ 'CYLC_DIR' ] + '/images/icons'
+        imagedir = os.environ['CYLC_DIR'] + '/images/icons'
         factory = gtk.IconFactory()
-        for i in [ 'group', 'ungroup' ]:
-            pixbuf = gtk.gdk.pixbuf_new_from_file( imagedir + '/' + i + '.png' )
+        for i in ['group', 'ungroup']:
+            pixbuf = gtk.gdk.pixbuf_new_from_file(imagedir + '/' + i + '.png')
             iconset = gtk.IconSet(pixbuf)
-            factory.add( i, iconset )
+            factory.add(i, iconset)
         factory.add_default()
 
         actiongroup.add_actions((
@@ -358,6 +362,10 @@ class MyDotWindow( CylcDotViewerCommon ):
             '/ToolBar/Subgraphs')
         subgraphs_toolitem.set_active(self.subgraphs_on)
 
+        igsui_toolitem = uimanager.get_widget(
+            '/ToolBar/IgnoreSuicide')
+        igsui_toolitem.set_active(self.ignore_suicide)
+
         # Create a Toolbar
 
         toolbar = uimanager.get_widget('/ToolBar')
@@ -365,9 +373,9 @@ class MyDotWindow( CylcDotViewerCommon ):
         vbox.pack_start(self.widget)
 
         eb = gtk.EventBox()
-        eb.add( gtk.Label( "right-click on nodes to control family grouping" ) )
-        eb.modify_bg( gtk.STATE_NORMAL, gtk.gdk.color_parse( '#8be' ) )
-        vbox.pack_start( eb, False )
+        eb.add(gtk.Label("right-click on nodes to control family grouping"))
+        eb.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse('#8be'))
+        vbox.pack_start(eb, False)
 
         self.set_focus(self.widget)
 
@@ -375,26 +383,28 @@ class MyDotWindow( CylcDotViewerCommon ):
             self.show_all()
         self.load_config()
 
-    def group_all( self, w ):
-        self.get_graph( group_all=True )
+    def group_all(self, w):
+        self.get_graph(group_all=True)
 
-    def ungroup_all( self, w ):
-        self.get_graph( ungroup_all=True )
+    def ungroup_all(self, w):
+        self.get_graph(ungroup_all=True)
 
-    def get_graph( self, group_nodes=[], ungroup_nodes=[],
-            ungroup_recursive=False, ungroup_all=False, group_all=False ):
+    def get_graph(self, group_nodes=[], ungroup_nodes=[],
+                  ungroup_recursive=False, ungroup_all=False, group_all=False):
+        if not self.suiterc:
+            return
         family_nodes = self.suiterc.get_first_parent_descendants().keys()
         graphed_family_nodes = self.suiterc.triggering_families
         suite_polling_tasks = self.suiterc.suite_polling_tasks
         # Note this is used by "cylc graph" but not gcylc.
         # self.start_ and self.stop_point_string come from CLI.
         graph = self.suiterc.get_graph(
-                group_nodes=group_nodes,
-                ungroup_nodes=ungroup_nodes,
-                ungroup_recursive=ungroup_recursive,
-                group_all=group_all, ungroup_all=ungroup_all,
-                ignore_suicide=self.ignore_suicide,
-                subgraphs_on=self.subgraphs_on )
+            group_nodes=group_nodes,
+            ungroup_nodes=ungroup_nodes,
+            ungroup_recursive=ungroup_recursive,
+            group_all=group_all, ungroup_all=ungroup_all,
+            ignore_suicide=self.ignore_suicide,
+            subgraphs_on=self.subgraphs_on)
 
         graph.graph_attr['rankdir'] = self.orientation
 
@@ -408,23 +418,23 @@ class MyDotWindow( CylcDotViewerCommon ):
 
         self.graph = graph
         self.filter_graph()
-        self.set_dotcode( graph.string() )
+        self.set_dotcode(graph.string())
 
-    def on_left_to_right( self, toolitem ):
+    def on_left_to_right(self, toolitem):
         if toolitem.get_active():
-            self.set_orientation( "LR" )  # Left to right ordering of nodes
+            self.set_orientation("LR")  # Left to right ordering of nodes
         else:
-            self.set_orientation( "TB" )  # Top to bottom (default) ordering
+            self.set_orientation("TB")  # Top to bottom (default) ordering
 
-    def on_subgraphs( self, toolitem ):
+    def on_subgraphs(self, toolitem):
         self.subgraphs_on = toolitem.get_active()
         self.get_graph()
- 
-    def on_igsui( self, toolitem ):
+
+    def on_igsui(self, toolitem):
         self.ignore_suicide = toolitem.get_active()
         self.get_graph()
 
-    def save_action( self, toolitem ):
+    def save_action(self, toolitem):
         chooser = gtk.FileChooserDialog(title="Save Graph",
                                         action=gtk.FILE_CHOOSER_ACTION_SAVE,
                                         buttons=(gtk.STOCK_CANCEL,
@@ -439,7 +449,7 @@ class MyDotWindow( CylcDotViewerCommon ):
             self.outfile = chooser.get_filename()
             if self.outfile:
                 try:
-                    self.graph.draw( self.outfile, prog='dot' )
+                    self.graph.draw(self.outfile, prog='dot')
                 except IOError, x:
                     msg = gtk.MessageDialog(type=gtk.MESSAGE_ERROR,
                                             buttons=gtk.BUTTONS_OK,
@@ -450,7 +460,7 @@ class MyDotWindow( CylcDotViewerCommon ):
         else:
             chooser.destroy()
 
-    def set_orientation( self, orientation="TB" ):
+    def set_orientation(self, orientation="TB"):
         """Set the orientation of the graph node ordering."""
         if orientation == self.orientation:
             return False
@@ -483,33 +493,33 @@ class xdot_widgets(object):
 
         self.widget = DotTipWidget()
 
-        zoomin_button = gtk.Button( stock=gtk.STOCK_ZOOM_IN )
+        zoomin_button = gtk.Button(stock=gtk.STOCK_ZOOM_IN)
         zoomin_button.connect('clicked', self.widget.on_zoom_in)
-        zoomout_button = gtk.Button( stock=gtk.STOCK_ZOOM_OUT )
+        zoomout_button = gtk.Button(stock=gtk.STOCK_ZOOM_OUT)
         zoomout_button.connect('clicked', self.widget.on_zoom_out)
-        zoomfit_button = gtk.Button( stock=gtk.STOCK_ZOOM_FIT )
+        zoomfit_button = gtk.Button(stock=gtk.STOCK_ZOOM_FIT)
         zoomfit_button.connect('clicked', self.widget.on_zoom_fit)
-        zoom100_button = gtk.Button( stock=gtk.STOCK_ZOOM_100 )
+        zoom100_button = gtk.Button(stock=gtk.STOCK_ZOOM_100)
         zoom100_button.connect('clicked', self.widget.on_zoom_100)
 
-        self.graph_disconnect_button = gtk.ToggleButton( '_DISconnect' )
+        self.graph_disconnect_button = gtk.ToggleButton('_DISconnect')
         self.graph_disconnect_button.set_active(False)
-        self.graph_update_button = gtk.Button( '_Update' )
+        self.graph_update_button = gtk.Button('_Update')
         self.graph_update_button.set_sensitive(False)
 
         bbox = gtk.HButtonBox()
-        bbox.add( zoomin_button )
-        bbox.add( zoomout_button )
-        bbox.add( zoomfit_button )
-        bbox.add( zoom100_button )
-        bbox.add( self.graph_disconnect_button )
-        bbox.add( self.graph_update_button )
+        bbox.add(zoomin_button)
+        bbox.add(zoomout_button)
+        bbox.add(zoomfit_button)
+        bbox.add(zoom100_button)
+        bbox.add(self.graph_disconnect_button)
+        bbox.add(self.graph_update_button)
         bbox.set_layout(gtk.BUTTONBOX_SPREAD)
 
         self.vbox.pack_start(self.widget)
         self.vbox.pack_start(bbox, False)
 
-    def get( self ):
+    def get(self):
         return self.vbox
 
     def set_filter(self, filter):
@@ -520,18 +530,18 @@ class xdot_widgets(object):
             old_zoom_func = self.widget.zoom_image
             self.widget.zoom_image = lambda *a, **b: self.widget.queue_draw()
         if self.widget.set_dotcode(dotcode, filename):
-            #self.set_title(os.path.basename(filename) + ' - Dot Viewer')
+            # self.set_title(os.path.basename(filename) + ' - Dot Viewer')
             # disable automatic zoom-to-fit on update
-            #self.widget.zoom_to_fit()
+            # self.widget.zoom_to_fit()
             pass
         if no_zoom:
             self.widget.zoom_image = old_zoom_func
 
     def set_xdotcode(self, xdotcode, filename='<stdin>'):
         if self.widget.set_xdotcode(xdotcode):
-            #self.set_title(os.path.basename(filename) + ' - Dot Viewer')
+            # self.set_title(os.path.basename(filename) + ' - Dot Viewer')
             # disable automatic zoom-to-fit on update
-            #self.widget.zoom_to_fit()
+            # self.widget.zoom_to_fit()
             pass
 
     def on_reload(self, action):
diff --git a/lib/cylc/daemonize.py b/lib/cylc/daemonize.py
index faa36d4..8421a23 100644
--- a/lib/cylc/daemonize.py
+++ b/lib/cylc/daemonize.py
@@ -1,9 +1,11 @@
 #!/usr/bin/env python
 
-import os, sys
+import os
+import sys
 from suite_output import suite_output
 
-def daemonize( suite, port ):
+
+def daemonize(suite, port):
     """
     ATTRIBUTION: base on a public domain code recipe by Jurgen Hermann:
     http://code.activestate.com/recipes/66012-fork-a-daemon-process-on-unix/
@@ -12,7 +14,7 @@ def daemonize( suite, port ):
     # Do the UNIX double-fork magic, see Stevens' "Advanced
     # Programming in the UNIX Environment" for details (ISBN 0201563177)
 
-    sout = suite_output( suite )
+    sout = suite_output(suite)
 
     try:
         pid = os.fork()
@@ -37,7 +39,8 @@ def daemonize( suite, port ):
             print " + Name:", suite
             print " + PID: ", pid
             print " + Port:", port
-            print " + Logs: %s/{log,out,err}" % os.path.dirname( sout.get_path() )
+            print " + Logs: %s/{log,out,err}" % (
+                os.path.dirname(sout.get_path()))
             print
             print "To see if this suite is still running:"
             print " * cylc scan"
@@ -53,7 +56,7 @@ def daemonize( suite, port ):
         sys.exit(1)
 
     # reset umask
-    os.umask(022) # octal
+    os.umask(022)  # octal
 
     # redirect output to the suite log files
     sout.redirect()
diff --git a/lib/cylc/dbstatecheck.py b/lib/cylc/dbstatecheck.py
index 18a3f67..5fa34a1 100644
--- a/lib/cylc/dbstatecheck.py
+++ b/lib/cylc/dbstatecheck.py
@@ -23,7 +23,7 @@ import sys
 
 class DBOperationError(Exception):
 
-    """An exception raised when a db operation fails, typically due to a lock."""
+    """An exception raised on db operation failure, typically due to a lock."""
 
     def __str__(self):
         return "Suite database operation failed: %s" % self.args
@@ -31,7 +31,7 @@ class DBOperationError(Exception):
 
 class DBNotFoundError(Exception):
 
-    """An exception raised when a suite is already running."""
+    """An exception raised when a suite database is not found."""
 
     def __str__(self):
         return "Suite database not found at: %s" % self.args
@@ -41,17 +41,21 @@ class CylcSuiteDBChecker(object):
     """Object for querying a suite database"""
     DB_FILE_BASE_NAME = "cylc-suite.db"
     STATE_ALIASES = {}
-    STATE_ALIASES['finish' ] = ['failed', 'succeeded']
-    STATE_ALIASES['start'  ] = ['running', 'succeeded', 'failed', 'retrying']
-    STATE_ALIASES['submit' ] = ['submitted', 'submit-retrying', 'running','succeeded','failed','retrying']
-    STATE_ALIASES['fail'   ] = ['failed']
+    STATE_ALIASES['finish'] = ['failed', 'succeeded']
+    STATE_ALIASES['start'] = ['running', 'succeeded', 'failed', 'retrying']
+    STATE_ALIASES['submit'] = [
+        'submitted', 'submit-retrying', 'running', 'succeeded', 'failed',
+        'retrying']
+    STATE_ALIASES['fail'] = ['failed']
     STATE_ALIASES['succeed'] = ['succeeded']
 
-    def __init__(self, suite_dir, suite, dbname=None): # possible to set suite_dir to system default cylc-run dir?
+    def __init__(self, suite_dir, suite, dbname=None):
+        # possible to set suite_dir to system default cylc-run dir?
         suite_dir = os.path.expanduser(suite_dir)
         if dbname is not None:
             self.DB_FILE_BASE_NAME = dbname
-        self.db_address = suite_dir + "/" + suite + "/" + self.DB_FILE_BASE_NAME
+        self.db_address = (
+            suite_dir + "/" + suite + "/" + self.DB_FILE_BASE_NAME)
         if not os.path.exists(self.db_address):
             raise DBNotFoundError(self.db_address)
         self.conn = sqlite3.connect(self.db_address, timeout=10.0)
@@ -64,8 +68,9 @@ class CylcSuiteDBChecker(object):
             for row in res:
                 sys.stdout.write((", ").join(row).encode("utf-8") + "\n")
 
-    def state_lookup(self, state): #allows for multiple states to be searched via a status alias
-        if self.STATE_ALIASES.has_key(state):
+    def state_lookup(self, state):
+        """allows for multiple states to be searched via a status alias"""
+        if state in self.STATE_ALIASES:
             return self.STATE_ALIASES[state]
         else:
             return state
@@ -101,7 +106,7 @@ class CylcSuiteDBChecker(object):
             q = q_base
 
         try:
-            self.c.execute(q,vals)
+            self.c.execute(q, vals)
             next = self.c.fetchmany()
             while next:
                 res.append(next[0])
@@ -125,7 +130,7 @@ class CylcSuiteDBChecker(object):
         return len(res) > 0
 
     def validate_mask(self, mask):
-        fieldnames = ["name", "status", "cycle"] # extract from rundb.py?
+        fieldnames = ["name", "status", "cycle"]  # extract from rundb.py?
         for term in mask.split(","):
             if term.strip(" ") not in fieldnames:
                 return False
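
The STATE_ALIASES cleanup keeps the same mapping from a status alias to the
concrete task states it covers, and the class attribute can be inspected
without opening a suite database; for example:

    from cylc.dbstatecheck import CylcSuiteDBChecker

    print CylcSuiteDBChecker.STATE_ALIASES['finish']  # ['failed', 'succeeded']
    print CylcSuiteDBChecker.STATE_ALIASES['submit']
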
diff --git a/lib/cylc/dump.py b/lib/cylc/dump.py
index 90c131b..8fa54d6 100644
--- a/lib/cylc/dump.py
+++ b/lib/cylc/dump.py
@@ -30,12 +30,14 @@ def get_stop_state(suite, owner=None, host=None):
         return None
     command = "cylc cat-state"
     if host:
-        command += " --host=" +host
+        command += " --host=" + host
     if owner:
         command += " --user=" + owner
     command += " " + suite
     try:
-        p = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
+        p = subprocess.Popen(
+            command, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
+            shell=True)
         stdout, stderr = p.communicate()
     except:
         return None
@@ -45,7 +47,7 @@ def get_stop_state(suite, owner=None, host=None):
         return None
 
 
-def get_stop_state_summary(suite, owner=None, hostname=None, lines=None ):
+def get_stop_state_summary(suite, owner=None, hostname=None, lines=None):
     """Load the contents of the last 'state' file into summary maps."""
     global_summary = {}
     task_summary = {}
@@ -61,8 +63,8 @@ def get_stop_state_summary(suite, owner=None, hostname=None, lines=None ):
         if line.startswith('Remote command'):
             lines.remove(line)
     line0 = lines.pop(0)
-    if line0.startswith( 'suite time' ) or \
-            line0.startswith( 'simulation time' ):
+    if line0.startswith('suite time') or \
+            line0.startswith('simulation time'):
         # backward compatibility with pre-5.4.11 state dumps
         global_summary["last_updated"] = time.time()
     else:
@@ -77,11 +79,9 @@ def get_stop_state_summary(suite, owner=None, hostname=None, lines=None ):
         except (TypeError, ValueError, IndexError):
             # back compat pre cylc-6
             global_summary["last_updated"] = time.time()
-  
+
     start = lines.pop(0).rstrip().rsplit(None, 1)[-1]
     stop = lines.pop(0).rstrip().rsplit(None, 1)[-1]
-    if start != "(none)":
-        global_summary["start time"] = start
     if stop != "(none)":
         global_summary["will_stop_at"] = stop
     while lines:
@@ -89,7 +89,7 @@ def get_stop_state_summary(suite, owner=None, hostname=None, lines=None ):
         if line.startswith("class") or line.startswith("Begin task"):
             continue
         try:
-            ( task_id, info ) = line.split(' : ')
+            (task_id, info) = line.split(' : ')
             name, point_string = TaskID.split(task_id)
         except ValueError:
             continue
@@ -104,25 +104,26 @@ def get_stop_state_summary(suite, owner=None, hostname=None, lines=None ):
         if state == 'submitting':
             # backward compabitility for state dumps generated prior to #787
             state = 'ready'
-        task_summary[task_id].update({"state": state })
+        task_summary[task_id].update({"state": state})
         task_summary[task_id].update({"spawned": items.get("spawned")})
     global_summary["run_mode"] = "dead"
     for key in ["paused", "stopping", "will_pause_at", "will_stop_at"]:
         global_summary.setdefault(key, "")
     return global_summary, task_summary, family_summary
 
-def dump_to_stdout( states, sort_by_cycle=False ):
+
+def dump_to_stdout(states, sort_by_cycle=False):
     lines = []
-    #print 'TASK INFORMATION'
+    # print 'TASK INFORMATION'
     task_ids = states.keys()
-    #task_ids.sort()
+    # task_ids.sort()
 
-    for id in task_ids:
-        name  = states[ id ][ 'name' ]
-        label = states[ id ][ 'label' ]
-        state = states[ id ][ 'state' ]
+    for task_id in task_ids:
+        name = states[task_id]['name']
+        label = states[task_id]['label']
+        state = states[task_id]['state']
 
-        if states[ id ][ 'spawned' ]:
+        if states[task_id]['spawned']:
             spawned = 'spawned'
         else:
             spawned = 'unspawned'
@@ -134,7 +135,7 @@ def dump_to_stdout( states, sort_by_cycle=False ):
 
         line += state + ', ' + spawned
 
-        lines.append( line )
+        lines.append(line)
 
     lines.sort()
     for line in lines:
diff --git a/lib/cylc/envvar.py b/lib/cylc/envvar.py
index 48b4872..be05cc7 100644
--- a/lib/cylc/envvar.py
+++ b/lib/cylc/envvar.py
@@ -18,30 +18,35 @@
 
 """ environment variable utility functions """
 
-import os, re, sys
+import os
+import re
 
-class EnvVarError( Exception ):
-    def __init__( self, msg ):
+
+class EnvVarError(Exception):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
 
-def check_varnames( env ):
+
+def check_varnames(env):
     """ check a bunch of putative environment names for legality,
     returns a list of bad names (empty implies success)."""
     bad = []
     for varname in env:
-        if not re.match( '^[a-zA-Z_][\w]*$', varname ):
+        if not re.match('^[a-zA-Z_][\w]*$', varname):
             bad.append(varname)
     return bad
 
-def expandvars( item, owner=None ):
+
+def expandvars(item, owner=None):
     if owner:
-        homedir = os.path.expanduser( '~' + owner )
+        homedir = os.path.expanduser('~' + owner)
     else:
-        homedir = os.environ[ 'HOME' ]
+        homedir = os.environ['HOME']
     # first replace '$HOME' with actual home dir
-    item = item.replace( '$HOME', homedir )
+    item = item.replace('$HOME', homedir)
     # now expand any other environment variable or tilde-username
-    item = os.path.expandvars( os.path.expanduser( item ))
+    item = os.path.expandvars(os.path.expanduser(item))
     return item
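
The two helpers tidied here behave as their docstrings and comments describe:
check_varnames() returns the names that are not legal environment variable
names, and expandvars() expands $HOME, other variables and ~user prefixes.
A rough example, assuming HOME is set in the environment:

    from cylc.envvar import check_varnames, expandvars

    print check_varnames(['FOO', '_BAR', '2BAD'])  # ['2BAD']
    print expandvars('$HOME/cylc-run')             # e.g. /home/user/cylc-run
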
diff --git a/lib/cylc/exceptions.py b/lib/cylc/exceptions.py
index 8c14d4c..a0da579 100644
--- a/lib/cylc/exceptions.py
+++ b/lib/cylc/exceptions.py
@@ -16,14 +16,18 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-class SchedulerStop( Exception ):
-    def __init__( self, msg ):
+
+class SchedulerStop(Exception):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
 
-class SchedulerError( Exception ):
-    def __init__( self, msg ):
+
+class SchedulerError(Exception):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
diff --git a/lib/cylc/execute.py b/lib/cylc/execute.py
index 2b31ae3..1b0b594 100644
--- a/lib/cylc/execute.py
+++ b/lib/cylc/execute.py
@@ -16,29 +16,34 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import sys, subprocess
+import sys
+import subprocess
 
 # subprocess.call() - if shell=True, command is string, not list.
 
-def execute( command_list, ignore_output=False, notify=False ):
+
+def execute(command_list, ignore_output=False, notify=False):
     try:
         if ignore_output:
             # THIS BLOCKS UNTIL THE COMMAND COMPLETES
-            retcode = subprocess.call( command_list, stdout=open('/dev/null', 'w'), \
-                stderr=subprocess.STDOUT )
+            retcode = subprocess.call(
+                command_list,
+                stdout=open('/dev/null', 'w'),
+                stderr=subprocess.STDOUT)
         else:
             # THIS BLOCKS UNTIL THE COMMAND COMPLETES
-            retcode = subprocess.call( command_list )
+            retcode = subprocess.call(command_list)
         if retcode != 0:
             # the command returned non-zero exist status
-            print >> sys.stderr, ' '.join( command_list ), ' failed: ', retcode
+            print >> sys.stderr, ' '.join(command_list), ' failed: ', retcode
             sys.exit(1)
         else:
             if notify:
-                #print ' '.join( command_list ), ' succeeded'
+                # print ' '.join(command_list), ' succeeded'
                 print 'DONE'
             sys.exit(0)
     except OSError:
         # the command was not invoked
-        print >> sys.stderr, 'ERROR: unable to execute ', ' '.join(command_list)
+        print >> sys.stderr, (
+            'ERROR: unable to execute ' + ' '.join(command_list))
         sys.exit(1)
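
Note that execute() still terminates the calling process: it blocks until the
command completes, then calls sys.exit(0) on success or sys.exit(1) on
failure, so it only suits the tail end of a script. A minimal sketch:

    from cylc.execute import execute

    # Prints "DONE" and exits 0; a failing command would exit 1 instead.
    execute(['true'], notify=True)
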
diff --git a/lib/cylc/owner.py b/lib/cylc/get_task_proxy.py
similarity index 66%
copy from lib/cylc/owner.py
copy to lib/cylc/get_task_proxy.py
index dda7737..98b5bf6 100644
--- a/lib/cylc/owner.py
+++ b/lib/cylc/get_task_proxy.py
@@ -16,15 +16,15 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-"""In analogy with cylc.hostname.is_remote_host(), determine if a
-username is "remote"."""
+from cylc.task_proxy import TaskProxy
+from cylc.config import SuiteConfig, TaskNotDefinedError
 
-import os, pwd
 
-user = os.environ.get( 'USER', pwd.getpwuid(os.getuid()).pw_name )
-
-def is_remote_user(name):
-    """Return True if name is different than the current username.
-    Return False if name is None.
-    """
-    return name and name != user
+def get_task_proxy(name, *args, **kwargs):
+    """Return a task proxy for a named task."""
+    config = SuiteConfig.get_inst()
+    try:
+        tdef = config.taskdefs[name]
+    except KeyError:
+        raise TaskNotDefinedError(name)
+    return TaskProxy(tdef, *args, **kwargs)
diff --git a/lib/cylc/graphing.py b/lib/cylc/graphing.py
index 6fddb5b..452bcd1 100644
--- a/lib/cylc/graphing.py
+++ b/lib/cylc/graphing.py
@@ -27,18 +27,19 @@ from graphnode import graphnode
 
 # TODO: Do we still need autoURL below?
 
-class CGraphPlain( pygraphviz.AGraph ):
+
+class CGraphPlain(pygraphviz.AGraph):
     """Directed Acyclic Graph class for cylc dependency graphs."""
 
-    def __init__( self, title, suite_polling_tasks={} ):
+    def __init__(self, title, suite_polling_tasks={}):
         self.title = title
-        pygraphviz.AGraph.__init__( self, directed=True )
+        pygraphviz.AGraph.__init__(self, directed=True, strict=True)
         # graph attributes
         # - label (suite name)
         self.graph_attr['label'] = title
         self.suite_polling_tasks = suite_polling_tasks
 
-    def node_attr_by_taskname( self, node_string ):
+    def node_attr_by_taskname(self, node_string):
         try:
             name, point_string = TaskID.split(node_string)
         except ValueError:
@@ -51,11 +52,11 @@ class CGraphPlain( pygraphviz.AGraph ):
         else:
             return []
 
-    def style_edge( self, left, right ):
+    def style_edge(self, left, right):
         pass
 
-    def style_node( self, node_string, autoURL, base=False ):
-        node = self.get_node( node_string )
+    def style_node(self, node_string, autoURL, base=False):
+        node = self.get_node(node_string)
         try:
             name, point_string = TaskID.split(node_string)
         except ValueError:
@@ -69,35 +70,31 @@ class CGraphPlain( pygraphviz.AGraph ):
         if name in self.suite_polling_tasks:
             label += "\\n" + self.suite_polling_tasks[name][3]
         label += "\\n" + point_string
-        node.attr[ 'label' ] = label
+        node.attr['label'] = label
         if autoURL:
             if base:
                 # TODO - This is only called from cylc_add_edge in this
                 # base class ... should it also be called from add_node?
-                node.attr[ 'URL' ] = 'base:' + node_string
+                node.attr['URL'] = 'base:' + node_string
             else:
                 node.attr['URL'] = node_string
 
-    def cylc_add_node( self, node_string, autoURL, **attr ):
-        pygraphviz.AGraph.add_node( self, node_string, **attr )
-        self.style_node( node_string, autoURL )
+    def cylc_add_node(self, node_string, autoURL, **attr):
+        pygraphviz.AGraph.add_node(self, node_string, **attr)
+        self.style_node(node_string, autoURL)
 
-    def cylc_add_edge( self, left, right, autoURL, **attr ):
-        if left == None and right == None:
+    def cylc_add_edge(self, left, right, autoURL, **attr):
+        if left is None and right is None:
             pass
-        elif left == None:
-            self.cylc_add_node( right, autoURL )
-        elif right == None:
-            self.cylc_add_node( left, autoURL )
-        elif left == right:
-            # pygraphviz 1.1 adds a node instead of a self-edge
-            # which results in a KeyError in get_edge() below.
-            self.cylc_add_node( left, autoURL )
+        elif left is None:
+            self.cylc_add_node(right, autoURL)
+        elif right is None:
+            self.cylc_add_node(left, autoURL)
         else:
-            pygraphviz.AGraph.add_edge( self, left, right, **attr )
-            self.style_node( left, autoURL, base=True )
-            self.style_node( right, autoURL, base=True )
-            self.style_edge( left, right )
+            pygraphviz.AGraph.add_edge(self, left, right, **attr)
+            self.style_node(left, autoURL, base=True)
+            self.style_node(right, autoURL, base=True)
+            self.style_edge(left, right)
 
     def cylc_remove_nodes_from(self, nodes):
         """Remove nodes, returning extra edge structure if possible.
@@ -129,7 +126,7 @@ class CGraphPlain( pygraphviz.AGraph ):
                     outgoing_remove_edges.append((l_node, r_node))
             elif r_node in remove_nodes:
                 incoming_remove_edges.append((l_node, r_node))
-        
+
         if not outgoing_remove_edges:
             # Preserving edges doesn't matter - ditch this whole set.
             self.remove_nodes_from(nodes)
@@ -226,8 +223,8 @@ class CGraphPlain( pygraphviz.AGraph ):
         self.remove_nodes_from(nodes)
         self.add_edges(list(new_edges))
 
-    def add_edges( self, edges, ignore_suicide=False ):
-        edges.sort() # TODO: does sorting help layout stability?
+    def add_edges(self, edges, ignore_suicide=False):
+        edges.sort()  # TODO: does sorting help layout stability?
         for edge in edges:
             left, right, skipped, suicide, conditional = edge
             if suicide and ignore_suicide:
@@ -258,7 +255,7 @@ class CGraphPlain( pygraphviz.AGraph ):
                 left, right, True, **attrs
             )
 
-    def add_cycle_point_subgraphs( self, edges ):
+    def add_cycle_point_subgraphs(self, edges):
         """Draw nodes within cycle point groups (subgraphs)."""
         point_string_id_map = {}
         for edge_entry in edges:
@@ -300,33 +297,29 @@ class CGraphPlain( pygraphviz.AGraph ):
         nodes = self.prepare_nbunch(nbunch)
         subgraph.add_nodes_from(nodes)
 
-        for left, right in self.edges():
-            if left in subgraph and right in subgraph: 
-                subgraph.add_edge(left, right)
-
         return subgraph
 
 
-class CGraph( CGraphPlain ):
+class CGraph(CGraphPlain):
     """Directed Acyclic Graph class for cylc dependency graphs.
     This class automatically adds node and edge attributes
     according to the suite.rc file visualization config."""
 
-    def __init__( self, title, suite_polling_tasks={}, vizconfig={} ):
+    def __init__(self, title, suite_polling_tasks={}, vizconfig={}):
 
         # suite.rc visualization config section
         self.vizconfig = vizconfig
-        CGraphPlain.__init__( self, title, suite_polling_tasks )
+        CGraphPlain.__init__(self, title, suite_polling_tasks)
 
         # graph attributes
         # - default node attributes
         for item in vizconfig['default node attributes']:
-            attr, value = re.split( '\s*=\s*', item )
-            self.node_attr[ attr ] = value
+            attr, value = re.split('\s*=\s*', item)
+            self.node_attr[attr] = value
         # - default edge attributes
         for item in vizconfig['default edge attributes']:
-            attr, value = re.split( '\s*=\s*', item )
-            self.edge_attr[ attr ] = value
+            attr, value = re.split('\s*=\s*', item)
+            self.edge_attr[attr] = value
 
         # non-default node attributes by task name
         # TODO - ERROR CHECKING FOR INVALID TASK NAME
@@ -340,26 +333,26 @@ class CGraph( CGraphPlain ):
                     for attr in self.vizconfig['node attributes'][item]:
                         if task not in self.task_attr:
                             self.task_attr[task] = []
-                        self.task_attr[task].append( attr )
+                        self.task_attr[task].append(attr)
             else:
                 # item must be a task name
                 for attr in self.vizconfig['node attributes'][item]:
                     if item not in self.task_attr:
                         self.task_attr[item] = []
-                    self.task_attr[item].append( attr )
+                    self.task_attr[item].append(attr)
 
-    def style_node( self, node_string, autoURL, base=False ):
-        super( self.__class__, self ).style_node(
+    def style_node(self, node_string, autoURL, base=False):
+        super(self.__class__, self).style_node(
             node_string, autoURL, False)
         node = self.get_node(node_string)
-        for item in self.node_attr_by_taskname( node_string ):
-            attr, value = re.split( '\s*=\s*', item )
-            node.attr[ attr ] = value
+        for item in self.node_attr_by_taskname(node_string):
+            attr, value = re.split('\s*=\s*', item)
+            node.attr[attr] = value
         if self.vizconfig['use node color for labels']:
             node.attr['fontcolor'] = node.attr['color']
 
-    def style_edge( self, left, right ):
-        super( self.__class__, self ).style_edge( left, right )
+    def style_edge(self, left, right):
+        super(self.__class__, self).style_edge(left, right)
         left_node = self.get_node(left)
         edge = self.get_edge(left, right)
         if self.vizconfig['use node color for edges']:
@@ -369,29 +362,29 @@ class CGraph( CGraphPlain ):
                 edge.attr['color'] = left_node.attr['color']
 
 
-class edge( object):
-    def __init__( self, left, right, sequence, suicide=False,
-                  conditional=False ):
+class edge(object):
+    def __init__(self, left, right, sequence, suicide=False,
+                 conditional=False):
         """contains qualified node names, e.g. 'foo[T-6]:out1'"""
         self.left = left
         self.right = right
-        self.sequence = sequence
         self.suicide = suicide
+        self.sequence = sequence
         self.conditional = conditional
 
-    def get_right( self, inpoint, start_point):
+    def get_right(self, inpoint, start_point):
         inpoint_string = str(inpoint)
-        if self.right == None:
+        if self.right is None:
             return None
 
         # strip off special outputs
-        self.right = re.sub( ':\w+', '', self.right )
+        self.right = re.sub(':\w+', '', self.right)
 
         return TaskID.get(self.right, inpoint_string)
 
-    def get_left( self, inpoint, start_point, base_interval ):
+    def get_left(self, inpoint, start_point, base_interval):
         # strip off special outputs
-        left = re.sub( ':[\w-]+', '', self.left )
+        left = re.sub(':[\w-]+', '', self.left)
 
         left_graphnode = graphnode(left, base_interval=base_interval)
         if left_graphnode.offset_is_from_ict:
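
For reference, the re.sub() calls in get_right()/get_left() above strip only a
trailing ':output' qualifier from a node name; any bracketed offset is left
for graphnode to parse. A quick standalone illustration (not part of the
patch):

    import re

    # get_right(): drop a ':output' qualifier such as ':out1' or ':fail'.
    print(re.sub(r':\w+', '', 'foo:out1'))            # -> foo

    # get_left(): as above, but '-' is also allowed in the output name;
    # the '[-PT6H]' offset is untouched.
    print(re.sub(r':[\w-]+', '', 'bar[-PT6H]:fail'))  # -> bar[-PT6H]
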
diff --git a/lib/cylc/graphnode.py b/lib/cylc/graphnode.py
index f216174..42999e9 100644
--- a/lib/cylc/graphnode.py
+++ b/lib/cylc/graphnode.py
@@ -75,20 +75,23 @@ IRREGULAR_OFFSET_RE = re.compile(
         $            # End of string
     """, re.X)
 
-class GraphNodeError( Exception ):
+
+class GraphNodeError(Exception):
     """
     Attributes:
         message - what the problem is.
     """
-    def __init__( self, msg ):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
 
-class graphnode( object ):
+
+class graphnode(object):
     """A node in the cycle suite.rc dependency graph."""
 
-    def __init__( self, node, base_interval=None ):
+    def __init__(self, node, base_interval=None):
         node_in = node
         # Get task name and properties from a graph node name.
 
@@ -103,7 +106,7 @@ class graphnode( object ):
         self.offset_is_irregular = False
         self.is_absolute = False
 
-        m = re.match( NODE_ISO_ICT_RE, node )
+        m = re.match(NODE_ISO_ICT_RE, node)
         if m:
             # node looks like foo[^], foo[^-P4D], foo[^]:fail, etc.
             self.is_absolute = True
@@ -114,9 +117,9 @@ class graphnode( object ):
             # Can't always set syntax here, as we use [^] for backwards comp.
             if offset_string:
                 set_syntax_version(
-                        VERSION_NEW, "graphnode: %s: ISO 8601 offset" % node)
+                    VERSION_NEW, "graphnode: %s: ISO 8601 offset" % node)
         else:
-            m = re.match( NODE_ISO_RE, node )
+            m = re.match(NODE_ISO_RE, node)
             if m:
                 # node looks like foo, foo:fail, foo[-PT6H], foo[-P4D]:fail...
                 name, offset_string, outp = m.groups()
@@ -126,9 +129,9 @@ class graphnode( object ):
                     set_syntax_version(
                         VERSION_NEW, "graphnode: %s: ISO 8601 offset" % node)
             else:
-                m = re.match( NODE_PREV_RE, node )
+                m = re.match(NODE_PREV_RE, node)
                 if not m:
-                    raise GraphNodeError( 'Illegal graph node: ' + node )
+                    raise GraphNodeError('Illegal graph node: ' + node)
                 # node looks like foo[T-6], foo[T-12]:fail...
                 name, sign, offset_string, outp = m.groups()
                 if sign and offset_string:
@@ -141,7 +144,7 @@ class graphnode( object ):
 
         if outp:
             self.special_output = True
-            self.output = outp[1:] # strip ':'
+            self.output = outp[1:]  # strip ':'
         else:
             self.special_output = False
             self.output = None
@@ -149,7 +152,7 @@ class graphnode( object ):
         if name:
             self.name = name
         else:
-            raise GraphNodeError( 'Illegal graph node: ' + node )
+            raise GraphNodeError('Illegal graph node: ' + node)
 
         if self.offset_is_from_ict and not offset_string:
             offset_string = str(get_interval_cls().get_null_offset())
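
Taken together, the three branches above accept the node forms shown in the
comments: offsets from the initial cycle point (foo[^], foo[^-P4D]), ISO 8601
offsets (foo[-PT6H]), and the older foo[T-6] syntax, each with an optional
':output' suffix. A rough usage sketch, assuming graphnode can be exercised on
its own and using only the attributes set in this hunk:

    from cylc.graphnode import graphnode, GraphNodeError

    node = graphnode('foo:fail')   # no offset, special output ':fail'
    print(node.name)               # -> foo
    print(node.special_output)     # -> True
    print(node.output)             # -> fail  (leading ':' stripped)

    try:
        graphnode('foo[')          # malformed node string
    except GraphNodeError as exc:
        print(exc)                 # -> 'Illegal graph node: foo['
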
diff --git a/lib/cylc/gui/app_gcylc.py b/lib/cylc/gui/app_gcylc.py
index 8970c75..93b4418 100644
--- a/lib/cylc/gui/app_gcylc.py
+++ b/lib/cylc/gui/app_gcylc.py
@@ -15,16 +15,18 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
 """The main control GUI of gcylc."""
 
 import os
 import re
 import sys
 import gtk
-import gobject
 import pango
+import gobject
 import socket
 import subprocess
+from uuid import uuid4
 from isodatetime.parsers import TimePointParser
 
 from cylc.suite_host import is_remote_host
@@ -51,8 +53,7 @@ from cylc.gui.updater import Updater
 from cylc.gui.util import (
     get_icon, get_image_dir, get_logo, EntryTempText,
     EntryDialog, setup_icons, set_exception_hook_dialog)
-from cylc import cylc_pyro_client
-from cylc.state_summary import extract_group_state
+from cylc.network.suite_state import extract_group_state
 from cylc.task_id import TaskID
 from cylc.version import CYLC_VERSION
 from cylc.gui.option_group import controlled_option_group
@@ -60,9 +61,7 @@ from cylc.gui.color_rotator import rotator
 from cylc.gui.cylc_logviewer import cylc_logviewer
 from cylc.gui.gcapture import gcapture_tmpfile
 from cylc.task_state import task_state
-from cylc.passphrase import passphrase
 from cylc.suite_logging import suite_log
-from cylc.registration import localdb
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.cfgspec.gcylc import gcfg
 from cylc.wallclock import get_time_string_from_unix_time
@@ -157,19 +156,11 @@ Class to hold initialisation data.
             ['disable interactive command prompts']
         )
         self.imagedir = get_image_dir()
-
-        if suite:
-            self.reset(suite)
+        self.my_uuid = uuid4()
 
     def reset(self, suite):
         self.suite = suite
-        suitedir = None
-        if not is_remote_host(self.host) and not is_remote_user(self.owner):
-            db = localdb(file=self.db)
-            suitedir = db.get_suitedir(suite)
-        # get the suite passphrase (required for local or remote suites)
-        self.pphrase = passphrase(suite, self.owner,
-                                  self.host).get(suitedir=suitedir)
+        self.logdir = suite_log(suite).get_dir()
         self.logdir = suite_log(suite).get_dir()
 
 
@@ -197,6 +188,8 @@ Class to create an information bar.
         self.filter_state_widget = gtk.HBox()
         self._set_tooltip(self.state_widget, "states")
         self._set_tooltip(self.filter_state_widget, "states filtered out")
+        self.prog_bar_timer = None
+        self.prog_bar_disabled = False
 
         self._status = "status..."
         self.notify_status_changed = status_changed_hook
@@ -225,46 +218,58 @@ Class to create an information bar.
         self._set_tooltip(self.mode_widget, "mode")
 
         self._runahead = ""
-        self.runahead_widget = gtk.Label()
-        self._set_tooltip(self.runahead_widget, "runahead limit")
+        # self.runahead_widget = gtk.Label()
+        # self._set_tooltip(self.runahead_widget, "runahead limit")
 
         self._time = "time..."
         self.time_widget = gtk.Label()
         self._set_tooltip(self.time_widget, "last update time")
 
-        hbox = gtk.HBox()
-        self.pack_start(hbox, False, True)
+        hbox = gtk.HBox(spacing=0)
+        self.pack_start(hbox, False, False)
 
-        eb = gtk.EventBox()
-        eb.add(self.status_widget)
-        hbox.pack_start(eb, False)
+        # Note: using box padding or spacing creates spurious spacing around
+        # the hidden widgets; instead we add spaces to text widgets labels.
 
+        # From the left.
+        vbox = gtk.VBox()
+        self.prog_bar = gtk.ProgressBar()
+        vbox.pack_end(self.prog_bar, False, True)
+        # Add some text to get full height.
+        self.prog_bar.set_text("...")
         eb = gtk.EventBox()
-        eb.add(self.filter_state_widget)
+        eb.add(vbox)
+        eb.connect('button-press-event', self.prog_bar_disable)
         hbox.pack_start(eb, False)
 
         eb = gtk.EventBox()
-        hbox.pack_start(eb, True)
+        eb.add(self.status_widget)
+        hbox.pack_start(eb, False)
 
         eb = gtk.EventBox()
-        eb.add(self.state_widget)
+        eb.add(self.filter_state_widget)
         hbox.pack_start(eb, False)
 
         eb = gtk.EventBox()
         eb.add(self.mode_widget)
         hbox.pack_start(eb, False)
 
+        # From the right.
         eb = gtk.EventBox()
-        eb.add(self.runahead_widget)
-        hbox.pack_start(eb, False)
+        eb.add(self.log_widget)
+        hbox.pack_end(eb, False)
 
         eb = gtk.EventBox()
         eb.add(self.time_widget)
-        hbox.pack_start(eb, False)
+        hbox.pack_end(eb, False)
+
+        # eb = gtk.EventBox()
+        # eb.add(self.runahead_widget)
+        # hbox.pack_end(eb, False)
 
         eb = gtk.EventBox()
-        eb.add(self.log_widget)
-        hbox.pack_start(eb, False)
+        eb.add(self.state_widget)
+        hbox.pack_end(eb, False)
 
     def set_theme(self, theme, dot_size):
         self.dots = DotMaker(theme, size=dot_size)
@@ -300,8 +305,7 @@ Class to create an information bar.
         if mode == self._mode:
             return False
         self._mode = mode
-        gobject.idle_add(self.mode_widget.set_markup,
-                         "  " + self._mode + "  ")
+        gobject.idle_add(self.mode_widget.set_markup, " %s " % self._mode)
 
     def set_runahead(self, runahead):
         """Set runahead limit."""
@@ -311,7 +315,7 @@ Class to create an information bar.
         text = "runahead:" + str(runahead) + "h  "
         if runahead is None:
             text = ""
-        gobject.idle_add(self.runahead_widget.set_text, text)
+        # gobject.idle_add(self.runahead_widget.set_text, text)
 
     def set_state(self, suite_states, is_suite_stopped=None):
         """Set state text."""
@@ -367,7 +371,7 @@ Class to create an information bar.
             ttip_text = "Current filtering (click to alter):\n%s" % (
                 ", ".join(self._filter_states_excl))
             hbox = gtk.HBox()
-            hbox.pack_start(gtk.Label("(filtered:"))
+            hbox.pack_start(gtk.Label(" (filtered:"))
             for state in self._filter_states_excl:
                 icon = self.dots.get_image(state, is_filtered=True)
                 icon.show()
@@ -376,7 +380,7 @@ Class to create an information bar.
                 label = gtk.Label(" %s" % self._filter_name_string)
                 hbox.pack_start(label)
                 ttip_text += ", %s" % self._filter_name_string
-            hbox.pack_start(gtk.Label(")"))
+            hbox.pack_start(gtk.Label(") "))
             ebox = gtk.EventBox()
             ebox.add(hbox)
             ebox.connect("button_press_event", self.filter_launcher)
@@ -390,7 +394,7 @@ Class to create an information bar.
             return False
         self._status = status
         gobject.idle_add(
-            self.status_widget.set_text, " " + self._status + "   ")
+            self.status_widget.set_text, " %s " % self._status)
         gobject.idle_add(self.notify_status_changed, self._status)
 
     def set_stop_summary(self, summary_maps):
@@ -417,6 +421,8 @@ Class to create an information bar.
         self.set_status(summary)
         dt = glob["last_updated"]
         self.set_time(get_time_string_from_unix_time(dt))
+        # (called on idle_add)
+        return False
 
     def set_time(self, time):
         """Set last update text."""
@@ -424,7 +430,7 @@ Class to create an information bar.
             return False
         self._time = time
         time_for_display = time.strip().rsplit(".", 1)[0]
-        gobject.idle_add(self.time_widget.set_text, time_for_display + " ")
+        gobject.idle_add(self.time_widget.set_text, " %s " % time_for_display)
 
     def _set_tooltip(self, widget, text):
         tooltip = gtk.Tooltips()
@@ -435,6 +441,50 @@ Class to create an information bar.
         self._log_widget_image.set_sensitive(False)
         self.log_launch_hook()
 
+    def prog_bar_start(self, msg):
+        """Start the progress bar running"""
+        if self.prog_bar_active():
+            # Already started (multiple calls are possible via idle_add).
+            return False
+        self.prog_bar_timer = gobject.timeout_add(100, self.prog_bar_pulse)
+        self.prog_bar.set_text(msg)
+        self.prog_bar.show()
+        self.status_widget.hide()
+        self.prog_bar.show()
+        self._set_tooltip(
+            self.prog_bar,
+            "%s\n(click to remove the progress bar)." % msg)
+        return False
+
+    def prog_bar_pulse(self):
+        self.prog_bar.pulse()
+        return True
+
+    def prog_bar_stop(self):
+        """Stop the progress bar running."""
+        if not self.prog_bar_active():
+            # Already stopped (multiple calls are possible via idle_add).
+            return False
+        gobject.source_remove(self.prog_bar_timer)
+        self.prog_bar.set_fraction(0)
+        self.prog_bar.set_text('')
+        self.prog_bar_timer = None
+        self.prog_bar.hide()
+        self.status_widget.show()
+        return False
+
+    def prog_bar_disable(self, w=None, e=None):
+        """Disable the progress bar (users may find it annoying)"""
+        self.prog_bar_stop()
+        self.prog_bar_disabled = True
+
+    def prog_bar_can_start(self):
+        if not self.prog_bar_active() and not self.prog_bar_disabled:
+            return True
+
+    def prog_bar_active(self):
+        return self.prog_bar_timer is not None
+
 
 class ControlApp(object):
     """
@@ -489,8 +539,7 @@ Main Control GUI that displays one or more views or interfaces to the suite.
 
         setup_icons()
 
-        self.view_layout_horizontal = False
-
+        self.view_layout_horizontal = gcfg.get(['initial side-by-side views'])
         self.quitters = []
         self.gcapture_windows = []
 
@@ -548,28 +597,22 @@ Main Control GUI that displays one or more views or interfaces to the suite.
         bigbox.pack_start(hbox, False)
 
         self.window.add(bigbox)
-        title = "gcylc"
-        if self.restricted_display:
-            title += " -r (restricted display)"
-        self.window.set_title(title)
-
+        self.window.set_title('')
         self.window.show_all()
+        self.info_bar.prog_bar.hide()
 
         self.setup_views()
         if suite:
             self.reset(suite)
 
     def reset(self, suite):
-        title = suite
-        self.cfg.suite = suite
-        if self.cfg.host != socket.getfqdn():
-            title += " - " + self.cfg.host
-        title += " - gcylc"
-        if self.restricted_display:
-            title += " -r (restricted display)"
-        self.window.set_title(title)
         self.cfg.reset(suite)
 
+        win_title = suite
+        if self.cfg.host != socket.getfqdn():
+            win_title += " - %s" % self.cfg.host
+        self.window.set_title(win_title)
+
         self.tool_bar_box.set_sensitive(True)
         for menu in self.suite_menus:
             menu.set_sensitive(True)
@@ -628,7 +671,7 @@ Main Control GUI that displays one or more views or interfaces to the suite.
         new_pane.pack2(self.view_containers[1], resize=True, shrink=True)
         new_pane.set_position(extent / 2)
         top_parent.pack_start(new_pane, expand=True, fill=True)
-        self.window.show_all()
+        self.window_show_all()
 
     def set_theme(self, item):
         """Change self.theme and then replace each view with itself"""
@@ -836,7 +879,7 @@ Main Control GUI that displays one or more views or interfaces to the suite.
         for toolitem in reversed(new_toolitems):
             self.tool_bars[view_num].insert(toolitem, index + 1)
         self.current_view_toolitems[view_num] = new_toolitems
-        self.window.show_all()
+        self.window_show_all()
 
     def remove_view(self, view_num):
         """Remove a view instance."""
@@ -903,32 +946,16 @@ Main Control GUI that displays one or more views or interfaces to the suite.
             self.reset(chosen)
 
     def pause_suite(self, bt):
-        try:
-            result = self.get_pyro('command-interface').put('hold suite now')
-        except Exception, x:
-            warning_dialog(x.__str__(), self.window).warn()
-        else:
-            if not result[0]:
-                warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command('hold_suite')
 
     def resume_suite(self, bt):
-        try:
-            result = self.get_pyro('command-interface').put('release suite')
-        except Exception, x:
-            warning_dialog(x.__str__(), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command('release_suite')
 
     def stopsuite_default(self, *args):
         """Try to stop the suite (after currently running tasks...)."""
-        try:
-            result = self.get_pyro('command-interface').put('stop cleanly')
-        except Exception, x:
-            warning_dialog(x.__str__(), self.window).warn()
-        else:
-            if not result[0]:
-                warning_dialog(result[1], self.window).warn()
+        if not self.get_confirmation("Stop suite %s?" % self.cfg.suite):
+            return
+        self.put_pyro_command('set_stop_cleanly')
 
     def stopsuite(self, bt, window, kill_rb, stop_rb, stopat_rb, stopct_rb,
                   stoptt_rb, stopnow_rb, stoppoint_entry, stopclock_entry,
@@ -992,25 +1019,18 @@ Main Control GUI that displays one or more views or interfaces to the suite.
             return
 
         window.destroy()
-        try:
-            god = self.get_pyro('command-interface')
-            if stop:
-                result = god.put('stop cleanly', False)
-            elif stopkill:
-                result = god.put('stop cleanly', True)
-            elif stopat:
-                result = god.put('stop after point', stop_point_string)
-            elif stopnow:
-                result = god.put('stop now')
-            elif stopclock:
-                result = god.put('stop after clock time', stopclock_time)
-            elif stoptask:
-                result = god.put('stop after task', stoptask_id)
-        except Exception, x:
-            warning_dialog(x.__str__(), self.window).warn()
-        else:
-            if not result[0]:
-                warning_dialog(result[1], self.window).warn()
+        if stop:
+            self.put_pyro_command('set_stop_cleanly', False)
+        elif stopkill:
+            self.put_pyro_command('set_stop_cleanly', True)
+        elif stopat:
+            self.put_pyro_command('set_stop_after_point', stop_point_string)
+        elif stopnow:
+            self.put_pyro_command('stop_now')
+        elif stopclock:
+            self.put_pyro_command('set_stop_after_clock_time', stopclock_time)
+        elif stoptask:
+            self.put_pyro_command('set_stop_after_task', stoptask_id)
 
     def load_point_strings(self, bt, startentry, stopentry):
         item1 = " -i '[scheduling]initial cycle point'"
@@ -1088,18 +1108,12 @@ been defined for this suite""").inform()
         options += self.get_remote_run_opts()
 
         command += ' ' + options + ' ' + self.cfg.suite + ' ' + point_string
-
         print command
 
         if method == 'restart':
             if statedump_entry.get_text():
                 command += ' ' + statedump_entry.get_text()
 
-        # #DEBUGGING:
-        # info_dialog("I'm about to run this command: \n" + command,
-        #             self.window).inform()
-        # return
-
         try:
             subprocess.Popen([command], shell=True)
         except OSError, e:
@@ -1119,10 +1133,7 @@ been defined for this suite""").inform()
         about.set_copyright("Copyright (C) 2008-2015 NIWA")
 
         about.set_comments(
-            """
-The Cylc Suite Engine.
-"""
-        )
+            "The Cylc Suite Engine.\n\nclient UUID:\n%s" % self.cfg.my_uuid)
         about.set_logo(get_logo())
         about.set_transient_for(self.window)
         about.run()
@@ -1139,32 +1150,20 @@ The Cylc Suite Engine.
         if hasattr(e, "button") and e.button != 1:
             return False
         try:
-            [glbl, states, fam_states] = (
-                self.get_pyro('state_summary').get_state_summary())
-        except Exception, x:
-            warning_dialog(str(x), self.window).warn()
-            return
-        view = True
-        reasons = []
-        try:
-            logfiles = states[task_id]['logfiles']
+            task_state_summary = self.updater.full_state_summary[task_id]
         except KeyError:
-            warning_dialog(task_id + ' is no longer live', self.window).warn()
+            warning_dialog(task_id + ' is not live', self.window).warn()
             return False
 
-        if len(logfiles) == 0:
-            view = False
-            reasons.append(task_id + ' has no associated log files')
-
-        if states[task_id]['state'] == 'waiting' or \
-                states[task_id]['state'] == 'ready' or \
-                states[task_id]['state'] == 'submit-failed' or \
-                states[task_id]['state'] == 'queued':
-            view = False
-            reasons.append(task_id + ' has not started running yet')
-
-        if not view:
-            warning_dialog('\n'.join(reasons), self.window).warn()
+        logfiles = task_state_summary['logfiles']
+        warnings = []
+        if not logfiles:
+            warnings.append(task_id + ' has no associated log files')
+        if task_state_summary['state'] in [
+                'waiting', 'ready', 'submit-failed', 'queued']:
+            warnings.append(task_id + ' has not started running yet')
+        if warnings:
+            warning_dialog('\n'.join(warnings), self.window).warn()
         else:
             self.popup_logview(task_id, logfiles, choice)
 
@@ -1231,15 +1230,15 @@ The Cylc Suite Engine.
             img = gtk.image_new_from_stock(gtk.STOCK_DND, gtk.ICON_SIZE_MENU)
             out_item.set_image(img)
             view_menu.append(out_item)
-            out_item.connect('button-press-event', self.view_task_info, task_id,
-                             'job.out')
+            out_item.connect(
+                'button-press-event', self.view_task_info, task_id, 'job.out')
 
             err_item = gtk.ImageMenuItem('job stderr')
             img = gtk.image_new_from_stock(gtk.STOCK_DND, gtk.ICON_SIZE_MENU)
             err_item.set_image(img)
             view_menu.append(err_item)
-            err_item.connect('button-press-event', self.view_task_info, task_id,
-                             'job.err')
+            err_item.connect(
+                'button-press-event', self.view_task_info, task_id, 'job.err')
 
             info_item = gtk.ImageMenuItem('prereq\'s & outputs')
             img = gtk.image_new_from_stock(
@@ -1285,6 +1284,8 @@ The Cylc Suite Engine.
         items.append(poll_item)
         poll_item.connect('activate', self.poll_task, task_id, task_is_family)
 
+        items.append(gtk.SeparatorMenuItem())
+
         kill_item = gtk.ImageMenuItem('Kill')
         img = gtk.image_new_from_stock(gtk.STOCK_CANCEL, gtk.ICON_SIZE_MENU)
         kill_item.set_image(img)
@@ -1377,23 +1378,6 @@ The Cylc Suite Engine.
         remove_nospawn_item.connect('activate', self.remove_task_nospawn,
                                     task_id, task_is_family)
 
-        if not task_is_family:
-            purge_item = gtk.ImageMenuItem('Remove Tree (Recursive Purge)')
-            img = gtk.image_new_from_stock(gtk.STOCK_DELETE,
-                                           gtk.ICON_SIZE_MENU)
-            purge_item.set_image(img)
-            items.append(purge_item)
-            purge_item.connect('activate', self.popup_purge, task_id)
-
-            items.append(gtk.SeparatorMenuItem())
-
-            addprereq_item = gtk.ImageMenuItem('Add A Prerequisite')
-            img = gtk.image_new_from_stock(gtk.STOCK_ADD, gtk.ICON_SIZE_MENU)
-            addprereq_item.set_image(img)
-            items.append(addprereq_item)
-            addprereq_item.connect('activate', self.add_prerequisite_popup,
-                                   task_id)
-
         return items
 
     def change_runahead_popup(self, b):
@@ -1440,6 +1424,11 @@ The Cylc Suite Engine.
         window.add(vbox)
         window.show_all()
 
+    def window_show_all(self):
+        self.window.show_all()
+        if not self.info_bar.prog_bar_active():
+            self.info_bar.prog_bar.hide()
+
     def change_runahead(self, w, entry, window):
         ent = entry.get_text()
         if ent == '':
@@ -1454,89 +1443,7 @@ The Cylc Suite Engine.
             else:
                 limit = ent
         window.destroy()
-        try:
-            result = self.get_pyro(
-                'command-interface').put('set runahead', limit)
-        except Exception, x:
-            warning_dialog(x.__str__(), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
-
-    def add_prerequisite_popup(self, b, task_id):
-        window = gtk.Window()
-        window.modify_bg(gtk.STATE_NORMAL,
-                         gtk.gdk.color_parse(self.log_colors.get_color()))
-        window.set_border_width(5)
-        window.set_title("Add A Prequisite")
-        window.set_transient_for(self.window)
-        window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
-
-        sw = gtk.ScrolledWindow()
-        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
-
-        vbox = gtk.VBox()
-
-        label = gtk.Label('SUITE: ' + self.cfg.suite)
-        vbox.pack_start(label, True)
-
-        label = gtk.Label('TASK: ' + task_id)
-        vbox.pack_start(label, True)
-
-        label = gtk.Label(
-            'DEP (' + TaskID.SYNTAX + ' or message)')
-
-        entry = gtk.Entry()
-
-        hbox = gtk.HBox()
-        hbox.pack_start(label, True)
-        hbox.pack_start(entry, True)
-        vbox.pack_start(hbox)
-
-        cancel_button = gtk.Button("_Cancel")
-        cancel_button.connect("clicked", lambda x: window.destroy())
-
-        start_button = gtk.Button("_Add")
-        start_button.connect(
-            "clicked", self.add_prerequisite, entry, window, task_id)
-
-        help_button = gtk.Button("_Help")
-        help_button.connect("clicked", self.command_help, "control", "depend")
-
-        hbox = gtk.HBox()
-        hbox.pack_start(start_button, True)
-        hbox.pack_start(help_button, True)
-        hbox.pack_start(cancel_button, True)
-        vbox.pack_start(hbox)
-
-        window.add(vbox)
-        window.show_all()
-
-    def add_prerequisite(self, w, entry, window, task_id):
-        dep = entry.get_text()
-        if TaskID.is_valid_id(dep):
-            msg = dep + ' succeeded'
-        else:
-            msg = dep
-
-        try:
-            name, cycle = TaskID.split(task_id)
-        except ValueError:
-            warning_dialog(
-                "ERROR, Task or Group ID must be " + TaskID.SYNTAX,
-                self.window
-            ).warn()
-            return
-
-        window.destroy()
-        try:
-            result = self.get_pyro(
-                'command-interface').put('add prerequisite', task_id, msg)
-        except Exception, x:
-            warning_dialog(x.__str__(), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command('set_runahead', limit)
 
     def update_tb(self, tb, line, tags=None):
         if tags:
@@ -1545,14 +1452,8 @@ The Cylc Suite Engine.
             tb.insert(tb.get_end_iter(), line)
 
     def popup_requisites(self, w, e, task_id):
-        try:
-            name, point_string = TaskID.split(task_id)
-            result = self.get_pyro('suite-info').get(
-                'task requisites', name, point_string)
-        except Exception, x:
-            warning_dialog(str(x), self.window).warn()
-            return
-
+        name, point_string = TaskID.split(task_id)
+        result = self.get_pyro_info('get_task_requisites', name, point_string)
         if result:
             # (else no tasks were found at all -suite shutting down)
             if task_id not in result:
@@ -1643,77 +1544,47 @@ shown here in the state they were in at the time of triggering.''')
         self.quitters.remove(lv)
         w.destroy()
 
-    def get_confirmation(self, cmd, name, msg=None):
-
-        if self.cfg.no_prompt:
+    def get_confirmation(self, question, force_prompt=False):
+        if self.cfg.no_prompt and not force_prompt:
             return True
-
-        if not msg:
-            msg = cmd + " " + name + "?"
-
         prompt = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL,
                                    gtk.MESSAGE_QUESTION,
-                                   gtk.BUTTONS_OK_CANCEL, msg)
-
-        prompt.add_button(gtk.STOCK_HELP, gtk.RESPONSE_HELP)
+                                   gtk.BUTTONS_YES_NO, question)
         response = prompt.run()
-
-        while response == gtk.RESPONSE_HELP:
-            self.command_help(cmd)
-            response = prompt.run()
-
         prompt.destroy()
-        if response != gtk.RESPONSE_OK:
-            return False
+        return response == gtk.RESPONSE_YES
 
     def hold_task(self, b, task_id, stop=True, is_family=False):
         if stop:
-            cmd = "hold"
+            cmd = "Hold"
         else:
-            cmd = "release"
-        if not self.get_confirmation(cmd, task_id):
+            cmd = "Release"
+        if not self.get_confirmation("%s %s?" % (cmd, task_id)):
             return
-
         name, point_string = TaskID.split(task_id)
-        try:
-            if stop:
-                result = self.get_pyro('command-interface').put(
-                    'hold task now', name, point_string, is_family)
-            else:
-                result = self.get_pyro('command-interface').put(
-                    'release task', name, point_string, is_family)
-        except Exception, x:
-            # the suite was probably shut down by another process
-            warning_dialog(x.__str__(), self.window).warn()
-            return
-
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        if stop:
+            self.put_pyro_command('hold_task', name, point_string,
+                                  is_family)
+        else:
+            self.put_pyro_command('release_task', name, point_string,
+                                  is_family)
 
     def trigger_task_now(self, b, task_id, is_family=False):
         """Trigger task via the suite daemon's command interface."""
-        cmd = "trigger"
-        if not self.get_confirmation(cmd, task_id):
+        if not self.get_confirmation("Trigger %s?" % task_id):
             return
-
         name, point_string = TaskID.split(task_id)
-        try:
-            result = self.get_pyro('command-interface').put(
-                'trigger task', name, point_string, is_family)
-        except Exception, x:
-            # the suite was probably shut down by another process
-            warning_dialog(x.__str__(), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command('trigger_task', name, point_string, is_family)
 
     def trigger_task_edit_run(self, b, task_id):
         """
         Do an edit-run by invoking 'cylc trigger --edit' on the suite host.
         """
+        if not self.get_confirmation("Edit run %s?" % task_id):
+            return
         name, point_string = TaskID.split(task_id)
         command = (
-            "cylc trigger --notify-completion --use-ssh --edit --geditor -f" +
+            "cylc trigger --use-ssh --edit --geditor -f" +
             self.get_remote_run_opts() + " " + self.cfg.suite +
             " %s %s" % (name, point_string))
         foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 400, 400)
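
The reworked get_confirmation() above replaces the old per-command
OK/Cancel/Help prompt with a plain modal Yes/No question, which the task and
suite actions now call before sending their commands. The dialog pattern in
isolation (sketch; the parent window is whatever top-level gtk.Window the
caller owns):

    import gtk

    def confirm(parent_window, question):
        prompt = gtk.MessageDialog(parent_window, gtk.DIALOG_MODAL,
                                   gtk.MESSAGE_QUESTION,
                                   gtk.BUTTONS_YES_NO, question)
        response = prompt.run()
        prompt.destroy()
        return response == gtk.RESPONSE_YES

    # usage: if not confirm(window, "Stop suite?"): return
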
@@ -1721,112 +1592,40 @@ shown here in the state they were in at the time of triggering.''')
         foo.run()
 
     def poll_task(self, b, task_id, is_family=False):
-        cmd = "poll"
-        if not self.get_confirmation(cmd, task_id):
+        if not self.get_confirmation("Poll %s?" % task_id):
             return
-
         name, point_string = TaskID.split(task_id)
-        try:
-            result = self.get_pyro('command-interface').put(
-                'poll tasks', name, point_string, is_family)
-        except Exception, x:
-            # the suite was probably shut down by another process
-            warning_dialog(x.__str__(), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command('poll_tasks', name, point_string, is_family)
 
     def kill_task(self, b, task_id, is_family=False):
-        cmd = "kill"
-        if not self.get_confirmation(cmd, task_id):
+        if not self.get_confirmation("Kill %s?" % task_id, force_prompt=True):
             return
-
         name, point_string = TaskID.split(task_id)
-        try:
-            result = self.get_pyro('command-interface').put(
-                'kill tasks', name, point_string, is_family)
-        except Exception, x:
-            # the suite was probably shut down by another process
-            warning_dialog(x.__str__(), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command('kill_tasks', name, point_string, is_family)
 
     def reset_task_state(self, b, e, task_id, state, is_family=False):
         if hasattr(e, "button") and e.button != 1:
             return False
         cmd = "reset"
-
         name, point_string = TaskID.split(task_id)
-        msg = "reset " + task_id + " to " + state + "?"
-        if not self.get_confirmation(cmd, task_id, msg):
+        if not self.get_confirmation("reset %s to %s?" % (task_id, state)):
             return
-
-        try:
-            result = self.get_pyro('command-interface').put(
-                'reset task state', name, point_string, state, is_family)
-        except Exception, x:
-            # the suite was probably shut down by another process
-            warning_dialog(x.__str__(), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command('reset_task_state', name, point_string, state,
+                              is_family)
 
     def remove_task(self, b, task_id, is_family):
-        cmd = "remove"
-        msg = "remove " + task_id + " (after spawning)?"
-        if not self.get_confirmation(cmd, task_id, msg):
+        if not self.get_confirmation("Remove %s after spawning?" % task_id):
             return
-
         name, point_string = TaskID.split(task_id)
-        try:
-            result = self.get_pyro('command-interface').put(
-                'remove task', name, point_string, is_family, True)
-        except Exception, x:
-            warning_dialog(str(x), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command('remove_task', name, point_string, is_family,
+                              True)
 
     def remove_task_nospawn(self, b, task_id, is_family=False):
-        cmd = "remove"
-        msg = "remove " + task_id + " (without spawning)?"
-        if not self.get_confirmation(cmd, task_id, msg):
+        if not self.get_confirmation("Remove %s without spawning?" % task_id):
             return
-
         name, point_string = TaskID.split(task_id)
-        try:
-            result = self.get_pyro('command-interface').put(
-                'remove task', name, point_string, is_family, False)
-        except Exception, x:
-            warning_dialog(str(x), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
-
-    def purge_cycle_entry(self, e, w, task_id):
-        stop = e.get_text()
-        w.destroy()
-        try:
-            result = self.get_pyro('command-interface').put(
-                'purge tree', task_id, stop)
-        except Exception, x:
-            warning_dialog(str(x), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
-
-    def purge_cycle_button(self, b, e, w, task_id):
-        stop = e.get_text()
-        w.destroy()
-        try:
-            result = self.get_pyro('command-interface').put(
-                'purge tree', task_id, stop)
-        except Exception, x:
-            warning_dialog(str(x), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command('remove_task', name, point_string, is_family,
+                              False)
 
     def stopsuite_popup(self, b):
         window = gtk.Window()
@@ -2083,7 +1882,7 @@ shown here in the state they were in at the time of triggering.''')
         warmstart_rb.connect(
             "toggled", self.startup_method, "warm", ic_box, is_box)
         restart_rb.connect(
-            "toggled", self.startup_method, "re",   ic_box, is_box)
+            "toggled", self.startup_method, "re", ic_box, is_box)
 
         hbox = gtk.HBox()
 
@@ -2140,49 +1939,6 @@ shown here in the state they were in at the time of triggering.''')
         window.add(vbox)
         window.show_all()
 
-    def popup_purge(self, b, task_id):
-        window = gtk.Window()
-        window.modify_bg(gtk.STATE_NORMAL,
-                         gtk.gdk.color_parse(self.log_colors.get_color()))
-        window.set_border_width(5)
-        window.set_title("Purge " + task_id)
-        window.set_transient_for(self.window)
-        window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
-
-        sw = gtk.ScrolledWindow()
-        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
-
-        vbox = gtk.VBox()
-        label = gtk.Label('stop cycle (inclusive)')
-
-        entry = gtk.Entry()
-        entry.set_max_length(14)
-        entry.connect("activate", self.purge_cycle_entry, window, task_id)
-
-        hbox = gtk.HBox()
-        hbox.pack_start(label, True)
-        hbox.pack_start(entry, True)
-        vbox.pack_start(hbox)
-
-        start_button = gtk.Button("_Purge")
-        start_button.connect(
-            "clicked", self.purge_cycle_button, entry, window, task_id)
-
-        help_button = gtk.Button("_Help")
-        help_button.connect("clicked", self.command_help, "control", "purge")
-
-        cancel_button = gtk.Button("_Cancel")
-        cancel_button.connect("clicked", lambda x: window.destroy())
-
-        hbox = gtk.HBox()
-        hbox.pack_start(start_button, True)
-        hbox.pack_start(help_button, True)
-        hbox.pack_start(cancel_button, True)
-        vbox.pack_start(hbox)
-
-        window.add(vbox)
-        window.show_all()
-
     def point_string_entry_popup(self, b, callback, title):
         window = gtk.Window()
         window.modify_bg(gtk.STATE_NORMAL,
@@ -2288,67 +2044,34 @@ shown here in the state they were in at the time of triggering.''')
         point_string = entry_point_string.get_text()
         is_family = fam_cb.get_active()
         stop_point_string = entry_stoppoint.get_text()
-
         if match == '' or point_string == '':
             warning_dialog(
                 "Enter task or family name MATCH expression",
                 self.window).warn()
             return
-
         window.destroy()
-
         stop = None
         if stop_point_string != '':
             stop = stop_point_string
-
-        try:
-            result = self.get_pyro('command-interface').put(
-                'insert task', match, point_string, is_family, stop)
-        except Exception, x:
-            warning_dialog(x.__str__(), self.window).warn()
-            return
-        if not result[0]:
-            warning_dialog(result[1], self.window).warn()
+        self.put_pyro_command(
+            'insert_task', match, point_string, is_family, stop)
 
     def poll_all(self, w):
-        command = "cylc poll" + self.get_remote_run_opts() + " " + self.cfg.suite
+        command = (
+            "cylc poll" + self.get_remote_run_opts() + " " + self.cfg.suite)
         foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 600, 400)
         self.gcapture_windows.append(foo)
         foo.run()
 
     def reload_suite(self, w):
-        msg = """Reload the suite definition.
-This allows you change task runtime configuration and add
-or remove task definitions without restarting the suite."""
-        prompt = gtk.MessageDialog(self.window, gtk.DIALOG_MODAL,
-                                   gtk.MESSAGE_QUESTION,
-                                   gtk.BUTTONS_OK_CANCEL, msg)
-
-        prompt.add_button(gtk.STOCK_HELP, gtk.RESPONSE_HELP)
-        response = prompt.run()
-
-        while response == gtk.RESPONSE_HELP:
-            self.command_help("control", "reload")
-            response = prompt.run()
-
-        prompt.destroy()
-        if response != gtk.RESPONSE_OK:
+        if not self.get_confirmation("Reload suite definition?"):
             return
-
-        command = (
-            "cylc reload -f" + self.get_remote_run_opts() + " " + self.cfg.suite)
-        foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 600, 400)
-        self.gcapture_windows.append(foo)
-        foo.run()
+        self.put_pyro_command('reload_suite')
 
     def nudge_suite(self, w):
-        try:
-            result = self.get_pyro('command-interface').put('nudge suite')
-        except Exception, x:
-            warning_dialog(str(x), self.window).warn()
-            return False
-        if not result:
-            warning_dialog('Failed to nudge the suite', self.window).warn()
+        if not self.get_confirmation("Nudge suite?"):
+            return
+        self.put_pyro_command('nudge')
 
     def popup_logview(self, task_id, logfiles, choice=None):
         """Display task job log files in a combo log viewer."""
@@ -2357,11 +2080,11 @@ or remove task definitions without restarting the suite."""
                          gtk.gdk.color_parse(self.log_colors.get_color()))
         window.set_border_width(5)
         window.set_size_request(800, 400)
-        log_paths = list(logfiles)
-        log_paths.sort(key=self._sort_key_func, reverse=True)
+        filenames = list(logfiles)
+        filenames.sort(key=self._sort_key_func, reverse=True)
         init_active_index = None
         if choice:
-            for i, log in enumerate(log_paths):
+            for i, log in enumerate(filenames):
                 if log.endswith("/" + choice):
                     init_active_index = i
                     break
@@ -2372,23 +2095,55 @@ or remove task definitions without restarting the suite."""
         elif is_remote_user(self.cfg.owner):
             auth = self.cfg.owner + "@" + self.cfg.host
         if auth:
-            for i, log in enumerate(log_paths):
+            for i, log in enumerate(filenames):
                 if ":" not in log:
-                    log_paths[i] = auth + ":" + log
+                    filenames[i] = auth + ":" + log
         window.set_title(task_id + ": Log Files")
-        lv = ComboLogViewer(task_id, log_paths, init_active_index)
-        self.quitters.append(lv)
+        viewer = ComboLogViewer(
+            task_id, filenames,
+            self._get_logview_cmd_tmpls_map(task_id, filenames),
+            init_active_index)
+        self.quitters.append(viewer)
 
-        window.add(lv.get_widget())
+        window.add(viewer.get_widget())
 
         quit_button = gtk.Button("_Close")
-        quit_button.connect("clicked", self.on_popup_quit, lv, window)
+        quit_button.connect("clicked", self.on_popup_quit, viewer, window)
 
-        lv.hbox.pack_start(quit_button, False)
+        viewer.hbox.pack_start(quit_button, False)
 
-        window.connect("delete_event", lv.quit_w_e)
+        window.connect("delete_event", viewer.quit_w_e)
         window.show_all()
 
+    def _get_logview_cmd_tmpls_map(self, task_id, filenames):
+        """Helper for self.popup_logview()."""
+        summary = self.updater.full_state_summary[task_id]
+        if summary["state"] != "running":
+            return {}
+        ret = {}
+        for key in "out", "err":
+            suffix = "/%(submit_num)02d/job.%(key)s" % {
+                "submit_num": summary["submit_num"], "key": key}
+            for filename in filenames:
+                if not filename.endswith(suffix):
+                    continue
+                user_at_host = None
+                if ":" in filename:
+                    user_at_host = filename.split(":", 1)[0]
+                if user_at_host and "@" in user_at_host:
+                    owner, host = user_at_host.split("@", 1)
+                else:
+                    owner, host = (None, user_at_host)
+                try:
+                    conf = GLOBAL_CFG.get_host_item(
+                        "batch systems", host, owner)
+                    cmd_tmpl = conf[summary["batch_sys_name"]][key + " tailer"]
+                    ret[filename] = cmd_tmpl % {
+                        "job_id": summary["submit_method_id"]}
+                except (KeyError, TypeError):
+                    continue
+        return ret
+
     def _sort_key_func(self, log_path):
         """Sort key for a task job log path."""
         head, submit_num, base = log_path.rsplit("/", 2)
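
_get_logview_cmd_tmpls_map() above matches each log path against a
"/NN/job.out"-style suffix built with %-formatting, then expands the
site-configured out/err tailer template with the batch system job id, while
_sort_key_func() splits the same paths from the right to sort by submit
number. The string operations in isolation (the tailer template and log path
shown are made up):

    # Suffix used to pick out the current submit's stdout file:
    suffix = "/%(submit_num)02d/job.%(key)s" % {"submit_num": 3, "key": "out"}
    print(suffix)                            # -> /03/job.out

    # Hypothetical 'out tailer' template from the global config, expanded
    # with the batch system job id:
    cmd_tmpl = "qcat -f %(job_id)s"
    print(cmd_tmpl % {"job_id": "12345"})    # -> qcat -f 12345

    # Sort key pieces used by _sort_key_func():
    head, submit_num, base = "some/dir/03/job.out".rsplit("/", 2)
    print((submit_num, base))                # -> ('03', 'job.out')
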
@@ -2435,6 +2190,8 @@ or remove task definitions without restarting the suite."""
 
         self.view1_align_item = gtk.CheckMenuItem(
             label="Toggle views _side-by-side")
+        if self.view_layout_horizontal is True:
+            self.view1_align_item.set_active(self.view_layout_horizontal)
         self._set_tooltip(
             self.view1_align_item, "Toggle horizontal layout of views.")
         self.view1_align_item.connect(
@@ -2515,6 +2272,15 @@ to reduce network traffic.""")
         thememenu = gtk.Menu()
         theme_item.set_submenu(thememenu)
 
+        self.view_menu.append(gtk.SeparatorMenuItem())
+        uuid_item = gtk.ImageMenuItem("Client _UUID")
+        img = gtk.image_new_from_stock(gtk.STOCK_INDEX,
+                                       gtk.ICON_SIZE_MENU)
+        uuid_item.set_image(img)
+        self._set_tooltip(
+            uuid_item, "View the client UUID for this gcylc instance")
+        self.view_menu.append(uuid_item)
+        uuid_item.connect('activate', self.popup_uuid_dialog)
         theme_items = {}
         theme = "default"
         theme_items[theme] = gtk.RadioMenuItem(label='_' + theme)
@@ -2691,12 +2457,6 @@ to reduce network traffic.""")
         tools_menu_root = gtk.MenuItem('_Suite')
         tools_menu_root.set_submenu(tools_menu)
 
-        url_item = gtk.ImageMenuItem('_Browse suite URL')
-        img = gtk.image_new_from_stock(gtk.STOCK_APPLY, gtk.ICON_SIZE_MENU)
-        url_item.set_image(img)
-        tools_menu.append(url_item)
-        url_item.connect('activate', self.browse, self.cfg.suite)
-
         val_item = gtk.ImageMenuItem('_Validate')
         img = gtk.image_new_from_stock(gtk.STOCK_APPLY, gtk.ICON_SIZE_MENU)
         val_item.set_image(img)
@@ -2711,12 +2471,11 @@ to reduce network traffic.""")
         tools_menu.append(des_item)
         des_item.connect('activate', self.describe_suite)
 
-        info_item = gtk.ImageMenuItem('_Info (Running)')
-        img = gtk.image_new_from_stock(gtk.STOCK_DIALOG_INFO,
-                                       gtk.ICON_SIZE_MENU)
-        info_item.set_image(img)
-        tools_menu.append(info_item)
-        info_item.connect('activate', self.run_suite_info)
+        url_item = gtk.ImageMenuItem('_Browse Suite URL')
+        img = gtk.image_new_from_stock(gtk.STOCK_DND, gtk.ICON_SIZE_MENU)
+        url_item.set_image(img)
+        tools_menu.append(url_item)
+        url_item.connect('activate', self.browse, self.cfg.suite)
 
         tools_menu.append(gtk.SeparatorMenuItem())
 
@@ -2878,15 +2637,20 @@ to reduce network traffic.""")
         self.menu_bar.append(help_menu_root)
 
     def describe_suite(self, w):
-        command = (
-            "echo '> TITLE:'; " +
-            "cylc get-suite-config -i title " + self.cfg.suite + "; " +
-            "echo '> DESCRIPTION:'; " +
-            "cylc get-suite-config --notify-completion -i description " +
-            self.cfg.suite)
-        foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 800, 400)
-        self.gcapture_windows.append(foo)
-        foo.run()
+        # Show suite title and description.
+        if self.updater.connected:
+            # Interrogate the suite daemon.
+            info = self.get_pyro_info('get_suite_info')
+            descr = '\n'.join(
+                "%s: %s" % (key, val) for key, val in info.items())
+            info_dialog(descr, self.window).inform()
+        else:
+            # Parse the suite definition.
+            command = ("cylc get-suite-config -i title -i description " +
+                       self.get_remote_run_opts() + " " + self.cfg.suite)
+            foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 800, 400)
+            self.gcapture_windows.append(foo)
+            foo.run()
 
     def search_suite_popup(self, w):
         reg = self.cfg.suite
@@ -2937,7 +2701,7 @@ to reduce network traffic.""")
         if not yesbin_cb.get_active():
             options += ' -x '
         command = (
-            "cylc search --notify-completion %s %s %s" % (
+            "cylc search %s %s %s" % (
                 options, reg, pattern)
         )
         foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, width=600,
@@ -3040,7 +2804,7 @@ This is what my suite does:..."""
             cat_menu.append(foo_item)
             com_menu = gtk.Menu()
             foo_item.set_submenu(com_menu)
-            cout = subprocess.Popen(["cylc", "category="+category],
+            cout = subprocess.Popen(["cylc", "category=" + category],
                                     stdout=subprocess.PIPE).communicate()[0]
             commands = cout.rstrip().split()
             for command in commands:
@@ -3069,11 +2833,13 @@ This is what my suite does:..."""
                         ebox.modify_bg(gtk.STATE_NORMAL,
                                        self.filter_highlight_color)
 
-        self.updater.filter_states_excl = task_states
         self.filter_states_excl = task_states
         self.info_bar.set_filter_state(task_states, self.filter_name_string)
-        self.updater.refilter()
-        self.refresh_views()
+        if self.updater is not None:
+            # Else no suite is connected yet.
+            self.updater.filter_states_excl = task_states
+            self.updater.refilter()
+            self.refresh_views()
 
     def reset_filter_box(self, w=None):
         for subbox in self.task_filter_box.get_children():
@@ -3263,18 +3029,35 @@ For more Stop options use the Control menu.""")
         self.tool_bar_box.pack2(self.tool_bars[1], resize=True, shrink=True)
 
     def _alter_status_toolbar_menu(self, new_status):
-        # Handle changes in status for some toolbar/menuitems.
+        """Handle changes in status for some toolbar/menuitems.
+
+       Example status strings:
+         * connected
+         * initialising
+         * running
+         * running to 20150601T0000Z
+         * running to 2015-08-08T01:00:00+12:00
+         * running to hold at 20150601T0000Z
+         * held
+         * reloading
+         * stopping
+         * stopped
+         * stopped with 'succeeded'
+         * stopped with 'running'
+        """
         if new_status == self._prev_status:
             return False
+        self.info_bar.prog_bar_disabled = False
         self._prev_status = new_status
-        if "connected" in new_status:
-            self.stop_toolbutton.set_sensitive(False)
-            return False
-        run_ok = bool("stopped" in new_status)
-        pause_ok = bool("running" in new_status)
-        unpause_ok = bool("hold at" in new_status or "held" in new_status or
-                          "stopping" in new_status)
-        stop_ok = bool("stopped" not in new_status)
+        run_ok = "stopped" in new_status
+        # Pause: avoid "stopped with 'running'".
+        pause_ok = (
+            new_status == "reloading" or
+            "running" in new_status and "stopped" not in new_status)
+        unpause_ok = "held" == new_status
+        stop_ok = ("stopped" not in new_status and
+                   "connected" != new_status and
+                   "initialising" != new_status)
         self.run_menuitem.set_sensitive(run_ok)
         self.pause_menuitem.set_sensitive(pause_ok)
         self.unpause_menuitem.set_sensitive(unpause_ok)
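
The docstring above lists the status strings the suite daemon can report; the
booleans beneath it decide which run/pause/unpause/stop controls stay
sensitive. The same logic in isolation, evaluated for two of the documented
examples:

    def toolbar_flags(new_status):
        run_ok = "stopped" in new_status
        # Pause: avoid "stopped with 'running'".
        pause_ok = (
            new_status == "reloading" or
            "running" in new_status and "stopped" not in new_status)
        unpause_ok = "held" == new_status
        stop_ok = ("stopped" not in new_status and
                   "connected" != new_status and
                   "initialising" != new_status)
        return run_ok, pause_ok, unpause_ok, stop_ok

    print(toolbar_flags("running to 20150601T0000Z"))
    # -> (False, True, False, True)
    print(toolbar_flags("stopped with 'running'"))
    # -> (True, False, False, False)
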
@@ -3305,6 +3088,7 @@ For more Stop options use the Control menu.""")
         icon_widget.show()
         self.run_pause_toolbutton.set_icon_widget(icon_widget)
         self.run_pause_toolbutton.set_label(label)
+        self.run_pause_toolbutton.set_sensitive(True)
         tip_tuple = gtk.tooltips_data_get(self.run_pause_toolbutton)
         if tip_tuple is None:
             tips = gtk.Tooltips()
@@ -3321,6 +3105,12 @@ For more Stop options use the Control menu.""")
             lambda: self.run_suite_log(None, type="err"))
         self._set_info_bar()
 
+    def popup_uuid_dialog(self, w):
+        info_dialog(
+            "Client UUID %s\n"
+            "(this identifies a client instance to the suite daemon)" % (
+                self.cfg.my_uuid), self.window).inform()
+
     def popup_theme_legend(self, widget=None):
         """Popup a theme legend window."""
         if self.theme_legend_window is None:
@@ -3363,14 +3153,25 @@ For more Stop options use the Control menu.""")
         """Handle a destroy of the theme legend window."""
         self.theme_legend_window = None
 
-    def get_pyro(self, object):
-        return cylc_pyro_client.client(
-            self.cfg.suite, self.cfg.pphrase, self.cfg.owner, self.cfg.host,
-            self.cfg.pyro_timeout, self.cfg.port).get_proxy(object)
+    def put_pyro_command(self, command, *args):
+        try:
+            success, msg = self.updater.suite_command_client.put_command(
+                command, *args)
+        except Exception, x:
+            warning_dialog(x.__str__(), self.window).warn()
+        else:
+            if not success:
+                warning_dialog(msg, self.window).warn()
+
+    def get_pyro_info(self, command, *args):
+        try:
+            return self.updater.suite_info_client.get_info(command, *args)
+        except Exception as exc:
+            warning_dialog(str(exc), self.window).warn()
 
     def run_suite_validate(self, w):
         command = ("cylc validate -v " + self.get_remote_run_opts() +
-                   " --notify-completion " + self.cfg.template_vars_opts +
+                   " " + self.cfg.template_vars_opts +
                    " " + self.cfg.suite)
         foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 700)
         self.gcapture_windows.append(foo)
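
put_pyro_command() and get_pyro_info() above centralise the
try/except-and-warn boilerplate that each handler previously repeated around
its Pyro call, so callers reduce to one line such as
self.put_pyro_command('hold_suite'). The same pattern as a standalone sketch
(the warning_dialog import path is assumed; client is any object exposing
put_command(), here the updater's suite_command_client):

    from cylc.gui.warning_dialog import warning_dialog  # assumed location

    def put_command_safely(client, parent_window, command, *args):
        """Send a suite command; pop up a warning on any failure."""
        try:
            success, msg = client.put_command(command, *args)
        except Exception as exc:
            # e.g. the suite daemon has gone away
            warning_dialog(str(exc), parent_window).warn()
            return
        if not success:
            warning_dialog(msg, parent_window).warn()
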
@@ -3381,7 +3182,7 @@ For more Stop options use the Control menu.""")
         extra = ''
         if inlined:
             extra = '-i '
-        command = ("cylc edit --notify-completion -g" + " " +
+        command = ("cylc edit -g " +
                    self.cfg.template_vars_opts + " " +
                    self.get_remote_run_opts() + " " + extra + ' ' +
                    self.cfg.suite)
@@ -3392,7 +3193,7 @@ For more Stop options use the Control menu.""")
 
     def run_suite_graph(self, w, show_ns=False):
         if show_ns:
-            command = "cylc graph --notify-completion -n %s %s %s" % (
+            command = "cylc graph -n %s %s %s" % (
                 self.cfg.template_vars_opts,
                 self.get_remote_run_opts(),
                 self.cfg.suite)
@@ -3409,7 +3210,7 @@ For more Stop options use the Control menu.""")
 
     def run_suite_info(self, w):
         command = (
-            "cylc show --notify-completion" + self.get_remote_run_opts() +
+            "cylc show " + self.get_remote_run_opts() +
             " " + self.cfg.suite)
         foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 600, 400)
         self.gcapture_windows.append(foo)
@@ -3418,7 +3219,7 @@ For more Stop options use the Control menu.""")
     def run_suite_list(self, w, opt=''):
         command = (
             "cylc list " + self.get_remote_run_opts() + " " + opt +
-            " --notify-completion " + " " + self.cfg.template_vars_opts + " " +
+            " " + self.cfg.template_vars_opts + " " +
             self.cfg.suite)
         foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 600, 600)
         self.gcapture_windows.append(foo)
@@ -3433,7 +3234,7 @@ For more Stop options use the Control menu.""")
             else:
                 xopts = ' '
 
-            command = ("cylc cat-log --notify-completion" +
+            command = ("cylc cat-log " +
                        self.get_remote_run_opts() +
                        xopts + self.cfg.suite)
             foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 800, 400,
@@ -3454,7 +3255,7 @@ For more Stop options use the Control menu.""")
         elif method == 'processed':
             extra = ' -j'
 
-        command = ("cylc view --notify-completion -g " +
+        command = ("cylc view -g " +
                    self.get_remote_run_opts() + " " + extra + " " +
                    self.cfg.template_vars_opts + " " + self.cfg.suite)
         foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 400)
@@ -3466,7 +3267,8 @@ For more Stop options use the Control menu.""")
         return " --host=" + self.cfg.host + " --user=" + self.cfg.owner
 
     def browse(self, b, *args):
-        command = 'cylc doc ' + self.get_remote_run_opts() + ' ' + ' '.join(args)
+        command = (
+            'cylc doc ' + self.get_remote_run_opts() + ' ' + ' '.join(args))
         foo = gcapture_tmpfile(command, self.cfg.cylc_tmpdir, 700)
         self.gcapture_windows.append(foo)
         foo.run()
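
The two helper methods added above, put_pyro_command() and get_pyro_info(), wrap all suite communication in the same try/except-plus-dialog pattern. A minimal sketch of that pattern outside the GUI, assuming command and info client objects equivalent to the updater's suite_command_client and suite_info_client (their construction is not shown in this patch):

    def send_command(command_client, command, *args):
        # put_command() returns (success, message), as used above.
        try:
            success, msg = command_client.put_command(command, *args)
        except Exception as exc:
            print "command failed:", exc
            return False
        if not success:
            print "command rejected:", msg
        return success

    def fetch_info(info_client, command, *args):
        # get_info() returns the requested data; treat any exception as failure.
        try:
            return info_client.get_info(command, *args)
        except Exception as exc:
            print "info request failed:", exc
            return None
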
diff --git a/lib/cylc/gui/color_rotator.py b/lib/cylc/gui/color_rotator.py
index 3a19e90..2d614ce 100644
--- a/lib/cylc/gui/color_rotator.py
+++ b/lib/cylc/gui/color_rotator.py
@@ -16,15 +16,17 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+
 class rotator(object):
-    def __init__( self, colors=[ '#fcc', '#cfc', '#bbf', '#ffb' ] ):
+    def __init__(self, colors=['#fcc', '#cfc', '#bbf', '#ffb']):
         self.colors = colors
         self.current_color = 0
-    def get_color( self ):
+
+    def get_color(self):
         index = self.current_color
-        if index == len( self.colors ) - 1:
+        if index == len(self.colors) - 1:
             index = 0
         else:
             index += 1
         self.current_color = index
-        return self.colors[ index ]
+        return self.colors[index]
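
Usage sketch for the rotator class reformatted above: each call to get_color() advances through the default palette and wraps around at the end.

    from cylc.gui.color_rotator import rotator

    rot = rotator()          # default palette: '#fcc', '#cfc', '#bbf', '#ffb'
    print rot.get_color()    # '#cfc' - advances from index 0 before returning
    print rot.get_color()    # '#bbf'
    print rot.get_color()    # '#ffb'
    print rot.get_color()    # wraps back to '#fcc'
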
diff --git a/lib/cylc/gui/combo_logviewer.py b/lib/cylc/gui/combo_logviewer.py
index 295c27d..946d2b7 100644
--- a/lib/cylc/gui/combo_logviewer.py
+++ b/lib/cylc/gui/combo_logviewer.py
@@ -20,24 +20,40 @@ import gtk
 import os
 
 from cylc.gui.logviewer import logviewer
-from cylc.gui.tailer import tailer
+from cylc.gui.tailer import Tailer
 
 
 class ComboLogViewer(logviewer):
 
-    """Implement a log viewer for the "cylc gui".
-    
+    """Implement a viewer for task jobs in the "cylc gui".
+
     It has a combo box for log file selection.
 
+    task_id -- The NAME.POINT of a task proxy.
+    filenames -- The names of the task job logs.
+    cmd_tmpls -- A dict mapping file names to alternate commands used to
+                 tail-follow the file.
+    init_active_index -- The index for selecting the initial log file.
     """
 
     LABEL_TEXT = "Choose Log File: "
 
-    def __init__(self, name, file_list, init_active_index=None):
-        self.file_list = file_list
+    def __init__(self, task_id, filenames, cmd_tmpls, init_active_index):
+        self.filenames = filenames
         self.init_active_index = init_active_index
-        self.common_dir = os.path.dirname(os.path.commonprefix(self.file_list))
-        logviewer.__init__(self, name, None, self.file_list[init_active_index])
+        self.cmd_tmpls = cmd_tmpls
+        self.common_dir = os.path.dirname(os.path.commonprefix(self.filenames))
+        logviewer.__init__(
+            self, task_id, None, self.filenames[self.init_active_index])
+
+    def connect(self):
+        """Connect to the selected log file tailer."""
+        try:
+            cmd_tmpl = self.cmd_tmpls[self.filename]
+        except (KeyError, TypeError):
+            cmd_tmpl = None
+        self.t = Tailer(self.logview, self.filename, cmd_tmpl=cmd_tmpl)
+        self.t.start()
 
     def create_gui_panel(self):
         """Create the panel."""
@@ -45,8 +61,12 @@ class ComboLogViewer(logviewer):
         label = gtk.Label(self.LABEL_TEXT)
         combobox = gtk.combo_box_new_text()
 
-        for file_ in self.file_list:
-            combobox.append_text(os.path.relpath(file_, self.common_dir))
+        for filename in self.filenames:
+            relpath = os.path.relpath(filename, self.common_dir)
+            if len(relpath) < len(filename):
+                combobox.append_text(relpath)
+            else:
+                combobox.append_text(filename)
 
         combobox.connect("changed", self.switch_log)
         if self.init_active_index:
@@ -65,16 +85,19 @@ class ComboLogViewer(logviewer):
         index = callback.get_active()
 
         name = model[index][0]
-        file_ = os.path.join(self.common_dir, name)
-        if file_ != self.file:
-            self.file = file_
-            self.t.quit = True
+        if name in self.filenames:
+            filename = name
+        else:
+            filename = os.path.join(self.common_dir, name)
+        if filename != self.filename:
+            self.filename = filename
+            self.t.stop()
+            self.t.join()
             logbuffer = self.logview.get_buffer()
             pos_start, pos_end = logbuffer.get_bounds()
             self.reset_logbuffer()
             logbuffer.delete(pos_start, pos_end)
             self.log_label.set_text(name)
-            self.t = tailer(self.logview, file_)
-            self.t.start()
+            self.connect()
 
         return False
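
A minimal construction sketch for the new ComboLogViewer signature documented above. The task ID and log file paths are purely illustrative; passing None for cmd_tmpls falls back to the plain tailing behaviour, since connect() catches the resulting KeyError/TypeError.

    from cylc.gui.combo_logviewer import ComboLogViewer

    filenames = [
        "/path/to/job/01/job.out",   # hypothetical job log paths
        "/path/to/job/01/job.err",
    ]
    # No alternate tail commands, so connect() uses the default Tailer.
    viewer = ComboLogViewer("mytask.20150101T0000Z", filenames, None, 0)
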
diff --git a/lib/cylc/gui/cylc_logviewer.py b/lib/cylc/gui/cylc_logviewer.py
index cb0ea3e..25ad0fe 100644
--- a/lib/cylc/gui/cylc_logviewer.py
+++ b/lib/cylc/gui/cylc_logviewer.py
@@ -16,79 +16,76 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from filtered_tailer import filtered_tailer
-from tailer import tailer
 import gtk
 import pygtk
-####pygtk.require('2.0')
-import time, os, re, sys
-from warning_dialog import warning_dialog
-from util import get_icon
-from logviewer import logviewer
+import os
+from cylc.gui.logviewer import logviewer
+from cylc.gui.tailer import Tailer
+from cylc.gui.util import get_icon
+from cylc.gui.warning_dialog import warning_dialog
 
-class cylc_logviewer( logviewer ):
 
-    def __init__( self, name, dir, task_list ):
+class cylc_logviewer(logviewer):
+
+    def __init__(self, name, dirname, task_list):
         self.task_list = task_list
         self.main_log = name
         self.level = 0
         self.task_filter = None
         self.custom_filter = None
 
-        logviewer.__init__( self, name, dir, name,
-                warning_re = 'WARNING', critical_re = 'CRITICAL' )
+        logviewer.__init__(self, name, dirname, name)
 
-    def create_gui_panel( self ):
-        logviewer.create_gui_panel( self )
+    def create_gui_panel(self):
+        logviewer.create_gui_panel(self)
 
         self.window = gtk.Window()
-        #self.window.set_border_width(5)
-        self.window.set_title( "log viewer" )
+        # self.window.set_border_width(5)
+        self.window.set_title("log viewer")
         self.window.set_size_request(800, 400)
-        self.window.set_icon( get_icon() )
+        self.window.set_icon(get_icon())
 
         combobox = gtk.combo_box_new_text()
-        combobox.append_text( 'Task' )
-        combobox.append_text( 'all' )
+        combobox.append_text('Task')
+        combobox.append_text('all')
         for task in self.task_list:
-            combobox.append_text( task )
+            combobox.append_text(task)
 
-        combobox.connect("changed", self.filter_log )
+        combobox.connect("changed", self.filter_log)
         combobox.set_active(0)
 
+        newer = gtk.Button("_newer")
+        newer.connect("clicked", self.rotate_log, False)
+        self.hbox.pack_end(newer, False)
 
-        newer = gtk.Button( "_newer" )
-        newer.connect("clicked", self.rotate_log, False )
-        self.hbox.pack_end( newer, False )
-
-        older = gtk.Button( "_older" )
-        older.connect("clicked", self.rotate_log, True )
-        self.hbox.pack_end( older, False )
+        older = gtk.Button("_older")
+        older.connect("clicked", self.rotate_log, True)
+        self.hbox.pack_end(older, False)
 
-        self.hbox.pack_end( combobox, False )
+        self.hbox.pack_end(combobox, False)
 
         filterbox = gtk.HBox()
         entry = gtk.Entry()
-        entry.connect( "activate", self.custom_filter_log )
+        entry.connect("activate", self.custom_filter_log)
         label = gtk.Label('Filter')
         filterbox.pack_start(label, True)
         filterbox.pack_start(entry, True)
-        self.hbox.pack_end( filterbox, False )
+        self.hbox.pack_end(filterbox, False)
 
-        close = gtk.Button( "_Close" )
-        close.connect("clicked", self.shutdown, None, self.window )
-        self.hbox.pack_start( close, False )
+        close = gtk.Button("_Close")
+        close.connect("clicked", self.shutdown, None, self.window)
+        self.hbox.pack_start(close, False)
 
-        self.window.add( self.vbox )
-        self.window.connect("delete_event", self.shutdown, self.window )
+        self.window.add(self.vbox)
+        self.window.connect("delete_event", self.shutdown, self.window)
 
         self.window.show_all()
 
-    def shutdown( self, w, e, wind ):
+    def shutdown(self, w, e, wind):
         self.quit()
         wind.destroy()
 
-    def filter_log( self, cb ):
+    def filter_log(self, cb):
         model = cb.get_model()
         index = cb.get_active()
         if index == 0:
@@ -106,7 +103,7 @@ class cylc_logviewer( logviewer ):
         # TODO - CHECK ALL BOOLEAN RETURN VALUES THROUGHOUT THE GUI
         return False
 
-    def custom_filter_log( self, e ):
+    def custom_filter_log(self, e):
         txt = e.get_text()
         if txt == '':
             filter = None
@@ -118,49 +115,44 @@ class cylc_logviewer( logviewer ):
 
         return False
 
-    def current_log( self ):
+    def current_log(self):
         if self.level == 0:
             return self.main_log
         else:
-            return self.main_log + '.' + str( self.level )
+            return self.main_log + '.' + str(self.level)
 
-    def rotate_log( self, bt, go_older ):
+    def rotate_log(self, bt, go_older):
         if go_older:
             self.level += 1
         else:
             self.level -= 1
         if self.level < 0:
-            warning_dialog( """
+            warning_dialog("""
 At newest rotation; reloading in case
-the suite has been restarted.""", self.window ).warn()
+the suite has been restarted.""", self.window).warn()
             self.level = 0
             # but update view in case user started suite after gui
-        if self.current_log() not in os.listdir( self.dir ):
+        if self.current_log() not in os.listdir(self.dirname):
             if go_older:
-                warning_dialog( "Older log not available", self.window ).warn()
+                warning_dialog("Older log not available", self.window).warn()
                 self.level -= 1
                 return
             else:
-                warning_dialog( "Newer log not available", self.window ).warn()
+                warning_dialog("Newer log not available", self.window).warn()
                 self.level += 1
                 return
         else:
-            self.file = self.current_log()
+            self.filename = self.current_log()
         self.update_view()
 
-    def update_view( self ):
-        self.t.quit = True
+    def update_view(self):
+        self.t.stop()
         logbuffer = self.logview.get_buffer()
-        s,e = logbuffer.get_bounds()
+        s, e = logbuffer.get_bounds()
         self.reset_logbuffer()
-        logbuffer.delete( s, e )
-        self.log_label.set_text( self.path() )
-        if self.task_filter or self.custom_filter:
-            filters = [self.task_filter, self.custom_filter ]
-            self.t = filtered_tailer( self.logview, self.path(), filters,
-                    warning_re = 'WARNING', critical_re = 'CRITICAL' )
-        else:
-            self.t = tailer( self.logview, self.path(),
-                    warning_re = 'WARNING', critical_re = 'CRITICAL' )
-        ###print "Starting log viewer thread"
+        logbuffer.delete(s, e)
+        self.log_label.set_text(self.path())
+        self.t = Tailer(
+            self.logview, self.path(),
+            filters=[f for f in [self.task_filter, self.custom_filter] if f])
         self.t.start()
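
The old tailer/filtered_tailer pair is replaced throughout by the single Tailer class, with filtering moved into a constructor argument. A sketch of the lifecycle as used in the hunks above; the log path and filter pattern are illustrative only.

    import gtk
    from cylc.gui.tailer import Tailer

    logview = gtk.TextView()
    tail = Tailer(logview, "/path/to/suite/log/log",
                  filters=["WARNING"])   # show only lines matching the filters
    tail.start()                         # follow the file in a background thread
    # ... later, e.g. when switching log files or closing the viewer:
    tail.stop()
    tail.join()
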
diff --git a/lib/cylc/gui/dbchooser.py b/lib/cylc/gui/dbchooser.py
index f5e3f73..cec246d 100644
--- a/lib/cylc/gui/dbchooser.py
+++ b/lib/cylc/gui/dbchooser.py
@@ -17,22 +17,23 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import gobject
-#import pygtk
-#pygtk.require('2.0')
 import gtk
-import time, os, re, sys
+import time
+import os
+import re
+import sys
 import threading
 from util import EntryTempText, EntryDialog
 from cylc.run_get_stdout import run_get_stdout
 
 try:
-    from cylc import cylc_pyro_client
-except BaseException, x: # this catches SystemExit
+    import Pyro.core
+except BaseException, x:  # this catches SystemExit
     PyroInstalled = False
     print >> sys.stderr, "WARNING: Pyro is not installed."
 else:
     PyroInstalled = True
-    from cylc.port_scan import scan
+    from cylc.network.port_scan import scan
 
 from cylc.registration import localdb
 from cylc.regpath import RegPath
@@ -42,9 +43,12 @@ from gcapture import gcapture, gcapture_tmpfile
 
 debug = False
 
+
 class db_updater(threading.Thread):
+
     count = 0
-    def __init__(self, regd_treestore, db, filtr=None, pyro_timeout=None ):
+
+    def __init__(self, regd_treestore, db, filtr=None, pyro_timeout=None):
         self.__class__.count += 1
         self.me = self.__class__.count
         self.filtr = filtr
@@ -65,22 +69,22 @@ class db_updater(threading.Thread):
         self.regd_choices = self.db.get_list(filtr)
 
         # not needed:
-        # self.build_treestore( self.newtree )
+        # self.build_treestore(self.newtree)
         self.construct_newtree()
         self.update()
 
-    def construct_newtree( self ):
+    def construct_newtree(self):
         # construct self.newtree[one][two]...[nnn] = [state, descr, dir ]
         self.running_choices_changed()
         ports = {}
         for suite in self.running_choices:
             reg, port = suite
-            ports[ reg ] = port
+            ports[reg] = port
 
         self.newtree = {}
         for reg in self.regd_choices:
             suite, suite_dir, descr = reg
-            suite_dir = re.sub( '^' + os.environ['HOME'], '~', suite_dir )
+            suite_dir = re.sub('^' + os.environ['HOME'], '~', suite_dir)
             if suite in ports:
                 state = str(ports[suite])
             else:
@@ -91,52 +95,56 @@ class db_updater(threading.Thread):
                 if key not in nest2:
                     nest2[key] = {}
                 nest2 = nest2[key]
-            nest2[regp[-1]] = [ state, descr, suite_dir ]
+            nest2[regp[-1]] = [state, descr, suite_dir]
 
-    def build_treestore( self, data, piter=None ):
+    def build_treestore(self, data, piter=None):
         items = data.keys()
         items.sort()
         for item in items:
             value = data[item]
-            if isinstance( value, dict ):
+            if isinstance(value, dict):
                 # final three items are colours
-                iter = self.regd_treestore.append(piter, [item, None, None, None, None, None, None ] )
+                iter = self.regd_treestore.append(
+                    piter, [item, None, None, None, None, None, None])
                 self.build_treestore(value, iter)
             else:
                 state, descr, dir = value
-                iter = self.regd_treestore.append(piter, [item, state, descr, dir, None, None, None ] )
+                iter = self.regd_treestore.append(
+                    piter, [item, state, descr, dir, None, None, None])
 
-    def update( self ):
-        #print "Updating list of available suites"
+    def update(self):
+        # print "Updating list of available suites"
         self.construct_newtree()
         if self.reload:
             self.regd_treestore.clear()
-            self.build_treestore( self.newtree )
+            self.build_treestore(self.newtree)
             self.reload = False
         else:
-            self.update_treestore( self.newtree, self.regd_treestore.get_iter_first() )
+            self.update_treestore(
+                self.newtree, self.regd_treestore.get_iter_first())
 
-    def update_treestore( self, new, iter ):
+    def update_treestore(self, new, iter):
         # iter is None for an empty treestore (no suites registered)
         ts = self.regd_treestore
         if iter:
             opath = ts.get_path(iter)
-            # get parent iter before pruning in case we prune last item at this level
+            # get parent iter before pruning in case we prune last item at this
+            # level
             piter = ts.iter_parent(iter)
         else:
             opath = None
             piter = None
 
-        def my_get_iter( item ):
+        def my_get_iter(item):
             # find the TreeIter pointing at item at this level
             if not opath:
                 return None
             iter = ts.get_iter(opath)
             while iter:
-                val, = ts.get( iter, 0 )
+                val, = ts.get(iter, 0)
                 if val == item:
                     return iter
-                iter = ts.iter_next( iter )
+                iter = ts.iter_next(iter)
             return None
 
         # new items at this level
@@ -146,38 +154,41 @@ class db_updater(threading.Thread):
 
         while iter:
             # iterate through old items at this level
-            item, state, descr, dir = ts.get( iter, 0,1,2,3 )
+            item, state, descr, dir = ts.get(iter, 0, 1, 2, 3)
             if item not in new_items:
                 # old item is not in new - prune it
-                res = ts.remove( iter )
-                if not res: # Nec?
+                res = ts.remove(iter)
+                if not res:  # Nec?
                     iter = None
             else:
                 # old item is in new - update it in case it changed
                 old_items.append(item)
                 # update old items that do appear in new
                 chiter = ts.iter_children(iter)
-                if not isinstance( new[item], dict ):
+                if not isinstance(new[item], dict):
                     # new item is not a group - update title etc.
                     state, descr, dir = new[item]
                     sc = self.statecol(state)
                     ni = new[item]
-                    ts.set( iter, 0, item, 1, ni[0], 2, ni[1], 3, ni[2], 4, sc[0], 5, sc[1], 6, sc[2] )
+                    ts.set(iter, 0, item, 1, ni[0], 2, ni[1], 3, ni[2],
+                           4, sc[0], 5, sc[1], 6, sc[2])
                     if chiter:
                         # old item was a group - kill its children
                         while chiter:
-                            res = ts.remove( chiter )
+                            res = ts.remove(chiter)
                             if not res:
                                 chiter = None
                 else:
                     # new item is a group
                     if not chiter:
                         # old item was not a group
-                        ts.set( iter, 0, item, 1, None, 2, None, 3, None, 4, None, 5, None, 6, None )
-                        self.build_treestore( new[item], iter )
+                        ts.set(
+                            iter, 0, item, 1, None, 2, None, 3, None, 4,
+                            None, 5, None, 6, None)
+                        self.build_treestore(new[item], iter)
 
                 # continue
-                iter = ts.iter_next( iter )
+                iter = ts.iter_next(iter)
 
         # return to original iter
         if opath:
@@ -194,41 +205,43 @@ class db_updater(threading.Thread):
         for item in new_items:
             if item not in old_items:
                 # new data wasn't in old - add it
-                if isinstance( new[item], dict ):
-                    xiter = ts.append(piter, [item] + [None, None, None, None, None, None] )
-                    self.build_treestore( new[item], xiter )
+                if isinstance(new[item], dict):
+                    xiter = ts.append(
+                        piter, [item] + [None, None, None, None, None, None])
+                    self.build_treestore(new[item], xiter)
                 else:
                     state, descr, dir = new[item]
-                    yiter = ts.append(piter, [item] + new[item] + list( self.statecol(state)))
+                    yiter = ts.append(
+                        piter, [item] + new[item] + list(self.statecol(state)))
             else:
                 # new data was already in old
-                if isinstance( new[item], dict ):
+                if isinstance(new[item], dict):
                     # check lower levels
-                    niter = my_get_iter( item )
+                    niter = my_get_iter(item)
                     if niter:
                         chiter = ts.iter_children(niter)
                         if chiter:
-                            self.update_treestore( new[item], chiter )
+                            self.update_treestore(new[item], chiter)
 
-    def run( self ):
+    def run(self):
         global debug
         if debug:
             print '* thread', self.me, 'starting'
         while not self.quit:
             if self.running_choices_changed() or self.reload:
-                gobject.idle_add( self.update )
+                gobject.idle_add(self.update)
             time.sleep(1)
         else:
             if debug:
                 print '* thread', self.me, 'quitting'
             self.__class__.count -= 1
 
-    def running_choices_changed( self ):
+    def running_choices_changed(self):
         if not PyroInstalled:
             return
         # [(name, owner, host, port)]
         results = scan(pyro_timeout=self.pyro_timeout)
-        choices = [(result[0], result[3]) for result in results]
+        choices = [(result[1]['name'], result[0]) for result in results]
         choices.sort()
         if choices != self.running_choices:
             self.running_choices = choices
@@ -236,46 +249,40 @@ class db_updater(threading.Thread):
         else:
             return False
 
-    def statecol( self, state ):
-        grnbg = '#19ae0a'
-        grnfg = '#030'
-        #red = '#ff1a45'
-        red = '#845'
-        white = '#fff'
-        black='#000'
-        hilight = '#faf'
-        hilight2 = '#f98e3a'
+    def statecol(self, state):
+        bg = '#19ae0a'
+        fg = '#030'
         if state == '-':
-            #return (black, None, hilight)
             return (None, None, None)
         else:
-            #return (grnfg, grnbg, hilight2 )
-            return (grnfg, grnbg, grnbg )
+            return (fg, bg, bg)
 
-    def search_level( self, model, iter, func, data ):
+    def search_level(self, model, iter, func, data):
         while iter:
-            if func( model, iter, data):
+            if func(model, iter, data):
                 return iter
             iter = model.iter_next(iter)
         return None
 
-    def search_treemodel( self, model, iter, func, data ):
+    def search_treemodel(self, model, iter, func, data):
         while iter:
-            if func( model, iter, data):
+            if func(model, iter, data):
                 return iter
-            result = self.search_treemodel( model, model.iter_children(iter), func, data)
+            result = self.search_treemodel(
+                model, model.iter_children(iter), func, data)
             if result:
                 return result
             iter = model.iter_next(iter)
         return None
 
-    def match_func( self, model, iter, data ):
+    def match_func(self, model, iter, data):
         column, key = data
-        value = model.get_value( iter, column )
+        value = model.get_value(iter, column)
         return value == key
 
+
 class dbchooser(object):
-    def __init__(self, parent, db, db_owner, tmpdir, pyro_timeout ):
+    def __init__(self, parent, db, db_owner, tmpdir, pyro_timeout):
 
         self.db = db
         self.db_owner = db_owner
@@ -292,143 +299,161 @@ class dbchooser(object):
 
         gobject.threads_init()
 
-        #self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
-        self.window = gtk.Dialog( "Choose a suite", parent, gtk.DIALOG_MODAL|gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK))
-        #self.window.set_modal(True)
-        self.window.set_title("Suite Chooser" )
+        # self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
+        self.window = gtk.Dialog(
+            "Choose a suite",
+            parent,
+            gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
+            (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK,
+             gtk.RESPONSE_OK))
+        # self.window.set_modal(True)
+        self.window.set_title("Suite Chooser")
         self.window.set_size_request(750, 400)
-        self.window.set_icon(get_icon()) # TODO: not needed for a dialog window?
-        #self.window.set_border_width( 5 )
+        # TODO: not needed for a dialog window?
+        self.window.set_icon(get_icon())
+        # self.window.set_border_width(5)
 
         self.window.connect("delete_event", self.delete_all_event)
 
         sw = gtk.ScrolledWindow()
-        sw.set_policy( gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC )
+        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
 
         self.regd_treeview = gtk.TreeView()
-        self.regd_treestore = gtk.TreeStore( str, str, str, str, str, str, str )
+        self.regd_treestore = gtk.TreeStore(str, str, str, str, str, str, str)
         self.regd_treeview.set_model(self.regd_treestore)
         self.regd_treeview.set_rules_hint(True)
         # search column zero (Ctrl-F)
-        self.regd_treeview.connect( 'key_press_event', self.on_suite_select )
-        self.regd_treeview.connect( 'button_press_event', self.on_suite_select )
+        self.regd_treeview.connect('key_press_event', self.on_suite_select)
+        self.regd_treeview.connect('button_press_event', self.on_suite_select)
         self.regd_treeview.set_search_column(0)
 
         # Start updating the liststore now, as we need values in it
         # immediately below (it may be possible to delay this till the
         # end of __init__() but it doesn't really matter).
         if self.db:
-            self.dbopt = '--db='+self.db
+            self.dbopt = '--db=' + self.db
         else:
             self.dbopt = ''
 
         regd_ts = self.regd_treeview.get_selection()
-        regd_ts.set_mode( gtk.SELECTION_SINGLE )
+        regd_ts.set_mode(gtk.SELECTION_SINGLE)
 
         cr = gtk.CellRendererText()
-        #cr.set_property( 'cell-background', '#def' )
-        tvc = gtk.TreeViewColumn( 'Suite', cr, text=0, foreground=4, background=5 )
+        # cr.set_property('cell-background', '#def')
+        tvc = gtk.TreeViewColumn(
+            'Suite', cr, text=0, foreground=4, background=5)
         tvc.set_resizable(True)
         tvc.set_sort_column_id(0)
-        self.regd_treeview.append_column( tvc )
+        self.regd_treeview.append_column(tvc)
 
         cr = gtk.CellRendererText()
-        tvc = gtk.TreeViewColumn( 'Port', cr, text=1, foreground=4, background=5 )
+        tvc = gtk.TreeViewColumn(
+            'Port', cr, text=1, foreground=4, background=5)
         tvc.set_resizable(True)
         # not sure how this sorting works
-        #tvc.set_sort_column_id(1)
-        self.regd_treeview.append_column( tvc )
+        # tvc.set_sort_column_id(1)
+        self.regd_treeview.append_column(tvc)
 
         cr = gtk.CellRendererText()
-        #cr.set_property( 'cell-background', '#def' )
-        tvc = gtk.TreeViewColumn( 'Title', cr, markup=2, foreground=4, background=6 )
+        # cr.set_property('cell-background', '#def')
+        tvc = gtk.TreeViewColumn(
+            'Title', cr, markup=2, foreground=4, background=6)
         tvc.set_resizable(True)
-        #vc.set_sort_column_id(2)
-        self.regd_treeview.append_column( tvc )
+        # vc.set_sort_column_id(2)
+        self.regd_treeview.append_column(tvc)
 
         cr = gtk.CellRendererText()
-        tvc = gtk.TreeViewColumn( 'Location', cr, text=3, foreground=4, background=5 )
+        tvc = gtk.TreeViewColumn(
+            'Location', cr, text=3, foreground=4, background=5)
         tvc.set_resizable(True)
-        #vc.set_sort_column_id(3)
-        self.regd_treeview.append_column( tvc )
+        # vc.set_sort_column_id(3)
+        self.regd_treeview.append_column(tvc)
 
         vbox = self.window.vbox
 
-        sw.add( self.regd_treeview )
+        sw.add(self.regd_treeview)
 
-        vbox.pack_start( sw, True )
+        vbox.pack_start(sw, True)
 
-        self.selected_label_text = '(double-click or OK to select; right-click for db options)'
-        self.selected_label = gtk.Label( self.selected_label_text )
+        self.selected_label_text = (
+            '(double-click or OK to select; right-click for db options)')
+        self.selected_label = gtk.Label(self.selected_label_text)
 
         filter_entry = EntryTempText()
-        filter_entry.set_width_chars( 7 )  # Reduce width in toolbar
-        filter_entry.connect( "activate", self.filter )
-        filter_entry.set_temp_text( "filter" )
+        filter_entry.set_width_chars(7)  # Reduce width in toolbar
+        filter_entry.connect("activate", self.filter)
+        filter_entry.set_temp_text("filter")
         filter_toolitem = gtk.ToolItem()
         filter_toolitem.add(filter_entry)
         tooltip = gtk.Tooltips()
         tooltip.enable()
-        tooltip.set_tip(filter_toolitem, "Filter suites \n(enter a sub-string or regex)")
+        tooltip.set_tip(
+            filter_toolitem, "Filter suites \n(enter a sub-string or regex)")
 
         expand_button = gtk.ToolButton()
-        image = gtk.image_new_from_stock( gtk.STOCK_ADD, gtk.ICON_SIZE_SMALL_TOOLBAR )
-        expand_button.set_icon_widget( image )
-        expand_button.connect( 'clicked', lambda x: self.regd_treeview.expand_all() )
+        image = gtk.image_new_from_stock(
+            gtk.STOCK_ADD, gtk.ICON_SIZE_SMALL_TOOLBAR)
+        expand_button.set_icon_widget(image)
+        expand_button.connect(
+            'clicked', lambda x: self.regd_treeview.expand_all())
 
         collapse_button = gtk.ToolButton()
-        image = gtk.image_new_from_stock( gtk.STOCK_REMOVE, gtk.ICON_SIZE_SMALL_TOOLBAR )
-        collapse_button.set_icon_widget( image )
-        collapse_button.connect( 'clicked', lambda x: self.regd_treeview.collapse_all() )
+        image = gtk.image_new_from_stock(
+            gtk.STOCK_REMOVE, gtk.ICON_SIZE_SMALL_TOOLBAR)
+        collapse_button.set_icon_widget(image)
+        collapse_button.connect(
+            'clicked', lambda x: self.regd_treeview.collapse_all())
 
         hbox = gtk.HBox()
 
         eb = gtk.EventBox()
-        eb.add( self.selected_label )
-        eb.modify_bg( gtk.STATE_NORMAL, gtk.gdk.color_parse( '#bbc' ) )
-        hbox.pack_start( eb, True )
-        hbox.pack_start( expand_button, False )
-        hbox.pack_start( collapse_button, False )
-        hbox.pack_start (filter_toolitem, False)
+        eb.add(self.selected_label)
+        eb.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse('#bbc'))
+        hbox.pack_start(eb, True)
+        hbox.pack_start(expand_button, False)
+        hbox.pack_start(collapse_button, False)
+        hbox.pack_start(filter_toolitem, False)
 
-        vbox.pack_start( hbox, False )
+        vbox.pack_start(hbox, False)
 
         self.window.show_all()
 
         self.start_updater()
 
-    def start_updater(self, filtr=None ):
+    def start_updater(self, filtr=None):
         db = localdb(self.db)
-        #self.db_button.set_label( "_Local/Central DB" )
+        # self.db_button.set_label("_Local/Central DB")
         if self.updater:
-            self.updater.quit = True # does this take effect?
-        self.updater = db_updater( self.regd_treestore, db, filtr, self.pyro_timeout )
+            self.updater.quit = True  # does this take effect?
+        self.updater = db_updater(
+            self.regd_treestore, db, filtr, self.pyro_timeout)
         self.updater.start()
 
     # TODO: a button to do this?
-    #def reload( self, w ):
+    # def reload(self, w):
     #    # tell updated to reconstruct the treeview from scratch
     #    self.updater.reload = True
 
-    def filter(self, filtr_e ):
+    def filter(self, filtr_e):
         if filtr_e == "":
             # reset
             self.start_updater()
             return
         filtr = filtr_e.get_text()
         try:
-            re.compile( filtr )
+            re.compile(filtr)
         except:
-            warning_dialog( "Bad Regular Expression: " + filtr, self.window ).warn()
+            warning_dialog(
+                "Bad Regular Expression: " + filtr, self.window).warn()
             filtr_e.set_text("")
             self.start_updater()
             return
-        self.start_updater( filtr )
+        self.start_updater(filtr)
 
-    def delete_all_event( self, w, e ):
+    def delete_all_event(self, w, e):
         self.updater.quit = True
         # call quit on any remaining gcapture windows, which contain
-        # tailer threads that need to be stopped). Currently we maintain
+        # Tailer threads that need to be stopped. Currently we maintain
         # a list of all gcapture windows opened
         # since start-up, hence the use of 'quit_already' to
         # avoid calling window.destroy() on gcapture windows that have
@@ -436,9 +461,9 @@ class dbchooser(object):
         # a second call to destroy() may be safe anyway?)...
         for gwindow in self.gcapture_windows:
             if not gwindow.quit_already:
-                gwindow.quit( None, None )
+                gwindow.quit(None, None)
 
-    def on_suite_select( self, treeview, event ):
+    def on_suite_select(self, treeview, event):
         try:
             event.button
         except AttributeError:
@@ -462,7 +487,7 @@ class dbchooser(object):
                     iter = self.regd_treestore.get_iter(path)
                     if self.regd_treestore.iter_children(iter):
                         # has children so is expandable
-                        treeview.expand_row(path, False )
+                        treeview.expand_row(path, False)
                         return False
         else:
             # called by button click
@@ -473,42 +498,42 @@ class dbchooser(object):
             # the following sets selection to the position at which the
             # right click was done (otherwise selection lags behind the
             # right click):
-            x = int( event.x )
-            y = int( event.y )
+            x = int(event.x)
+            y = int(event.y)
             time = event.time
-            pth = treeview.get_path_at_pos(x,y)
+            pth = treeview.get_path_at_pos(x, y)
             if pth is None:
                 return False
             treeview.grab_focus()
             path, col, cellx, celly = pth
-            treeview.set_cursor( path, col, 0 )
+            treeview.set_cursor(path, col, 0)
 
         selection = treeview.get_selection()
 
         model, iter = selection.get_selected()
 
-        item, state, descr, suite_dir = model.get( iter, 0,1,2,3 )
+        item, state, descr, suite_dir = model.get(iter, 0, 1, 2, 3)
         if not suite_dir:
             group_clicked = True
         else:
             group_clicked = False
 
-        def get_reg( item, iter ):
+        def get_reg(item, iter):
             reg = item
             if iter:
-                par = model.iter_parent( iter )
+                par = model.iter_parent(iter)
                 if par:
                     val, = model.get(par, 0)
-                    reg = get_reg( val, par ) + RegPath.delimiter + reg
+                    reg = get_reg(val, par) + RegPath.delimiter + reg
             return reg
 
-        reg = get_reg( item, iter )
+        reg = get_reg(item, iter)
         if not group_clicked:
             self.regname = reg
-            self.selected_label.set_text( reg )
+            self.selected_label.set_text(reg)
         else:
             self.regname = None
-            self.selected_label.set_text( self.selected_label_text )
+            self.selected_label.set_text(self.selected_label_text)
 
         if event.type == gtk.gdk._2BUTTON_PRESS:
             # double-click
@@ -524,57 +549,59 @@ class dbchooser(object):
         if group_clicked:
             group = reg
             # MENU OPTIONS FOR GROUPS
-            copy_item = gtk.MenuItem( 'C_opy' )
-            menu.append( copy_item )
-            copy_item.connect( 'activate', self.copy_popup, group, True )
+            copy_item = gtk.MenuItem('C_opy')
+            menu.append(copy_item)
+            copy_item.connect('activate', self.copy_popup, group, True)
 
-            reregister_item = gtk.MenuItem( '_Reregister' )
-            menu.append( reregister_item )
-            reregister_item.connect( 'activate', self.reregister_popup, group, True )
+            reregister_item = gtk.MenuItem('_Reregister')
+            menu.append(reregister_item)
+            reregister_item.connect(
+                'activate', self.reregister_popup, group, True)
 
-            del_item = gtk.MenuItem( '_Unregister' )
-            menu.append( del_item )
-            del_item.connect( 'activate', self.unregister_popup, group, True )
+            del_item = gtk.MenuItem('_Unregister')
+            menu.append(del_item)
+            del_item.connect('activate', self.unregister_popup, group, True)
 
         else:
-            copy_item = gtk.MenuItem( '_Copy' )
-            menu.append( copy_item )
-            copy_item.connect( 'activate', self.copy_popup, reg )
+            copy_item = gtk.MenuItem('_Copy')
+            menu.append(copy_item)
+            copy_item.connect('activate', self.copy_popup, reg)
 
-            reregister_item = gtk.MenuItem( '_Reregister' )
-            menu.append( reregister_item )
-            reregister_item.connect( 'activate', self.reregister_popup, reg )
+            reregister_item = gtk.MenuItem('_Reregister')
+            menu.append(reregister_item)
+            reregister_item.connect('activate', self.reregister_popup, reg)
 
-            del_item = gtk.MenuItem( '_Unregister' )
-            menu.append( del_item )
-            del_item.connect( 'activate', self.unregister_popup, reg )
+            del_item = gtk.MenuItem('_Unregister')
+            menu.append(del_item)
+            del_item.connect('activate', self.unregister_popup, reg)
 
-            compare_item = gtk.MenuItem( 'C_ompare' )
-            menu.append( compare_item )
-            compare_item.connect( 'activate', self.compare_popup, reg )
+            compare_item = gtk.MenuItem('C_ompare')
+            menu.append(compare_item)
+            compare_item.connect('activate', self.compare_popup, reg)
 
         menu.show_all()
         # button only:
-        #menu.popup( None, None, None, event.button, event.time )
+        # menu.popup(None, None, None, event.button, event.time)
         # this seems to work with keypress and button:
-        menu.popup( None, None, None, 0, event.time )
+        menu.popup(None, None, None, 0, event.time)
 
         # TODO - POPUP MENU MUST BE DESTROY()ED AFTER EVERY USE AS
         # POPPING DOWN DOES NOT DO THIS (=> MEMORY LEAK?)
         return False
 
-    def unregister_popup( self, w, reg, is_group=False ):
+    def unregister_popup(self, w, reg, is_group=False):
 
-        window = gtk.MessageDialog( parent=self.window,
-                flags=0,
-                type=gtk.MESSAGE_QUESTION,
-                buttons=gtk.BUTTONS_NONE,
-                message_format="Unregistering Suite " + reg + """
+        window = gtk.MessageDialog(
+            parent=self.window,
+            flags=0,
+            type=gtk.MESSAGE_QUESTION,
+            buttons=gtk.BUTTONS_NONE,
+            message_format="Unregistering Suite " + reg + """
 \nDelete suite definition directory too? (DANGEROUS!)""")
 
-        window.add_button( gtk.STOCK_YES, gtk.RESPONSE_YES )
-        window.add_button( gtk.STOCK_NO, gtk.RESPONSE_NO )
-        window.add_button( gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL )
+        window.add_button(gtk.STOCK_YES, gtk.RESPONSE_YES)
+        window.add_button(gtk.STOCK_NO, gtk.RESPONSE_NO)
+        window.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
         response = window.run()
         window.destroy()
 
@@ -590,69 +617,72 @@ class dbchooser(object):
         else:
             command = None
         if command:
-            res, out = run_get_stdout( command )
+            res, out = run_get_stdout(command)
             if not res:
-                warning_dialog( '\n'.join(out), self.window ).warn()
+                warning_dialog('\n'.join(out), self.window).warn()
 
-    def reregister_popup( self, w, reg, is_group=False ):
+    def reregister_popup(self, w, reg, is_group=False):
 
-        window = EntryDialog( parent=self.window,
-                flags=0,
-                type=gtk.MESSAGE_QUESTION,
-                buttons=gtk.BUTTONS_OK_CANCEL,
-                message_format="Reregister Suite " + reg + " As")
+        window = EntryDialog(
+            parent=self.window,
+            flags=0,
+            type=gtk.MESSAGE_QUESTION,
+            buttons=gtk.BUTTONS_OK_CANCEL,
+            message_format="Reregister Suite " + reg + " As")
 
         rereg = window.run()
         window.destroy()
         if rereg:
             command = "cylc reregister " + reg + ' ' + rereg
-            res, out = run_get_stdout( command )
+            res, out = run_get_stdout(command)
             if not res:
-                warning_dialog( '\n'.join(out), self.window ).warn()
+                warning_dialog('\n'.join(out), self.window).warn()
 
-    def compare_popup( self, w, reg ):
+    def compare_popup(self, w, reg):
 
-        window = EntryDialog( parent=self.window,
-                flags=0,
-                type=gtk.MESSAGE_QUESTION,
-                buttons=gtk.BUTTONS_OK_CANCEL,
-                message_format="Compare Suite " + reg + " With")
+        window = EntryDialog(
+            parent=self.window,
+            flags=0,
+            type=gtk.MESSAGE_QUESTION,
+            buttons=gtk.BUTTONS_OK_CANCEL,
+            message_format="Compare Suite " + reg + " With")
 
         compare = window.run()
         window.destroy()
         if compare:
             command = "cylc diff " + reg + ' ' + compare
-            res, out = run_get_stdout( command )
+            res, out = run_get_stdout(command)
             if not res:
-                warning_dialog( '\n'.join(out), self.window ).warn()
+                warning_dialog('\n'.join(out), self.window).warn()
             else:
                 # TODO: need a bigger scrollable window here!
-                info_dialog( '\n'.join(out), self.window ).inform()
+                info_dialog('\n'.join(out), self.window).inform()
 
-    def copy_popup( self, w, reg, is_group=False ):
+    def copy_popup(self, w, reg, is_group=False):
 
-        window = EntryDialog( parent=self.window,
-                flags=0,
-                type=gtk.MESSAGE_QUESTION,
-                buttons=gtk.BUTTONS_OK_CANCEL,
-                message_format="Copy Suite " + reg + """To
+        window = EntryDialog(
+            parent=self.window,
+            flags=0,
+            type=gtk.MESSAGE_QUESTION,
+            buttons=gtk.BUTTONS_OK_CANCEL,
+            message_format="Copy Suite " + reg + """To
 NAME,TOP_DIRECTORY""")
 
         out = window.run()
         window.destroy()
         if out:
             try:
-                name, topdir = re.split(' *, *', out )
+                name, topdir = re.split(' *, *', out)
             except Exception, e:
-                warning_dialog( str(e), self.window ).warn()
+                warning_dialog(str(e), self.window).warn()
             else:
                 print name, topdir
-                topdir = os.path.expanduser( os.path.expandvars( topdir ))
+                topdir = os.path.expanduser(os.path.expandvars(topdir))
                 print name, topdir
                 command = "cylc cp " + reg + ' ' + name + ' ' + topdir
                 print command
-                res, out = run_get_stdout( command )
+                res, out = run_get_stdout(command)
                 if not res:
-                    warning_dialog( '\n'.join(out), self.window ).warn()
+                    warning_dialog('\n'.join(out), self.window).warn()
                 elif out:
-                    info_dialog( '\n'.join(out), self.window ).inform()
+                    info_dialog('\n'.join(out), self.window).inform()
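
The database option popups above all use the same run_get_stdout() pattern: a success flag plus the captured output as a list of lines. A short sketch (the suite names are hypothetical):

    from cylc.run_get_stdout import run_get_stdout

    res, out = run_get_stdout("cylc diff suiteA suiteB")
    if not res:
        print "command failed:"
    print "\n".join(out)
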
diff --git a/lib/cylc/gui/dot_maker.py b/lib/cylc/gui/dot_maker.py
index cbf75a4..ba4f746 100644
--- a/lib/cylc/gui/dot_maker.py
+++ b/lib/cylc/gui/dot_maker.py
@@ -23,13 +23,13 @@ from cylc.task_state import task_state
 
 empty = {}
 empty['small'] = ["11 11 1 1", ". c None"]
-empty['small'].extend(["..........."]*11)
+empty['small'].extend(["..........."] * 11)
 empty['medium'] = ["17 17 1 1", ". c None"]
-empty['medium'].extend(["................."]*17)
+empty['medium'].extend(["................."] * 17)
 empty['large'] = ["22 22 1 1", ". c None"]
-empty['large'].extend(["......................"]*22)
+empty['large'].extend(["......................"] * 22)
 empty['extra large'] = ["32 32 1 1", ". c None"]
-empty['extra large'].extend(["................................"]*32)
+empty['extra large'].extend(["................................"] * 32)
 
 stopped = {
     'small': [
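
For reference, the "empty" icon lists built above are XPM image data: the header "11 11 1 1" means an 11x11 image with one colour and one character per pixel, followed by one string per scanline. A sketch of turning such data into a pixbuf; how dot_maker itself consumes these lists lies outside this hunk, so treat the call below as illustrative.

    import gtk

    empty_small = ["11 11 1 1", ". c None"] + ["..........."] * 11
    pixbuf = gtk.gdk.pixbuf_new_from_xpm_data(empty_small)
    print pixbuf.get_width(), pixbuf.get_height()   # 11 11
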
diff --git a/lib/cylc/gui/filtered_tailer.py b/lib/cylc/gui/filtered_tailer.py
deleted file mode 100644
index 44cfdff..0000000
--- a/lib/cylc/gui/filtered_tailer.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import gobject
-from tailer import tailer
-import os
-import re
-from cylc import tail
-#from warning_dialog import warning_dialog
-
-class filtered_tailer( tailer ):
-    def __init__( self, logview, log, filters, tag=None,
-            warning_re=None, critical_re=None ):
-        self.filters = filters
-        tailer.__init__( self, logview, log, tag=tag,
-                warning_re=warning_re, critical_re=critical_re )
-
-    def run( self ):
-        #gobject.idle_add( self.clear )
-        if not os.path.exists( self.logfile ):
-            #gobject.idle_add( self.warn, "File not found: " + self.logfile )
-            #print "File not found: " + self.logfile
-            ###print "Disconnecting from log viewer thread"
-            return
-
-        gen = tail.tail( open( self.logfile ))
-        while not self.quit:
-            if not self.freeze:
-                line = gen.next()
-                if line:
-                    match = True
-                    for filter in self.filters:
-                        if filter:
-                            if not re.search( filter, line ):
-                                match = False
-                                break
-                    if match:
-                        gobject.idle_add( self.update_gui, line )
-        ###print "Disconnecting from log viewer thread"
diff --git a/lib/cylc/gui/gcapture.py b/lib/cylc/gui/gcapture.py
index 1c77c51..5a3ec34 100644
--- a/lib/cylc/gui/gcapture.py
+++ b/lib/cylc/gui/gcapture.py
@@ -16,18 +16,18 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from tailer import tailer
+from cylc.gui.tailer import Tailer
 import gtk
 import gobject
 import pango
 import tempfile
-import os, re, sys
 from warning_dialog import warning_dialog, info_dialog
 from util import get_icon
 import subprocess
 
 # unit test: see the command $CYLC_DIR/bin/gcapture
 
+
 class gcapture(object):
     """
 Run a command as a subprocess and capture its stdout and stderr in real
@@ -39,21 +39,21 @@ Lines containing:
 are displayed in red.
     $ capture "echo foo && echox bar"
     """
-    def __init__( self, command, stdoutfile, width=400, height=400, standalone=False, ignore_command=False,
-                  title=None ):
-        self.standalone=standalone
+    def __init__(self, command, stdoutfile, width=400, height=400,
+                 standalone=False, ignore_command=False, title=None):
+        self.standalone = standalone
         self.command = command
         self.ignore_command = ignore_command
         self.stdout = stdoutfile
         self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
-        self.window.set_border_width( 5 )
+        self.window.set_border_width(5)
         if title is None:
-            self.window.set_title( 'Command Output' )
+            self.window.set_title('Command Output')
         else:
-            self.window.set_title( title )
+            self.window.set_title(title)
         self.window.connect("delete_event", self.quit)
         self.window.set_default_size(width, height)
-        self.window.set_icon( get_icon() )
+        self.window.set_icon(get_icon())
         self.quit_already = False
 
         self.find_current = None
@@ -61,37 +61,33 @@ are displayed in red.
         self.search_warning_done = False
 
         sw = gtk.ScrolledWindow()
-        sw.set_policy( gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC )
+        sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
         sw.show()
 
         self.textview = gtk.TextView()
         self.textview.set_editable(False)
-        self.textview.set_wrap_mode( gtk.WRAP_WORD )
+        self.textview.set_wrap_mode(gtk.WRAP_WORD)
         # Use a monospace font. This is safe - by testing - setting an
         # illegal font description has no effect.
-        self.textview.modify_font( pango.FontDescription("monospace") )
+        self.textview.modify_font(pango.FontDescription("monospace"))
         tb = self.textview.get_buffer()
         self.textview.show()
 
-        self.ftag = tb.create_tag( None, background="#70FFA9" )
-
-        self.warning_re = 'WARNING'
-        self.critical_re = 'CRITICAL|ERROR'
+        self.ftag = tb.create_tag(None, background="#70FFA9")
 
         vbox = gtk.VBox()
         vbox.show()
 
         if not self.ignore_command:
             self.progress_bar = gtk.ProgressBar()
-            self.progress_bar.set_text( command )
-            self.progress_bar.set_pulse_step( 0.04 )
+            self.progress_bar.set_text(command)
+            self.progress_bar.set_pulse_step(0.04)
             self.progress_bar.show()
-            vbox.pack_start( self.progress_bar, expand=False )
-        self.command_label = gtk.Label( self.command )
+            vbox.pack_start(self.progress_bar, expand=False)
+        self.command_label = gtk.Label(self.command)
         if self.ignore_command:
             self.command_label.show()
-        vbox.pack_start( self.command_label, expand=False )
-
+        vbox.pack_start(self.command_label, expand=False)
 
         sw.add(self.textview)
 
@@ -100,58 +96,61 @@ are displayed in red.
         frame.show()
         vbox.add(frame)
 
-        save_button = gtk.Button( "Save As" )
-        save_button.connect("clicked", self.save, self.textview )
+        save_button = gtk.Button("Save As")
+        save_button.connect("clicked", self.save, self.textview)
         save_button.show()
 
         hbox = gtk.HBox()
-        hbox.pack_start( save_button, False )
+        hbox.pack_start(save_button, False)
         hbox.show()
 
-        output_label = gtk.Label( 'output : ' + stdoutfile.name )
+        output_label = gtk.Label('output : ' + stdoutfile.name)
         output_label.show()
-        hbox.pack_start( output_label, expand=True )
+        hbox.pack_start(output_label, expand=True)
 
-        self.freeze_button = gtk.ToggleButton( "_Disconnect" )
+        self.freeze_button = gtk.ToggleButton("_Disconnect")
         self.freeze_button.set_active(False)
-        self.freeze_button.connect("toggled", self.freeze )
+        self.freeze_button.connect("toggled", self.freeze)
         self.freeze_button.show()
 
         searchbox = gtk.HBox()
         searchbox.show()
         entry = gtk.Entry()
         entry.show()
-        entry.connect( "activate", self.enter_clicked )
-        searchbox.pack_start (entry, True)
-        b = gtk.Button ("Find Next")
-        b.connect_object ('clicked', self.on_find_clicked, entry)
+        entry.connect("activate", self.enter_clicked)
+        searchbox.pack_start(entry, True)
+        b = gtk.Button("Find Next")
+        b.connect_object('clicked', self.on_find_clicked, entry)
         b.show()
-        searchbox.pack_start (b, False)
-        searchbox.pack_start( self.freeze_button, False )
+        searchbox.pack_start(b, False)
+        searchbox.pack_start(self.freeze_button, False)
 
-        close_button = gtk.Button( "_Close" )
-        close_button.connect("clicked", self.quit, None, None )
+        close_button = gtk.Button("_Close")
+        close_button.connect("clicked", self.quit, None, None)
         close_button.show()
 
         hbox.pack_end(close_button, False)
 
-        vbox.pack_start( searchbox, False )
-        vbox.pack_start( hbox, False )
+        vbox.pack_start(searchbox, False)
+        vbox.pack_start(hbox, False)
 
         self.window.add(vbox)
         close_button.grab_focus()
         self.window.show()
 
-    def run( self ):
+    def run(self):
+        proc = None
         if not self.ignore_command:
-            self.proc = subprocess.Popen( self.command, stdout=self.stdout, stderr=subprocess.STDOUT, shell=True )
-            self.stdout_updater = tailer( self.textview, self.stdout.name, proc=self.proc, warning_re=self.warning_re, critical_re=self.critical_re )
+            proc = subprocess.Popen(
+                self.command, stdout=self.stdout, stderr=subprocess.STDOUT,
+                shell=True)
+            self.proc = proc
             gobject.timeout_add(40, self.pulse_proc_progress)
-        else:
-            self.stdout_updater = tailer( self.textview, self.stdout.name, warning_re=self.warning_re, critical_re=self.critical_re )
+        self.stdout_updater = Tailer(
+            self.textview, self.stdout.name, pollable=proc)
         self.stdout_updater.start()
 
-    def pulse_proc_progress( self ):
+    def pulse_proc_progress(self):
         """While the process is running, pulse the progress bar a bit."""
         self.progress_bar.pulse()
         self.proc.poll()
@@ -163,29 +162,33 @@ are displayed in red.
         # Break gobject.timeout_add loop.
         return False
 
-    def freeze( self, b ):
+    def freeze(self, b):
         if b.get_active():
             self.stdout_updater.freeze = True
-            b.set_label( '_Reconnect' )
+            b.set_label('_Reconnect')
         else:
             self.stdout_updater.freeze = False
-            b.set_label( '_Disconnect' )
+            b.set_label('_Disconnect')
 
-    def save( self, w, tv ):
+    def save(self, w, tv):
         tb = tv.get_buffer()
 
         start = tb.get_start_iter()
         end = tb.get_end_iter()
-        txt = tb.get_text( start, end )
-
-        dialog = gtk.FileChooserDialog(title='Save As',
-                action=gtk.FILE_CHOOSER_ACTION_SAVE,
-                buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,
-                    gtk.STOCK_SAVE,gtk.RESPONSE_OK))
+        txt = tb.get_text(start, end)
+
+        dialog = gtk.FileChooserDialog(
+            title='Save As',
+            action=gtk.FILE_CHOOSER_ACTION_SAVE,
+            buttons=(
+                gtk.STOCK_CANCEL,
+                gtk.RESPONSE_CANCEL,
+                gtk.STOCK_SAVE,
+                gtk.RESPONSE_OK))
         filter = gtk.FileFilter()
         filter.set_name("any")
         filter.add_pattern("*")
-        dialog.add_filter( filter )
+        dialog.add_filter(filter)
 
         response = dialog.run()
 
@@ -197,65 +200,66 @@ are displayed in red.
         dialog.destroy()
 
         try:
-            f = open( fname, 'wb' )
+            f = open(fname, 'wb')
         except IOError, x:
-            warning_dialog( str(x), self.window ).warn()
+            warning_dialog(str(x), self.window).warn()
         else:
-            f.write( txt )
+            f.write(txt)
             f.close()
-            info_dialog( "Buffer saved to " + fname, self.window ).inform()
+            info_dialog("Buffer saved to " + fname, self.window).inform()
 
-    def quit( self, w, e, data=None ):
+    def quit(self, w, e, data=None):
         if self.quit_already:
             # this is because gcylc currently maintains a list of *all*
             # gcapture windows, including those the user has closed.
             return
-        self.stdout_updater.quit = True
+        self.stdout_updater.stop()
         self.quit_already = True
         if self.standalone:
-            #print 'GTK MAIN QUIT'
             gtk.main_quit()
         else:
-            #print 'WINDOW DESTROY'
             self.window.destroy()
 
-    def enter_clicked( self, e ):
-        self.on_find_clicked( e )
+    def enter_clicked(self, e):
+        self.on_find_clicked(e)
 
-    def on_find_clicked( self, e ):
+    def on_find_clicked(self, e):
         tv = self.textview
-        tb = tv.get_buffer ()
+        tb = tv.get_buffer()
         needle = e.get_text()
 
         if not needle:
-            s,e = tb.get_bounds()
-            tb.remove_tag( self.ftag, s,e )
+            s, e = tb.get_bounds()
+            tb.remove_tag(self.ftag, s, e)
             return
 
         self.stdout_updater.freeze = True
         self.freeze_button.set_active(True)
         self.freeze_button.set_label('_Reconnect')
         if not self.search_warning_done:
-            warning_dialog( "Find Next disconnects the live feed. Click Reconnect when you're done.",
-                            self.window ).warn()
+            warning_dialog(
+                ("Find Next disconnects the live feed." +
+                 " Click Reconnect when you're done."),
+                self.window).warn()
             self.search_warning_done = True
 
         if needle == self.find_current:
             s = self.find_current_iter
         else:
-            s,e = tb.get_bounds()
-            tb.remove_tag( self.ftag, s,e )
+            s, e = tb.get_bounds()
+            tb.remove_tag(self.ftag, s, e)
             s = tb.get_end_iter()
-            tv.scroll_to_iter( s, 0 )
+            tv.scroll_to_iter(s, 0)
         try:
             f, l = s.backward_search(needle, gtk.TEXT_SEARCH_VISIBLE_ONLY)
         except:
-            warning_dialog( '"' + needle + '"' + " not found", self.window ).warn()
+            warning_dialog(
+                '"' + needle + '"' + " not found", self.window).warn()
         else:
-            tb.apply_tag( self.ftag, f, l )
+            tb.apply_tag(self.ftag, f, l)
             self.find_current_iter = f
             self.find_current = needle
-            tv.scroll_to_iter( f, 0 )
+            tv.scroll_to_iter(f, 0)
 
     def _handle_proc_completed(self):
         self.progress_bar.hide()
@@ -265,8 +269,11 @@ are displayed in red.
         self.command_label.show()
         return False
 
-class gcapture_tmpfile( gcapture ):
-    def __init__( self, command, tmpdir, width=400, height=400, standalone=False, title=None ):
-        stdout = tempfile.NamedTemporaryFile( dir = tmpdir )
-        gcapture.__init__(self, command, stdout, width=width, height=height, standalone=standalone,
-                          title=title )
+
+class gcapture_tmpfile(gcapture):
+    def __init__(self, command, tmpdir, width=400, height=400,
+                 standalone=False, title=None):
+        stdout = tempfile.NamedTemporaryFile(dir=tmpdir)
+        gcapture.__init__(
+            self, command, stdout, width=width, height=height,
+            standalone=standalone, title=title)
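
The reworked gcapture.run() above now spawns the command and hands log-following to a
single Tailer(self.textview, self.stdout.name, pollable=proc) call, replacing the old
tailer with its hard-coded WARNING/CRITICAL regexps. Below is a minimal sketch of
driving one of these capture windows on its own, using only the gcapture_tmpfile
constructor and run() signatures visible in this hunk; the command string, window size
and the cylc.gui.gcapture import path (implied by the relative import in graph.py
further down) are assumptions for illustration.

    # Sketch only: run a command in a standalone gcapture_tmpfile window.
    # Assumes the Python 2 / PyGTK environment this module targets.
    import tempfile

    import gtk

    from cylc.gui.gcapture import gcapture_tmpfile

    tmpdir = tempfile.mkdtemp()               # illustrative scratch directory
    capture = gcapture_tmpfile("cylc scan --raw", tmpdir,
                               width=600, height=400, standalone=True)
    capture.run()   # spawns the command and starts the Tailer on its output
    gtk.main()      # standalone=True makes the Close button quit this loop
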
diff --git a/lib/cylc/gui/gpanel.py b/lib/cylc/gui/gpanel.py
index 0ca81c8..7fd43e6 100755
--- a/lib/cylc/gui/gpanel.py
+++ b/lib/cylc/gui/gpanel.py
@@ -30,21 +30,19 @@ import traceback
 import gtk
 import gobject
 import warnings
-#import pygtk
-#pygtk.require('2.0')
 
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.cfgspec.gcylc import gcfg
-from cylc.gui.gsummary import (get_summary_menu, launch_gsummary,
-                               BaseSummaryTimeoutUpdater)
+from cylc.gui.gscan import (get_scan_menu, launch_gscan,
+                            BaseScanTimeoutUpdater)
 from cylc.gui.app_gcylc import run_get_stdout
 from cylc.gui.dot_maker import DotMaker
 from cylc.gui.util import get_icon, setup_icons
 from cylc.owner import user
-from cylc.state_summary import extract_group_state
+from cylc.network.suite_state import extract_group_state
 
 
-class SummaryPanelApplet(object):
+class ScanPanelApplet(object):
 
     """Panel Applet (GNOME 2) to summarise running suite statuses."""
 
@@ -55,7 +53,7 @@ class SummaryPanelApplet(object):
         setup_icons()
         if not hosts:
             try:
-                hosts = GLOBAL_CFG.get( ["suite host scanning","hosts"] )
+                hosts = GLOBAL_CFG.get(["suite host scanning", "hosts"])
             except KeyError:
                 hosts = ["localhost"]
         self.is_compact = is_compact
@@ -78,10 +76,10 @@ class SummaryPanelApplet(object):
         self.top_hbox.pack_start(image_eb, expand=False, fill=False)
         self.top_hbox.pack_start(dot_eb, expand=False, fill=False, padding=2)
         self.top_hbox.show()
-        self.updater = SummaryPanelAppletUpdater(hosts, dot_hbox, image,
-                                                 self.is_compact,
-                                                 owner=owner,
-                                                 poll_interval=poll_interval)
+        self.updater = ScanPanelAppletUpdater(hosts, dot_hbox, image,
+                                              self.is_compact,
+                                              owner=owner,
+                                              poll_interval=poll_interval)
         self.top_hbox.connect("destroy", self.stop)
 
     def get_widget(self):
@@ -103,9 +101,9 @@ class SummaryPanelApplet(object):
         tooltip.set_tip(widget, text)
 
 
-class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
+class ScanPanelAppletUpdater(BaseScanTimeoutUpdater):
 
-    """Update the summary panel applet - subclass of gsummary equivalent."""
+    """Update the scan panel applet - subclass of gscan equivalent."""
 
     IDLE_STOPPED_TIME = 3600  # 1 hour.
     MAX_INDIVIDUAL_SUITES = 5
@@ -118,34 +116,33 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
         self.is_compact = is_compact
         self._set_gcylc_image_tooltip()
         self.gcylc_image.set_sensitive(False)
-        self.theme_name = gcfg.get( ['use theme'] )
-        self.theme = gcfg.get( ['themes', self.theme_name] )
+        self.theme_name = gcfg.get(['use theme'])
+        self.theme = gcfg.get(['themes', self.theme_name])
         self.dots = DotMaker(self.theme)
-        self.statuses = {}
-        self.stop_summaries = {}
-        self.suite_update_times = {}
+        self.hosts_suites_info = {}
+        self.stopped_hosts_suites_info = {}
         self._set_exception_hook()
-        super(SummaryPanelAppletUpdater, self).__init__(
-                              hosts, owner=owner, poll_interval=poll_interval)
+        super(ScanPanelAppletUpdater, self).__init__(
+            hosts, owner=owner, poll_interval=poll_interval)
 
     def clear_stopped_suites(self):
         """Clear stopped suite information that may have built up."""
-        self.stop_summaries.clear()
+        self.stopped_hosts_suites_info.clear()
         gobject.idle_add(self.update)
 
     def start(self):
         self.gcylc_image.set_sensitive(True)
-        super(SummaryPanelAppletUpdater, self).start()
+        super(ScanPanelAppletUpdater, self).start()
         self._set_gcylc_image_tooltip()
 
     def stop(self):
         self.gcylc_image.set_sensitive(False)
-        super(SummaryPanelAppletUpdater, self).stop()
+        super(ScanPanelAppletUpdater, self).stop()
         self._set_gcylc_image_tooltip()
 
     def launch_context_menu(self, event, suite_host_tuples=None,
                             extra_items=None):
-        has_stopped_suites = bool(self.stop_summaries)
+        has_stopped_suites = bool(self.stopped_hosts_suites_info)
 
         if suite_host_tuples is None:
             suite_host_tuples = []
@@ -153,40 +150,41 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
         if extra_items is None:
             extra_items = []
 
-        gsummary_item = gtk.ImageMenuItem("Launch cylc gsummary")
+        gscan_item = gtk.ImageMenuItem("Launch cylc gscan")
         img = gtk.image_new_from_stock("gcylc", gtk.ICON_SIZE_MENU)
-        gsummary_item.set_image(img)
-        gsummary_item.show()
-        gsummary_item.connect("button-press-event",
-                                self._on_button_press_event_gsummary)
-
-        extra_items.append(gsummary_item)
-
-        menu = get_summary_menu(suite_host_tuples, 
-                                self.theme_name, self._set_theme,
-                                has_stopped_suites,
-                                self.clear_stopped_suites,
-                                self.hosts,
-                                self.set_hosts,
-                                self.update_now,
-                                self.start,
-                                program_name="cylc gpanel",
-                                extra_items=extra_items,
-                                owner=self.owner,
-                                is_stopped=self.quit)
-        menu.popup( None, None, None, event.button, event.time )
+        gscan_item.set_image(img)
+        gscan_item.show()
+        gscan_item.connect("button-press-event",
+                           self._on_button_press_event_gscan)
+
+        extra_items.append(gscan_item)
+
+        menu = get_scan_menu(suite_host_tuples,
+                             self.theme_name, self._set_theme,
+                             has_stopped_suites,
+                             self.clear_stopped_suites,
+                             self.hosts,
+                             self.set_hosts,
+                             self.update_now,
+                             self.start,
+                             program_name="cylc gpanel",
+                             extra_items=extra_items,
+                             owner=self.owner,
+                             is_stopped=self.quit)
+        menu.popup(None, None, None, event.button, event.time)
         return False
 
-    def update(self, suite_update_times=None):
+    def update(self):
         """Update the Applet."""
+        info = copy.deepcopy(self.hosts_suites_info)
+        stop_info = copy.deepcopy(self.stopped_hosts_suites_info)
         suite_host_tuples = []
-        statuses = copy.deepcopy(self.statuses)
-        stop_summaries = copy.deepcopy(self.stop_summaries)
         for host in self.hosts:
-            suites = (statuses.get(host, {}).keys() +
-                      stop_summaries.get(host, {}).keys())
+            suites = (info.get(host, {}).keys() +
+                      stop_info.get(host, {}).keys())
             for suite in suites:
-                suite_host_tuples.append((suite, host))
+                if (suite, host) not in suite_host_tuples:
+                    suite_host_tuples.append((suite, host))
         suite_host_tuples.sort()
         for child in self.dot_hbox.get_children():
             self.dot_hbox.remove(child)
@@ -195,23 +193,24 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
         suite_statuses = {}
         compact_suite_statuses = []
         for suite, host in suite_host_tuples:
-            if suite in statuses.get(host, {}):
-                task_cycle_states = statuses[host][suite]
+            if suite in info.get(host, {}):
+                suite_info = info[host][suite]
                 is_stopped = False
             else:
-                info = stop_summaries[host][suite]
-                task_cycle_states, suite_time = info
+                suite_info = stop_info[host][suite]
                 is_stopped = True
-            status_map = {}
-            for task, cycle, status in task_cycle_states:
-                status_map.setdefault(status, []).append(task + "." + cycle)
-            status = extract_group_state(status_map.keys(),
+
+            if "states" not in suite_info:
+                continue
+
+            status = extract_group_state(suite_info['states'].keys(),
                                          is_stopped=is_stopped)
+            status_map = suite_info['states']
             if number_mode:
                 suite_statuses.setdefault(is_stopped, {})
                 suite_statuses[is_stopped].setdefault(status, [])
                 suite_statuses[is_stopped][status].append(
-                                           (suite, host, status_map.items()))
+                    (suite, host, status_map.items()))
             elif self.is_compact:
                 compact_suite_statuses.append((suite, host, status,
                                                status_map.items(), is_stopped))
@@ -221,10 +220,10 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
         if number_mode:
             for is_stopped in sorted(suite_statuses.keys()):
                 statuses = suite_statuses[is_stopped].items()
+                # Sort by number of suites in this state.
                 statuses.sort(lambda x, y: cmp(len(y[1]), len(x[1])))
                 for status, suite_host_states_tuples in statuses:
-                    label = gtk.Label(
-                                str(len(suite_host_states_tuples)) + ":")
+                    label = gtk.Label(str(len(suite_host_states_tuples)) + ":")
                     label.show()
                     self.dot_hbox.pack_start(label, expand=False, fill=False)
                     suite_info_tuples = []
@@ -268,18 +267,18 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
                          self._on_button_press_event)
 
         text_format = "%s - %s - %s"
-        long_text_format = text_format + "\n    Tasks: %s\n"
+        long_text_format = text_format + "\n    %s\n"
         text = ""
         tip_vbox = gtk.VBox()  # Only used in PyGTK 2.12+
         tip_vbox.show()
         for info_tuple in suite_host_info_tuples:
-            suite, host, status, task_states, is_stopped = info_tuple
-            task_states.sort(lambda x, y: cmp(len(y[1]), len(x[1])))
+            suite, host, status, state_counts, is_stopped = info_tuple
+            state_counts.sort(lambda x, y: cmp(y[1], x[1]))
             tip_hbox = gtk.HBox()
             tip_hbox.show()
             state_info = []
-            for state_name, tasks in task_states:
-                state_info.append(str(len(tasks)) + " " + state_name)
+            for state_name, number in state_counts:
+                state_info.append("%d %s" % (number, state_name))
                 image = self.dots.get_image(state_name, is_stopped=is_stopped)
                 image.show()
                 tip_hbox.pack_start(image, expand=False, fill=False)
@@ -295,7 +294,8 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
             tip_hbox.pack_start(tip_label, expand=False, fill=False,
                                 padding=5)
             tip_vbox.pack_start(tip_hbox, expand=False, fill=False)
-            text += long_text_format % (suite, suite_summary, host, states_text)
+            text += long_text_format % (
+                suite, suite_summary, host, states_text)
         text = text.rstrip()
         if hasattr(gtk, "Tooltip"):
             image_eb.set_has_tooltip(True)
@@ -312,8 +312,8 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
                                      suite_host_tuples=widget._connect_args)
         return False
 
-    def _on_button_press_event_gsummary(self, widget, event):
-        launch_gsummary(hosts=self.hosts, owner=self.owner)
+    def _on_button_press_event_gscan(self, widget, event):
+        launch_gscan(hosts=self.hosts, owner=self.owner)
 
     def _on_img_tooltip_query(self, widget, x, y, kbd, tooltip, tip_widget):
         tooltip.set_custom(tip_widget)
@@ -334,7 +334,7 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
         info = "cylc gpanel has a problem.\n\n%s" % exc_text
         self._set_tooltip(self.gcylc_image, info.rstrip())
         if old_hook is not None:
-            old_hook(exception_class, exception, trace)
+            old_hook(e_type, e_value, e_traceback)
 
     def _set_gcylc_image_tooltip(self):
         if self.quit:
@@ -344,7 +344,7 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
 
     def _set_theme(self, new_theme_name):
         self.theme_name = new_theme_name
-        self.theme = gcfg.get( ['themes', self.theme_name] )
+        self.theme = gcfg.get(['themes', self.theme_name])
         self.dots = DotMaker(self.theme)
 
     def _set_tooltip(self, widget, text):
@@ -355,7 +355,7 @@ class SummaryPanelAppletUpdater(BaseSummaryTimeoutUpdater):
 
 def run_in_window(is_compact=False):
     """Run the panel applet in stand-alone mode."""
-    my_panel_app = SummaryPanelApplet(is_compact=is_compact)
+    my_panel_app = ScanPanelApplet(is_compact=is_compact)
     window = gtk.Window()
     window.set_title("cylc panel applet test")
     window.add(my_panel_app.top_hbox)
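
The gpanel updater above no longer works from per-task (task, cycle, status) tuples; it
now reads the nested host -> suite -> properties dictionaries filled in by the gscan
code below, where each suite carries a "states" mapping of state name to task count
(plus per-cycle-point "states:<point>" entries) and an integer "update-time". A small
illustrative example of that shape, with made-up host, suite and counts:

    # Illustrative only: shape of hosts_suites_info as consumed by
    # ScanPanelAppletUpdater.update(); host/suite names and numbers invented.
    hosts_suites_info = {
        "localhost": {
            "my.suite": {
                "states": {"running": 3, "succeeded": 12, "failed": 1},
                "states:20151212T0000Z": {"running": 3, "failed": 1},
                "update-time": 1449928490,
            },
        },
    }
    # update() then calls
    #   extract_group_state(suite_info['states'].keys(), is_stopped=...)
    # to choose a single overall state dot for each suite.
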
diff --git a/lib/cylc/gui/graph.py b/lib/cylc/gui/graph.py
index 6a38e8e..178bdc9 100644
--- a/lib/cylc/gui/graph.py
+++ b/lib/cylc/gui/graph.py
@@ -17,64 +17,62 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import gobject
-#import pygtk
-#pygtk.require('2.0')
 import gtk
 
-from cylc.config import config, SuiteConfigError
 from gcapture import gcapture, gcapture_tmpfile
 from warning_dialog import warning_dialog
 
-def graph_suite_popup( reg, cmd_help, defstartc, defstopc, graph_opts,
-                       gcapture_windows, tmpdir, template_opts, parent_window=None ):
+
+def graph_suite_popup(reg, cmd_help, defstartc, defstopc, graph_opts,
+                      gcapture_windows, tmpdir, template_opts,
+                      parent_window=None):
     """Popup a dialog to allow a user to configure their suite graphing."""
     try:
         import xdot
     except Exception, x:
-        warning_dialog( str(x) + "\nGraphing disabled.", parent_window ).warn()
+        warning_dialog(str(x) + "\nGraphing disabled.", parent_window).warn()
         return False
 
     window = gtk.Window()
     window.set_border_width(5)
-    window.set_title( "cylc graph " + reg)
-    window.set_transient_for( parent_window )
-    window.set_type_hint( gtk.gdk.WINDOW_TYPE_HINT_DIALOG )
+    window.set_title("cylc graph " + reg)
+    window.set_transient_for(parent_window)
+    window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
 
     vbox = gtk.VBox()
 
-    label = gtk.Label("[START]: " )
+    label = gtk.Label("[START]: ")
     start_entry = gtk.Entry()
     start_entry.set_max_length(14)
     if defstartc:
-        start_entry.set_text( str(defstartc) )
+        start_entry.set_text(str(defstartc))
     ic_hbox = gtk.HBox()
-    ic_hbox.pack_start( label )
+    ic_hbox.pack_start(label)
     ic_hbox.pack_start(start_entry, True)
     vbox.pack_start(ic_hbox)
 
-    label = gtk.Label("[STOP]:" )
+    label = gtk.Label("[STOP]:")
     stop_entry = gtk.Entry()
     stop_entry.set_max_length(14)
     if defstopc:
-        stop_entry.set_text( str(defstopc) )
+        stop_entry.set_text(str(defstopc))
     fc_hbox = gtk.HBox()
-    fc_hbox.pack_start( label )
+    fc_hbox.pack_start(label)
     fc_hbox.pack_start(stop_entry, True)
-    vbox.pack_start (fc_hbox, True)
+    vbox.pack_start(fc_hbox, True)
 
-    cancel_button = gtk.Button( "_Close" )
-    cancel_button.connect("clicked", lambda x: window.destroy() )
-    ok_button = gtk.Button( "_Graph" )
-    ok_button.connect("clicked",
-              lambda w: graph_suite(
-                  reg,
-                  start_entry.get_text(),
-                  stop_entry.get_text(),
-                  graph_opts,  gcapture_windows,
-                  tmpdir, template_opts, parent_window))
+    cancel_button = gtk.Button("_Close")
+    cancel_button.connect("clicked", lambda x: window.destroy())
+    ok_button = gtk.Button("_Graph")
+    ok_button.connect("clicked", lambda w: graph_suite(
+        reg,
+        start_entry.get_text(),
+        stop_entry.get_text(),
+        graph_opts, gcapture_windows,
+        tmpdir, template_opts, parent_window))
 
-    help_button = gtk.Button( "_Help" )
-    help_button.connect("clicked", cmd_help, 'prep', 'graph' )
+    help_button = gtk.Button("_Help")
+    help_button.connect("clicked", cmd_help, 'prep', 'graph')
 
     hbox = gtk.HBox()
     hbox.pack_start(ok_button, False)
@@ -87,12 +85,12 @@ def graph_suite_popup( reg, cmd_help, defstartc, defstopc, graph_opts,
 
 
 def graph_suite(reg, start, stop, graph_opts,
-        gcapture_windows, tmpdir, template_opts, window=None):
+                gcapture_windows, tmpdir, template_opts, window=None):
     """Launch the cylc graph command with some options."""
     options = graph_opts
     options += ' ' + reg + ' ' + start + ' ' + stop
-    command = "cylc graph --notify-completion " + template_opts + " " + options
-    foo = gcapture_tmpfile( command, tmpdir )
+    command = "cylc graph " + template_opts + " " + options
+    foo = gcapture_tmpfile(command, tmpdir)
     gcapture_windows.append(foo)
     foo.run()
     return False
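
graph_suite() above simply concatenates its arguments into a "cylc graph" command
(no longer adding --notify-completion) and runs it inside a gcapture_tmpfile window,
so it is meant to be called from code that already has a GTK main loop running. A
sketch of a direct call, where the suite name, cycle points and empty option strings
are placeholders:

    # Sketch only; argument values are illustrative placeholders.
    import tempfile

    from cylc.gui.graph import graph_suite

    windows = []                      # the caller keeps capture windows alive
    graph_suite("my.suite", "20150101T00", "20150105T00",
                graph_opts="", gcapture_windows=windows,
                tmpdir=tempfile.mkdtemp(), template_opts="")
    # Assembles roughly "cylc graph my.suite 20150101T00 20150105T00"
    # and displays its output in a new capture window.
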
diff --git a/lib/cylc/gui/gsummary.py b/lib/cylc/gui/gscan.py
similarity index 60%
rename from lib/cylc/gui/gsummary.py
rename to lib/cylc/gui/gscan.py
index 57af214..a169526 100644
--- a/lib/cylc/gui/gsummary.py
+++ b/lib/cylc/gui/gscan.py
@@ -20,7 +20,8 @@ import copy
 import os
 import re
 import shlex
-import subprocess
+import signal
+from subprocess import Popen, PIPE, STDOUT
 import sys
 import threading
 import time
@@ -28,11 +29,10 @@ import time
 import gtk
 import gobject
 from isodatetime.data import get_timepoint_from_seconds_since_unix_epoch
-#import pygtk
-#pygtk.require('2.0')
 
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.cfgspec.gcylc import gcfg
+import cylc.flags
 from cylc.gui.legend import ThemeLegendWindow
 from cylc.gui.app_gcylc import run_get_stdout
 from cylc.gui.dot_maker import DotMaker
@@ -47,68 +47,131 @@ from cylc.task_state import task_state
 PYRO_TIMEOUT = 2
 
 
-def get_host_suites(hosts, timeout=None, owner=None):
-    """Return a dictionary of hosts and their running suites."""
+def parse_cylc_scan_raw(text):
+    """Parse cylc scan --raw formatted output.
+
+    Return a nested host->suite->prop->value dictionary,
+    where the properties are as named in the 4th column of
+    cylc scan --raw output.
+
+    For the states properties, translate the state lines into
+    key-value pairs (e.g. failed:1 => {failed: 1}).
+
+    """
+    host_suite_properties = {}
+    for line in text.splitlines():
+        try:
+            suite, owner, host, prop, value = line.strip().split("|")
+        except ValueError:
+            continue
+        host_suite_properties.setdefault(host, {}).setdefault(suite, {})
+        if prop.startswith("states"):
+            new_value = {}
+            for item in value.split():
+                state, num = item.rsplit(":", 1)
+                new_value[state] = int(num)
+            value = new_value
+        if prop == "update-time":
+            value = int(float(value))
+        host_suite_properties[host][suite][prop] = value
+    return host_suite_properties
+
+
+def get_hosts_suites_info(hosts, timeout=None, owner=None):
+    """Return a dictionary of hosts, suites, and their properties."""
     host_suites_map = {}
     if timeout is None:
         timeout = PYRO_TIMEOUT
-    command = ["cylc", "scan", "--pyro-timeout=%s" % timeout]
+    command = ["cylc", "scan", "--raw", "--pyro-timeout=%s" % timeout]
     if owner:
         command.append("--owner=%s" % owner)
     if hosts:
         command += hosts
-    popen = subprocess.Popen(
-        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    res = popen.wait()
-    if res == 0:
-        for line in popen.communicate()[0].splitlines():
-            if line:
-                name, _, host, _ = line.split()
-                if host not in host_suites_map:
-                    host_suites_map[host] = []
-                host_suites_map[host].append(name)
+    if cylc.flags.debug:
+        stderr = sys.stderr
+        command.append("--debug")
+    else:
+        stderr = PIPE
+    env = os.environ.copy()
+    env["PATH"] = ":".join(sys.path) + ":" + env["PATH"]
+    proc = Popen(
+        command, stdout=PIPE, stderr=stderr, env=env, preexec_fn=os.setpgrp)
+    try:
+        out, err = proc.communicate()
+        if proc.wait():
+            if cylc.flags.debug:
+                sys.stderr.write(err)
+        else:
+            host_suites_map = parse_cylc_scan_raw(out)
+    finally:
+        if proc.poll() is None:
+            try:
+                os.killpg(proc.pid, signal.SIGTERM)
+            except OSError:
+                pass
+    for host, suites_map in host_suites_map.items():
+        for suite, suite_info in suites_map.items():
+            if suite_info.keys() == ["port"]:
+                # Just the port file - could be an older suite daemon.
+                suite_info.update(
+                    get_unscannable_suite_info(
+                        host, suite, owner=owner))
     return host_suites_map
 
 
-def get_task_cycle_statuses_updatetime(host, suite, owner=None):
-    """Return a list of task, cycle, status tuples, or None and update time."""
+def get_unscannable_suite_info(host, suite, owner=None):
+    """Return a map like cylc scan --raw for states and last update time."""
     if owner is None:
         owner = user
-    command = ["cylc", "cat-state", "--host=%s" % host,
-               "--user=%s" % owner, suite]
-    popen = subprocess.Popen(command,
-                             stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
-    stdout = popen.stdout.read()
-    res = popen.wait()
-    if res != 0:
-        sys.stderr.write(popen.stderr.read())
-        return None, None
-    task_cycle_statuses = []
-    for line in stdout.rpartition("Begin task states")[2].splitlines():
+    command = ["cylc", "cat-state", "--host=" + host, "--user=" + owner]
+    if cylc.flags.debug:
+        stderr = sys.stderr
+        command.append("--debug")
+    else:
+        stderr = PIPE
+    proc = Popen(
+        command + [suite], stdout=PIPE, stderr=stderr, preexec_fn=os.setpgrp)
+    try:
+        out, err = proc.communicate()
+        if proc.wait():  # non-zero return code
+            if cylc.flags.debug:
+                sys.stderr.write(err)
+            return {}
+    finally:
+        if proc.poll() is None:
+            try:
+                os.killpg(proc.pid, signal.SIGTERM)
+            except OSError:
+                pass
+    suite_info = {}
+    for line in out.rpartition("Begin task states")[2].splitlines():
         task_result = re.match("([^ ]+) : status=([^,]+), spawned", line)
         if not task_result:
             continue
-        task, status = task_result.groups()
-        task_order = task.split(".")
-        task_order.append(status)
-        task_cycle_statuses.append(tuple(task_order))
-    suite_update_times = re.search("^time : [^ ]+ \(([0-9]+)\)$", stdout, re.M)
-    if suite_update_times is not None:
-        suite_update_times = int(suite_update_times.group(1))
+        task, state = task_result.groups()
+        task_name, task_point = task.split(".")
+        for states_point in ("states", "states:" + task_point):
+            suite_info.setdefault(states_point, {})
+            suite_info[states_point].setdefault(state, 0)
+            suite_info[states_point][state] += 1
+    suite_update_time_match = re.search(
+        "^time : [^ ]+ \(([0-9]+)\)$", out, re.M)
+    if suite_update_time_match is None:
+        suite_update_time = int(time.time())
     else:
-        suite_update_times = time.time()
-    return task_cycle_statuses, suite_update_times
+        suite_update_time = int(suite_update_time_match.group(1))
+    suite_info['update-time'] = suite_update_time
+    return suite_info
 
 
-def get_summary_menu(suite_host_tuples,
-                     theme_name, set_theme_func,
-                     has_stopped_suites, clear_stopped_suites_func,
-                     scanned_hosts, change_hosts_func,
-                     update_now_func, start_func,
-                     program_name, extra_items=None, owner=None,
-                     is_stopped=False):
-    """Return a right click menu for summary GUIs.
+def get_scan_menu(suite_host_tuples,
+                  theme_name, set_theme_func,
+                  has_stopped_suites, clear_stopped_suites_func,
+                  scanned_hosts, change_hosts_func,
+                  update_now_func, start_func,
+                  program_name, extra_items=None, owner=None,
+                  is_stopped=False):
+    """Return a right click menu for scan GUIs.
 
     suite_host_tuples should be a list of (suite, host) tuples (if any).
     theme_name should be the name of the current theme.
@@ -150,11 +213,12 @@ def get_summary_menu(suite_host_tuples,
         img = gtk.image_new_from_stock("gcylc", gtk.ICON_SIZE_MENU)
         gcylc_item.set_image(img)
         gcylc_item._connect_args = (suite, host)
-        gcylc_item.connect("button-press-event",
-                            lambda b, e: launch_gcylc(
-                                                b._connect_args[1],
-                                                b._connect_args[0],
-                                                owner=owner))
+        gcylc_item.connect(
+            "button-press-event",
+            lambda b, e: launch_gcylc(b._connect_args[1],
+                                      b._connect_args[0],
+                                      owner=owner)
+        )
         gcylc_item.show()
         menu.append(gcylc_item)
     if suite_host_tuples:
@@ -183,16 +247,17 @@ def get_summary_menu(suite_host_tuples,
     theme_items[theme] = gtk.RadioMenuItem(label=theme)
     thememenu.append(theme_items[theme])
     theme_items[theme].theme_name = theme
-    for theme in gcfg.get( ['themes'] ):
+    for theme in gcfg.get(['themes']):
         if theme == "default":
             continue
-        theme_items[theme] = gtk.RadioMenuItem(group=theme_items['default'], label=theme)
+        theme_items[theme] = gtk.RadioMenuItem(
+            group=theme_items['default'], label=theme)
         thememenu.append(theme_items[theme])
         theme_items[theme].theme_name = theme
 
     # set_active then connect, to avoid causing an unnecessary toggle now.
     theme_items[theme_name].set_active(True)
-    for theme in gcfg.get( ['themes'] ):
+    for theme in gcfg.get(['themes']):
         theme_items[theme].show()
         theme_items[theme].connect('toggled',
                                    lambda i: (i.get_active() and
@@ -202,9 +267,10 @@ def get_summary_menu(suite_host_tuples,
     theme_legend_item = gtk.MenuItem("Show task state key")
     theme_legend_item.show()
     theme_legend_item.set_sensitive(not is_stopped)
-    theme_legend_item.connect("button-press-event",
-                              lambda b, e: launch_theme_legend(
-                                        gcfg.get(['themes',theme_name])))
+    theme_legend_item.connect(
+        "button-press-event",
+        lambda b, e: launch_theme_legend(gcfg.get(['themes', theme_name]))
+    )
     menu.append(theme_legend_item)
     sep_item = gtk.SeparatorMenuItem()
     sep_item.show()
@@ -227,7 +293,7 @@ def get_summary_menu(suite_host_tuples,
     clear_item.show()
     clear_item.set_sensitive(has_stopped_suites)
     clear_item.connect("button-press-event",
-                        lambda b, e: clear_stopped_suites_func())
+                       lambda b, e: clear_stopped_suites_func())
     menu.append(clear_item)
 
     # Construct a configure scanned hosts item.
@@ -249,10 +315,11 @@ def get_summary_menu(suite_host_tuples,
     img = gtk.image_new_from_stock(gtk.STOCK_ABOUT, gtk.ICON_SIZE_MENU)
     info_item.set_image(img)
     info_item.show()
-    info_item.connect("button-press-event",
-                      lambda b, e: launch_about_dialog(
-                                          program_name,
-                                          scanned_hosts))
+    info_item.connect(
+        "button-press-event",
+        lambda b, e: launch_about_dialog(program_name,
+                                         scanned_hosts)
+    )
     menu.append(info_item)
     return menu
 
@@ -280,25 +347,53 @@ def launch_gcylc(host, suite, owner=None):
     """Launch gcylc for a given suite and host."""
     if owner is None:
         owner = user
-    stdout = open(os.devnull, "w")
-    stderr = stdout
-    command = "cylc gui --host=%s --user=%s %s" % (
-                                         host, owner, suite)
-    command = shlex.split(command)
-    subprocess.Popen(command, stdout=stdout, stderr=stderr)
-
-
-def launch_gsummary(hosts=None, owner=None):
-    """Launch gsummary for a given list of hosts and/or owner."""
-    stdout = open(os.devnull, "w")
-    stderr = stdout
-    command = ["cylc", "gsummary"]
+    args = ["--host=" + host, "--user=" + owner, suite]
+
+    # Get version of suite
+    f_null = open(os.devnull, "w")
+    if cylc.flags.debug:
+        stderr = sys.stderr
+        args = ["--debug"] + args
+    else:
+        stderr = f_null
+    command = ["cylc", "get-suite-version"] + args
+    proc = Popen(command, stdout=PIPE, stderr=stderr)
+    suite_version = proc.communicate()[0].strip()
+    proc.wait()
+
+    # Run correct version of "cylc gui", provided that "admin/cylc-wrapper" is
+    # installed.
+    env = None
+    if suite_version != CYLC_VERSION:
+        env = dict(os.environ)
+        env["CYLC_VERSION"] = suite_version
+    command = ["cylc", "gui"] + args
+    if cylc.flags.debug:
+        stdout = sys.stdout
+        stderr = sys.stderr
+        Popen(command, env=env, stdout=stdout, stderr=stderr)
+    else:
+        stdout = f_null
+        stderr = STDOUT
+        Popen(["nohup"] + command, env=env, stdout=stdout, stderr=stderr)
+
+
+def launch_gscan(hosts=None, owner=None):
+    """Launch gscan for a given list of hosts and/or owner."""
+    if cylc.flags.debug:
+        stdout = sys.stdout
+        stderr = sys.stderr
+        command = ["cylc", "gscan", "--debug"]
+    else:
+        stdout = open(os.devnull, "w")
+        stderr = STDOUT
+        command = ["cylc", "gscan"]
     if hosts is not None:
         for host in hosts:
             command += ["--host=%s" % host]
     if owner is not None:
         command += ["--user=%s" % owner]
-    subprocess.Popen(command, stdout=stdout, stderr=stderr)
+    Popen(command, stdout=stdout, stderr=stderr)
 
 
 def launch_hosts_dialog(existing_hosts, change_hosts_func):
@@ -339,13 +434,13 @@ def launch_theme_legend(theme):
     ThemeLegendWindow(None, theme)
 
 
-class SummaryApp(object):
+class ScanApp(object):
 
     """Summarize running suite statuses for a given set of hosts."""
 
     def __init__(self, hosts=None, owner=None, poll_interval=None):
         gobject.threads_init()
-        set_exception_hook_dialog("cylc gsummary")
+        set_exception_hook_dialog("cylc gscan")
         setup_icons()
         if not hosts:
             hosts = GLOBAL_CFG.get(["suite host scanning", "hosts"])
@@ -354,16 +449,16 @@ class SummaryApp(object):
             owner = user
         self.owner = owner
         self.window = gtk.Window()
-        self.window.set_title("cylc gsummary")
+        self.window.set_title("cylc gscan")
         self.window.set_icon(get_icon())
         self.vbox = gtk.VBox()
         self.vbox.show()
 
-        self.theme_name = gcfg.get( ['use theme'] )
-        self.theme = gcfg.get( ['themes', self.theme_name] )
+        self.theme_name = gcfg.get(['use theme'])
+        self.theme = gcfg.get(['themes', self.theme_name])
 
         self.dots = DotMaker(self.theme)
-        suite_treemodel = gtk.TreeStore(str, str, bool, str, int, int, str, str)
+        suite_treemodel = gtk.TreeStore(str, str, bool, str, int, str, str)
         self._prev_tooltip_location_id = None
         self.suite_treeview = gtk.TreeView(suite_treemodel)
 
@@ -372,59 +467,64 @@ class SummaryApp(object):
         cell_text_host = gtk.CellRendererText()
         host_name_column.pack_start(cell_text_host, expand=False)
         host_name_column.set_cell_data_func(
-                    cell_text_host, self._set_cell_text_host)
+            cell_text_host, self._set_cell_text_host)
         host_name_column.set_sort_column_id(0)
         host_name_column.set_visible(False)
+        host_name_column.set_resizable(True)
 
         # Construct the suite name column.
         suite_name_column = gtk.TreeViewColumn("Suite")
         cell_text_name = gtk.CellRendererText()
         suite_name_column.pack_start(cell_text_name, expand=False)
         suite_name_column.set_cell_data_func(
-                   cell_text_name, self._set_cell_text_name)
+            cell_text_name, self._set_cell_text_name)
         suite_name_column.set_sort_column_id(1)
+        suite_name_column.set_resizable(True)
 
         # Construct the suite title column.
         suite_title_column = gtk.TreeViewColumn("Title")
         cell_text_title = gtk.CellRendererText()
         suite_title_column.pack_start(cell_text_title, expand=False)
         suite_title_column.set_cell_data_func(
-                    cell_text_title, self._set_cell_text_title)
+            cell_text_title, self._set_cell_text_title)
         suite_title_column.set_sort_column_id(3)
         suite_title_column.set_visible(False)
+        suite_title_column.set_resizable(True)
 
         # Construct the update time column.
         time_column = gtk.TreeViewColumn("Updated")
         cell_text_time = gtk.CellRendererText()
         time_column.pack_start(cell_text_time, expand=False)
         time_column.set_cell_data_func(
-                    cell_text_time, self._set_cell_text_time)
+            cell_text_time, self._set_cell_text_time)
         time_column.set_sort_column_id(4)
         time_column.set_visible(False)
+        time_column.set_resizable(True)
 
         self.suite_treeview.append_column(host_name_column)
         self.suite_treeview.append_column(suite_name_column)
         self.suite_treeview.append_column(suite_title_column)
         self.suite_treeview.append_column(time_column)
 
-      # Construct the status column.
+        # Construct the status column.
         status_column = gtk.TreeViewColumn("Status")
-        status_column.set_sort_column_id(6)
-        status_column_info = 7
-        cycle_column_info = 6
+        status_column.set_sort_column_id(5)
+        status_column.set_resizable(True)
+        status_column_info = 6
+        cycle_column_info = 5
         cell_text_cycle = gtk.CellRendererText()
         status_column.pack_start(cell_text_cycle, expand=False)
         status_column.set_cell_data_func(
-                cell_text_cycle, self._set_cell_text_cycle,
-                cycle_column_info)
+            cell_text_cycle, self._set_cell_text_cycle, cycle_column_info)
         self.suite_treeview.append_column(status_column)
         distinct_states = len(task_state.legal)
         for i in range(distinct_states):
             cell_pixbuf_state = gtk.CellRendererPixbuf()
             status_column.pack_start(cell_pixbuf_state, expand=False)
             status_column.set_cell_data_func(
-                    cell_pixbuf_state, self._set_cell_pixbuf_state,
-                    (status_column_info, i))
+                cell_pixbuf_state, self._set_cell_pixbuf_state,
+                (status_column_info, i)
+            )
 
         self.suite_treeview.show()
         if hasattr(self.suite_treeview, "set_has_tooltip"):
@@ -443,9 +543,10 @@ class SummaryApp(object):
         scrolled_window.add(self.suite_treeview)
         scrolled_window.show()
         self.vbox.pack_start(scrolled_window, expand=True, fill=True)
-        self.updater = SummaryAppUpdater(self.hosts, suite_treemodel,
-                        self.suite_treeview, owner=self.owner,
-                        poll_interval=poll_interval)
+        self.updater = ScanAppUpdater(
+            self.hosts, suite_treemodel, self.suite_treeview,
+            owner=self.owner, poll_interval=poll_interval
+        )
         self.updater.start()
         self.window.add(self.vbox)
         self.window.connect("destroy", self._on_destroy_event)
@@ -457,7 +558,7 @@ class SummaryApp(object):
         # DISPLAY MENU ONLY ON RIGHT CLICK ONLY
 
         if (event.type != gtk.gdk._2BUTTON_PRESS and
-            event.button != 3):
+                event.button != 3):
             return False
 
         treemodel = treeview.get_model()
@@ -476,7 +577,7 @@ class SummaryApp(object):
             iter_ = treemodel.get_iter(path)
             host, suite = treemodel.get(iter_, 0, 1)
             if suite is None:
-                # On an expanded cycle point summary row, so get from parent.
+                # On an expanded cycle point row, so get from parent.
                 host, suite = treemodel.get(treemodel.iter_parent(iter_), 0, 1)
             suite_host_tuples.append((suite, host))
 
@@ -485,7 +586,7 @@ class SummaryApp(object):
                 launch_gcylc(host, suite, owner=self.owner)
             return False
 
-        has_stopped_suites = bool(self.updater.stop_summaries)
+        has_stopped_suites = bool(self.updater.stopped_hosts_suites_info)
 
         view_item = gtk.ImageMenuItem("View Column...")
         img = gtk.image_new_from_stock(gtk.STOCK_INDEX, gtk.ICON_SIZE_MENU)
@@ -503,19 +604,21 @@ class SummaryApp(object):
             column_item.show()
             view_menu.append(column_item)
 
-        menu = get_summary_menu(suite_host_tuples,
-                                self.theme_name,
-                                self._set_theme,
-                                has_stopped_suites,
-                                self.updater.clear_stopped_suites,
-                                self.hosts,
-                                self.updater.set_hosts,
-                                self.updater.update_now,
-                                self.updater.start,
-                                program_name="cylc gsummary",
-                                extra_items=[view_item],
-                                owner=self.owner)
-        menu.popup( None, None, None, event.button, event.time )
+        menu = get_scan_menu(
+            suite_host_tuples,
+            self.theme_name,
+            self._set_theme,
+            has_stopped_suites,
+            self.updater.clear_stopped_suites,
+            self.hosts,
+            self.updater.set_hosts,
+            self.updater.update_now,
+            self.updater.start,
+            program_name="cylc gscan",
+            extra_items=[view_item],
+            owner=self.owner
+        )
+        menu.popup(None, None, None, event.button, event.time)
         return False
 
     def _on_destroy_event(self, widget):
@@ -530,8 +633,8 @@ class SummaryApp(object):
             self._prev_tooltip_location_id = None
             return False
         x, y = self.suite_treeview.convert_widget_to_bin_window_coords(x, y)
-        path, column, cell_x, cell_y = self.suite_treeview.get_path_at_pos(
-                                                                    x, y)
+        path, column, cell_x, cell_y = (
+            self.suite_treeview.get_path_at_pos(x, y))
         model = self.suite_treeview.get_model()
         iter_ = model.get_iter(path)
         parent_iter = model.iter_parent(iter_)
@@ -544,7 +647,6 @@ class SummaryApp(object):
             suite = model.get_value(parent_iter, 1)
             child_row_number = path[-1]
         suite_update_time = model.get_value(iter_, 4)
-        last_update_time = model.get_value(iter_, 5)
         location_id = (host, suite, suite_update_time, column.get_title(),
                        child_row_number)
 
@@ -556,16 +658,25 @@ class SummaryApp(object):
             tooltip.set_text(suite + " - " + host)
             return True
         if column.get_title() == "Updated":
-            time_point = get_timepoint_from_seconds_since_unix_epoch(
-                last_update_time)
-            tooltip.set_text("Info retrieved at " + str(time_point))
+            suite_update_point = get_timepoint_from_seconds_since_unix_epoch(
+                suite_update_time)
+            if (self.updater.last_update_time is not None and
+                    suite_update_time != int(self.updater.last_update_time)):
+                retrieval_point = get_timepoint_from_seconds_since_unix_epoch(
+                    int(self.updater.last_update_time))
+                text = "Last changed at %s\n" % suite_update_point
+                text += "Last scanned at %s" % retrieval_point
+            else:
+                # An older suite daemon, or no update recorded yet.
+                text = "Last scanned at %s" % suite_update_point
+            tooltip.set_text(text)
             return True
 
         if column.get_title() != "Status":
             tooltip.set_text(None)
             return False
         state_texts = []
-        status_column_info = 7
+        status_column_info = 6
         state_text = model.get_value(iter_, status_column_info)
         if state_text is None:
             tooltip.set_text(None)
@@ -574,7 +685,7 @@ class SummaryApp(object):
         for status_number in info:
             status, number = status_number.rsplit(" ", 1)
             state_texts.append(number + " " + status.strip())
-            text = "Tasks: " + ", ".join(state_texts)
+        text = "Tasks: " + ", ".join(state_texts)
         tooltip.set_text(text)
         return True
 
@@ -621,13 +732,13 @@ class SummaryApp(object):
         cell.set_property("text", title)
 
     def _set_cell_text_time(self, column, cell, model, iter_):
-        suite_update_times = model.get_value(iter_, 4)
+        suite_update_time = model.get_value(iter_, 4)
         time_point = get_timepoint_from_seconds_since_unix_epoch(
-                        suite_update_times)
+            suite_update_time)
         time_point.set_time_zone_to_local()
         current_time = time.time()
-        current_point = get_timepoint_from_seconds_since_unix_epoch(
-                        current_time)
+        current_point = (
+            get_timepoint_from_seconds_since_unix_epoch(current_time))
         if str(time_point).split("T")[0] == str(current_point).split("T")[0]:
             time_string = str(time_point).split("T")[1]
         else:
@@ -644,7 +755,7 @@ class SummaryApp(object):
 
     def _set_theme(self, new_theme_name):
         self.theme_name = new_theme_name
-        self.theme = gcfg.get( ['themes',self.theme_name] )
+        self.theme = gcfg.get(['themes', self.theme_name])
         self.dots = DotMaker(self.theme)
 
     def _set_tooltip(self, widget, text):
@@ -653,9 +764,9 @@ class SummaryApp(object):
         tooltip.set_tip(widget, text)
 
 
-class BaseSummaryUpdater(threading.Thread):
+class BaseScanUpdater(threading.Thread):
 
-    """Retrieve running suite summary information.
+    """Retrieve running suite scan information.
 
     Subclasses must provide an update method.
 
@@ -666,20 +777,20 @@ class BaseSummaryUpdater(threading.Thread):
     def __init__(self, hosts, owner=None, poll_interval=None):
         self.hosts = hosts
         if owner is None:
-           owner = user
+            owner = user
         if poll_interval is None:
             poll_interval = self.POLL_INTERVAL
         self.poll_interval = poll_interval
         self.owner = owner
-        self.statuses = {}
-        self.stop_summaries = {}
-        self.suite_update_times = {}
-        self.prev_suites = []
+        self.hosts_suites_info = {}
+        self.stopped_hosts_suites_info = {}
+        self.prev_hosts_suites = []
+        self.last_update_time = None
         self._should_force_update = False
         self.quit = False
-        super(BaseSummaryUpdater, self).__init__()
+        super(BaseScanUpdater, self).__init__()
 
-    def update(self, suite_update_times=None):
+    def update(self):
         """An update method that must be defined in subclasses."""
         raise NotImplementedError()
 
@@ -690,40 +801,41 @@ class BaseSummaryUpdater(threading.Thread):
     def run(self):
         """Execute the main loop of the thread."""
         prev_suites = []
-        last_update_time = None
         while not self.quit:
-            current_time = time.time()
-            if (not self._should_force_update and
-                (last_update_time is not None and
-                 current_time < last_update_time + self.poll_interval)):
+            time_for_update = (
+                self.last_update_time is None or
+                time.time() >= self.last_update_time + self.poll_interval
+            )
+            if not self._should_force_update and not time_for_update:
                 time.sleep(1)
                 continue
             if self._should_force_update:
                 self._should_force_update = False
+
             # Sanitise hosts.
-            for host in self.stop_summaries.keys():
+            for host in self.stopped_hosts_suites_info:
                 if host not in self.hosts:
-                    self.stop_summaries.pop(host)
-            for (host, suite) in list(self.prev_suites):
+                    self.stopped_hosts_suites_info.pop(host)
+            for (host, suite) in list(self.prev_hosts_suites):
                 if host not in self.hosts:
-                    self.prev_suites.remove((host, suite))
+                    self.prev_hosts_suites.remove((host, suite))
 
             # Get new information.
-            statuses, stop_summaries, suite_update_times = (
-                get_new_statuses_and_stop_summaries_updatetime(
-                            self.hosts, self.owner,
-                            prev_stop_summaries=self.stop_summaries,
-                            prev_suites=self.prev_suites))
-            prev_suites = []
-            for host in statuses:
-                for suite in statuses[host]:
-                    prev_suites.append((host, suite))
-            self.prev_suites = prev_suites
-            self.statuses = statuses
-            self.stop_summaries = stop_summaries
-            last_update_time = time.time()
-            self.suite_update_times = suite_update_times
-            gobject.idle_add(self.update, current_time)
+            self.hosts_suites_info, self.stopped_hosts_suites_info = (
+                update_hosts_suites_info(
+                    self.hosts, self.owner,
+                    prev_stopped_hosts_suites_info=(
+                        self.stopped_hosts_suites_info),
+                    prev_hosts_suites=self.prev_hosts_suites
+                )
+            )
+            prev_hosts_suites = []
+            for host, suites in self.hosts_suites_info.items():
+                for suite in suites:
+                    prev_hosts_suites.append((host, suite))
+            self.prev_hosts_suites = prev_hosts_suites
+            self.last_update_time = time.time()
+            gobject.idle_add(self.update)
             time.sleep(1)
 
     def set_hosts(self, new_hosts):
@@ -733,9 +845,9 @@ class BaseSummaryUpdater(threading.Thread):
         self.update_now()
 
 
-class BaseSummaryTimeoutUpdater(object):
+class BaseScanTimeoutUpdater(object):
 
-    """Retrieve running suite summary information.
+    """Retrieve running suite scan information.
 
     Subclasses must provide an update method.
 
@@ -752,16 +864,15 @@ class BaseSummaryTimeoutUpdater(object):
             poll_interval = self.POLL_INTERVAL
         self.poll_interval = poll_interval
         self.owner = owner
-        self.statuses = {}
-        self.stop_summaries = {}
-        self.suite_update_times = {}
+        self.hosts_suites_info = {}
+        self.stopped_hosts_suites_info = {}
         self._should_force_update = False
         self._last_running_time = None
         self.quit = True
         self.last_update_time = None
-        self.prev_suites = []
+        self.prev_hosts_suites = []
 
-    def update(self, suite_update_times=None):
+    def update(self):
         """An update method that must be defined in subclasses."""
         raise NotImplementedError()
 
@@ -786,45 +897,45 @@ class BaseSummaryTimeoutUpdater(object):
             return False
         current_time = time.time()
         if (self._last_running_time is not None and
-            self.IDLE_STOPPED_TIME is not None and
-            current_time > self._last_running_time + self.IDLE_STOPPED_TIME):
+                self.IDLE_STOPPED_TIME is not None and
+                current_time > (
+                    self._last_running_time + self.IDLE_STOPPED_TIME)):
             self.stop()
             return True
         if (not self._should_force_update and
-            (self.last_update_time is not None and
-             current_time < self.last_update_time + self.poll_interval)):
+                (self.last_update_time is not None and
+                 current_time < self.last_update_time + self.poll_interval)):
             return True
         if self._should_force_update:
             self._should_force_update = False
 
         # Sanitise hosts.
-        for host in self.stop_summaries.keys():
+        for host in self.stopped_hosts_suites_info.keys():
             if host not in self.hosts:
-                self.stop_summaries.pop(host)
-        for (host, suite) in list(self.prev_suites):
+                self.stopped_hosts_suites_info.pop(host)
+        for (host, suite) in list(self.prev_hosts_suites):
             if host not in self.hosts:
-                self.prev_suites.remove((host, suite))
+                self.prev_hosts_suites.remove((host, suite))
 
         # Get new information.
-        statuses, stop_summaries, suite_update_times = (
-            get_new_statuses_and_stop_summaries_updatetime(
-                       self.hosts, self.owner,
-                       prev_stop_summaries=self.stop_summaries,
-                       prev_suites=self.prev_suites))
-        prev_suites = []
-        for host in statuses:
-            for suite in statuses[host]:
-                prev_suites.append((host, suite))
-        self.prev_suites = prev_suites
-        self.statuses = statuses
-        self.stop_summaries = stop_summaries
-        self.suite_update_times = suite_update_times
+        self.hosts_suites_info, self.stopped_hosts_suites_info = (
+            update_hosts_suites_info(
+                self.hosts, self.owner,
+                prev_stopped_hosts_suites_info=self.stopped_hosts_suites_info,
+                prev_hosts_suites=self.prev_hosts_suites
+            )
+        )
+        prev_hosts_suites = []
+        for host, suites in self.hosts_suites_info.items():
+            for suite in suites:
+                prev_hosts_suites.append((host, suite))
+        self.prev_hosts_suites = prev_hosts_suites
         self.last_update_time = time.time()
-        if self.statuses:
+        if self.hosts_suites_info:
             self._last_running_time = None
         else:
             self._last_running_time = self.last_update_time
-        gobject.idle_add(self.update, current_time)
+        gobject.idle_add(self.update)
         return True
 
     def set_hosts(self, new_hosts):
@@ -833,17 +944,16 @@ class BaseSummaryTimeoutUpdater(object):
         self.update_now()
 
 
-class SummaryAppUpdater(BaseSummaryUpdater):
+class ScanAppUpdater(BaseScanUpdater):
 
-    """Update the summary app."""
+    """Update the scan app."""
 
     def __init__(self, hosts, suite_treemodel, suite_treeview, owner=None,
                  poll_interval=None):
         self.suite_treemodel = suite_treemodel
         self.suite_treeview = suite_treeview
-        self._fetch_suite_titles()
-        super(SummaryAppUpdater, self).__init__(hosts, owner=owner,
-                                                poll_interval=poll_interval)
+        super(ScanAppUpdater, self).__init__(hosts, owner=owner,
+                                             poll_interval=poll_interval)
 
     def _add_expanded_row(self, view, rpath, row_ids):
         """Add user-expanded rows to a list of suite and hosts to be
@@ -872,134 +982,105 @@ class SummaryAppUpdater(BaseSummaryUpdater):
 
     def clear_stopped_suites(self):
         """Clear stopped suite information that may have built up."""
-        self.stop_summaries.clear()
+        self.stopped_hosts_suites_info.clear()
         gobject.idle_add(self.update)
 
-    def update(self, suite_update_times=None):
+    def update(self):
         """Update the Applet."""
         row_ids = self._get_user_expanded_row_ids()
-        statuses = copy.deepcopy(self.statuses)
-        stop_summaries = copy.deepcopy(self.stop_summaries)
-        suite_update_times = copy.deepcopy(self.suite_update_times)
-        if suite_update_times is None:
-            suite_update_times = time.time()
+        info = copy.deepcopy(self.hosts_suites_info)
+        stop_info = copy.deepcopy(self.stopped_hosts_suites_info)
         self.suite_treemodel.clear()
         suite_host_tuples = []
         for host in self.hosts:
-            suites = (statuses.get(host, {}).keys() +
-                      stop_summaries.get(host, {}).keys())
+            suites = (info.get(host, {}).keys() +
+                      stop_info.get(host, {}).keys())
             for suite in suites:
-                suite_host_tuples.append((suite, host))
+                if (suite, host) not in suite_host_tuples:
+                    suite_host_tuples.append((suite, host))
         suite_host_tuples.sort()
-        self._fetch_suite_titles()
         for suite, host in suite_host_tuples:
-            if suite in statuses.get(host, {}):
-                status_map_items = statuses[host][suite]
+            if suite in info.get(host, {}):
+                suite_info = info[host][suite]
                 is_stopped = False
-                suite_time = suite_update_times[host][suite]
-                last_updated_time = time.time()
             else:
-                info = stop_summaries[host][suite]
-                status_map, suite_time = info
-                status_map_items = status_map
+                suite_info = stop_info[host][suite]
                 is_stopped = True
-            states_list = [item[2] for item in (sorted(status_map_items,
-                           key=itemgetter(2)))]
-            states = [key + " " + str(len(list(group))) for key, group in
-                      groupby(states_list)]
-            cycle_status = []
-            cycle_list = []
-            cycle_sort = sorted(status_map_items, key=itemgetter(1, 2))
-            for key, group in groupby(cycle_sort, itemgetter(1)):
-                cycle_sort_3 = [item[1] for item in list(group)]
-                cycles = [key for key, group in groupby(cycle_sort_3)]
-                cycle_list.append(cycles[0])
-            for key, group in groupby(cycle_sort, itemgetter(1)):
-                cycle_sort_2 = [item[2] for item in list(group)]
-                cycle_statuse = [key + " " + str(len(list(group)))
-                                 for key, group in groupby(cycle_sort_2)]
-                cycle_status.append(tuple(cycle_statuse))
-            title = self.suite_titles.get(suite)
-            model_data = [host, suite, is_stopped, title, suite_time,
-                          last_updated_time]
-            model_data += [None]
-            distinct_states = len(task_state.legal)
-            model_data += [' '.join(states[:distinct_states])]
-            parent_iter = self.suite_treemodel.append(None, model_data)
-            for i in range(len(cycle_list)):
-                try:
-                    active_cycle = cycle_status[i]
-                    model_data = [None, None, is_stopped, None, suite_time,
-                                  last_updated_time]
-                    model_data += [cycle_list[i]]
-                    model_data += [' '.join(active_cycle[:distinct_states])]
-                except:
-                    model_data += [None]
-                    model_data += [None]
-                self.suite_treemodel.append(parent_iter, model_data)
+            suite_updated_time = suite_info.get(
+                "update-time", int(time.time())
+            )
+            title = suite_info.get("title")
+
+            for key in sorted(suite_info):
+                if key.startswith("states"):
+                    # Set up the columns, including the cycle point column.
+                    if key == "states":
+                        model_data = [
+                            host, suite, is_stopped, title, suite_updated_time]
+                        model_data.append(None)
+                    else:
+                        model_data = [
+                            None, None, is_stopped, title, suite_updated_time]
+                        model_data.append(key.replace("states:", "", 1))
+
+                    # Add the state count column (e.g. 'failed 1 succeeded 2').
+                    states_text = ""
+                    for state, number in sorted(suite_info[key].items(),
+                                                key=lambda _: _[1]):
+                        if state != "runahead":
+                            # 'runahead' states are usually hidden.
+                            states_text += '%s %d ' % (state, number)
+                    if not states_text:
+                        # Purely runahead cycle.
+                        continue
+                    model_data.append(states_text.rstrip())
+                    if key == "states":
+                        parent_iter = self.suite_treemodel.append(
+                            None, model_data)
+                    else:
+                        self.suite_treemodel.append(parent_iter, model_data)
         self.suite_treemodel.foreach(self._expand_row, row_ids)
         return False
 
-    def _fetch_suite_titles(self):
-        try:
-            dbfile = None
-            if self.owner is not None:
-                dbfile = os.path.join('~' + self.owner, '.cylc', 'REGDB')
-                dbfile = os.path.expanduser(dbfile)
-            db = localdb(file=dbfile)
-            suite_metadata = db.get_list()
-        except Exception:
-            suite_metadata = []
-        self.suite_titles = {}
-        for suite, suite_dir, suite_title in suite_metadata:
-            self.suite_titles[suite] = suite_title
-
-
-def get_new_statuses_and_stop_summaries_updatetime(hosts, owner,
-                                        prev_stop_summaries=None,
-                                        prev_suites=None,
-                                        stop_suite_clear_time=86400):
-    """Return dictionaries of statuses, stop_summaries and updatetimes."""
+
+def update_hosts_suites_info(hosts, owner, prev_stopped_hosts_suites_info=None,
+                             prev_hosts_suites=None,
+                             stop_suite_clear_time=86400):
+    """Return dictionaries of host suite info and stopped host suite info."""
     hosts = copy.deepcopy(hosts)
-    host_suites = get_host_suites(hosts, owner=owner)
-    if prev_stop_summaries is None:
-        prev_stop_summaries = {}
-    if prev_suites is None:
-        prev_suites = []
-    statuses = {}
-    suite_update_times = {}
-    stop_summaries = copy.deepcopy(prev_stop_summaries)
+    hosts_suites_info = get_hosts_suites_info(hosts, owner=owner)
+
+    if prev_stopped_hosts_suites_info is None:
+        prev_stopped_hosts_suites_info = {}
+    if prev_hosts_suites is None:
+        prev_hosts_suites = []
+    stopped_hosts_suites_info = copy.deepcopy(prev_stopped_hosts_suites_info)
     current_time = time.time()
-    current_suites = []
-    for host, suites in host_suites.items():
-        for suite in suites:
-            task_cycle_statuses, update_time = (
-                get_task_cycle_statuses_updatetime(host, suite, owner=owner))
-            if task_cycle_statuses is None:
-                continue
-            statuses.setdefault(host, {})
-            statuses[host].setdefault(suite, {})
-            statuses[host][suite] = task_cycle_statuses
-            suite_update_times.setdefault(host, {})
-            suite_update_times[host].setdefault(suite, {})
-            suite_update_times[host][suite] = update_time
-            if (host in stop_summaries and
-                suite in stop_summaries[host]):
-                stop_summaries[host].pop(suite)
-            current_suites.append((host, suite))
-    for host, suite in prev_suites:
-        if (host, suite) not in current_suites:
-            stop_summaries.setdefault(host, {})
-            summary_statuses, update_time = (
-                get_task_cycle_statuses_updatetime(host, suite, owner=owner))
-            if summary_statuses is None:
+    current_hosts_suites = []
+    for host, suites in hosts_suites_info.items():
+        for suite, suite_info in suites.items():
+            if 'state' not in suite_info or 'update-time' not in suite_info:
                 continue
-            stop_summaries[host][suite] = (summary_statuses,
-                                           current_time)
-    prev_suites = copy.deepcopy(current_suites)
-    for host in stop_summaries:
-        for suite in stop_summaries[host].keys():
-            if (stop_summaries[host][suite][1] +
-                stop_suite_clear_time < current_time):
-                stop_summaries[host].pop(suite)
-    return statuses, stop_summaries, suite_update_times
+            if (host in stopped_hosts_suites_info and
+                    suite in stopped_hosts_suites_info[host]):
+                stopped_hosts_suites_info[host].pop(suite)
+            current_hosts_suites.append((host, suite))
+
+    # Detect newly stopped suites and get some info for them.
+    for host, suite in prev_hosts_suites:
+        if (host, suite) not in current_hosts_suites:
+            stopped_hosts_suites_info.setdefault(host, {})
+            suite_info = get_unscannable_suite_info(host, suite, owner=owner)
+            if suite_info:
+                stopped_hosts_suites_info[host][suite] = suite_info
+
+    # Remove expired stopped suites.
+    for host in stopped_hosts_suites_info:
+        remove_suites = []
+        for suite, suite_info in stopped_hosts_suites_info[host].items():
+            update_time = suite_info.get('update-time', 0)
+            if (update_time + stop_suite_clear_time < current_time):
+                remove_suites.append(suite)
+        for suite in remove_suites:
+            stopped_hosts_suites_info[host].pop(suite)
+    return hosts_suites_info, stopped_hosts_suites_info
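
For orientation, the refactor above collapses the old statuses/stop_summaries/
suite_update_times triple into two nested dictionaries keyed by host and then
suite, and remembers the (host, suite) pairs seen on the previous poll so that
suites which have just disappeared can be recorded as "stopped" and expired
after stop_suite_clear_time seconds.  Below is a minimal, self-contained sketch
of that bookkeeping only; scan() and fetch_stopped() are hypothetical stand-ins
for cylc's real get_hosts_suites_info() and get_unscannable_suite_info()
helpers, not part of its API.

    import copy
    import time

    def track_stopped_suites(scan, fetch_stopped, hosts, prev_stopped=None,
                             prev_pairs=None, clear_time=86400):
        """Sketch: split scan results into running and recently-stopped."""
        if prev_stopped is None:
            prev_stopped = {}
        if prev_pairs is None:
            prev_pairs = []
        running = scan(hosts)                  # {host: {suite: info-dict}}
        stopped = copy.deepcopy(prev_stopped)  # {host: {suite: info-dict}}
        now = time.time()

        # A suite that is scannable again is no longer "stopped".
        current_pairs = []
        for host, suites in running.items():
            for suite in suites:
                stopped.get(host, {}).pop(suite, None)
                current_pairs.append((host, suite))

        # A suite seen last time but not this time has just stopped.
        for host, suite in prev_pairs:
            if (host, suite) not in current_pairs:
                info = fetch_stopped(host, suite)
                if info:
                    stopped.setdefault(host, {})[suite] = info

        # Forget stopped suites whose info is older than clear_time seconds.
        for host, suites in stopped.items():
            for suite in [name for name, info in suites.items()
                          if info.get("update-time", 0) + clear_time < now]:
                suites.pop(suite)

        return running, stopped, current_pairs

The gscan/gpanel updaters then only need to carry the three returned values
between polls, which is what the prev_hosts_suites and
stopped_hosts_suites_info attributes above do.
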
diff --git a/lib/cylc/gui/logviewer.py b/lib/cylc/gui/logviewer.py
index ef27d0c..d6375db 100644
--- a/lib/cylc/gui/logviewer.py
+++ b/lib/cylc/gui/logviewer.py
@@ -18,17 +18,17 @@
 
 import gtk
 import pygtk
-####pygtk.require('2.0')
-import time, os, re, sys
-from warning_dialog import warning_dialog
-from tailer import tailer
+import os
+from cylc.gui.tailer import Tailer
+from cylc.gui.warning_dialog import warning_dialog
 import pango
 
+
 class logviewer(object):
-    def __init__( self, name, dir, file, warning_re=None, critical_re=None ):
+    def __init__(self, name, dirname, filename):
         self.name = name
-        self.dir = dir
-        self.file = file
+        self.dirname = dirname
+        self.filename = filename
         self.t = None
 
         self.find_current = None
@@ -36,57 +36,52 @@ class logviewer(object):
         self.search_warning_done = False
 
         self.create_gui_panel()
-        logbuffer = self.logview.get_buffer()
-
-        self.critical_re = critical_re
-        self.warning_re = warning_re
+        self.logview.get_buffer()
 
         self.connect()
 
-    def clear_and_reconnect( self ):
-        self.t.quit = True
+    def clear_and_reconnect(self):
+        self.t.stop()
         self.clear()
         self.connect()
 
     def clear(self):
         logbuffer = self.logview.get_buffer()
-        s,e = logbuffer.get_bounds()
-        logbuffer.delete( s,e )
+        s, e = logbuffer.get_bounds()
+        logbuffer.delete(s, e)
 
-    def path( self ):
-        if self.dir:
-            return self.dir + '/' + self.file
+    def path(self):
+        if self.dirname and not os.path.isabs(self.filename):
+            return os.path.join(self.dirname, self.filename)
         else:
-            return self.file
+            return self.filename
 
-    def connect( self ):
-        self.t = tailer( self.logview, self.path(),
-                warning_re=self.warning_re, critical_re=self.critical_re)
-        ####print "Starting log viewer thread for " + self.name
+    def connect(self):
+        self.t = Tailer(self.logview, self.path())
         self.t.start()
 
-    def quit_w_e( self, w, e ):
-        self.t.quit = True
+    def quit_w_e(self, w, e):
+        self.t.stop()
 
-    def quit( self ):
-        self.t.quit = True
+    def quit(self):
+        self.t.stop()
 
-    def get_widget( self ):
+    def get_widget(self):
         return self.vbox
 
-    def reset_logbuffer( self ):
+    def reset_logbuffer(self):
         # clear log buffer iters and tags
         logbuffer = self.logview.get_buffer()
-        s,e = logbuffer.get_bounds()
-        logbuffer.remove_all_tags( s,e )
+        s, e = logbuffer.get_bounds()
+        logbuffer.remove_all_tags(s, e)
         self.find_current_iter = None
         self.find_current = None
 
-    def enter_clicked( self, e, tv ):
-        self.on_find_clicked( tv, e )
+    def enter_clicked(self, e, tv):
+        self.on_find_clicked(tv, e)
 
-    def on_find_clicked( self, tv, e ):
-        needle = e.get_text ()
+    def on_find_clicked(self, tv, e):
+        needle = e.get_text()
         if not needle:
             return
 
@@ -94,78 +89,79 @@ class logviewer(object):
         self.freeze_button.set_active(True)
         self.freeze_button.set_label('Reconnect')
         if not self.search_warning_done:
-            warning_dialog( "Find Next disconnects the live feed; click Reconnect when you're done" ).warn()
+            warning_dialog(
+                "Find Next disconnects the live feed;" +
+                " click Reconnect when you're done").warn()
             self.search_warning_done = True
 
-        tb = tv.get_buffer ()
+        tb = tv.get_buffer()
 
         if needle == self.find_current:
             s = self.find_current_iter
         else:
-            s,e = tb.get_bounds()
-            tb.remove_all_tags( s,e )
+            s, e = tb.get_bounds()
+            tb.remove_all_tags(s, e)
             s = tb.get_end_iter()
-            tv.scroll_to_iter( s, 0 )
+            tv.scroll_to_iter(s, 0)
         try:
-            f, l = s.backward_search (needle, gtk.TEXT_SEARCH_TEXT_ONLY)
+            f, l = s.backward_search(needle, gtk.TEXT_SEARCH_TEXT_ONLY)
         except:
-            warning_dialog( '"' + needle + '"' + " not found" ).warn()
+            warning_dialog('"' + needle + '"' + " not found").warn()
         else:
-            tag = tb.create_tag( None, background="#70FFA9" )
-            tb.apply_tag( tag, f, l )
+            tag = tb.create_tag(None, background="#70FFA9")
+            tb.apply_tag(tag, f, l)
             self.find_current_iter = f
             self.find_current = needle
-            tv.scroll_to_iter( f, 0 )
+            tv.scroll_to_iter(f, 0)
 
-    def freeze_log( self, b ):
+    def freeze_log(self, b):
         # TODO - HANDLE MORE STUFF IN THREADS LIKE THIS, RATHER THAN
         # PASSING IN ARGUMENTS?
         if b.get_active():
             self.t.freeze = True
-            b.set_label( 'Re_connect' )
+            b.set_label('Re_connect')
             self.reset_logbuffer()
         else:
             self.t.freeze = False
-            b.set_label( 'Dis_connect' )
+            b.set_label('Dis_connect')
 
         return False
 
-    def create_gui_panel( self ):
+    def create_gui_panel(self):
         self.logview = gtk.TextView()
-        self.logview.set_editable( False )
+        self.logview.set_editable(False)
         # Use a monospace font. This is safe - by testing - setting an
         # illegal font description has no effect.
-        self.logview.modify_font( pango.FontDescription("monospace") )
+        self.logview.modify_font(pango.FontDescription("monospace"))
 
         searchbox = gtk.HBox()
         entry = gtk.Entry()
-        entry.connect( "activate", self.enter_clicked, self.logview )
-        searchbox.pack_start (entry, True)
-        b = gtk.Button ("Find Next")
-        b.connect_object ('clicked', self.on_find_clicked, self.logview, entry)
-        searchbox.pack_start (b, False)
+        entry.connect("activate", self.enter_clicked, self.logview)
+        searchbox.pack_start(entry, True)
+        b = gtk.Button("Find Next")
+        b.connect_object('clicked', self.on_find_clicked, self.logview, entry)
+        searchbox.pack_start(b, False)
 
         self.hbox = gtk.HBox()
 
-        self.freeze_button = gtk.ToggleButton( "Dis_connect" )
+        self.freeze_button = gtk.ToggleButton("Dis_connect")
         self.freeze_button.set_active(False)
-        self.freeze_button.connect("toggled", self.freeze_log )
+        self.freeze_button.connect("toggled", self.freeze_log)
 
-        searchbox.pack_end( self.freeze_button, False )
+        searchbox.pack_end(self.freeze_button, False)
 
         sw = gtk.ScrolledWindow()
-        #sw.set_border_width(5)
-        sw.set_policy( gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC )
-        sw.add( self.logview )
+        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
+        sw.add(self.logview)
         self.logview.set_border_width(5)
-        self.logview.modify_bg( gtk.STATE_NORMAL, gtk.gdk.color_parse( "#fff" ))
+        self.logview.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("#fff"))
 
         self.vbox = gtk.VBox()
 
-        self.log_label = gtk.Label( self.path() )
-        self.log_label.modify_fg( gtk.STATE_NORMAL, gtk.gdk.color_parse( "#00a" ))
-        self.vbox.pack_start( self.log_label, False )
+        self.log_label = gtk.Label(self.path())
+        self.log_label.modify_fg(gtk.STATE_NORMAL, gtk.gdk.color_parse("#00a"))
+        self.vbox.pack_start(self.log_label, False)
 
-        self.vbox.pack_start( sw, True )
-        self.vbox.pack_start( searchbox, False )
-        self.vbox.pack_start( self.hbox, False )
+        self.vbox.pack_start(sw, True)
+        self.vbox.pack_start(searchbox, False)
+        self.vbox.pack_start(self.hbox, False)
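
One small aside on the new path() method above: os.path.join() already discards
earlier components when a later one is absolute, so the explicit
os.path.isabs() check mainly documents the intent that an absolute filename
wins over dirname.  A quick illustration (plain Python, nothing cylc-specific):

    import os.path

    # Relative filename: joined onto the directory.
    assert os.path.join("/var/log", "suite.log") == "/var/log/suite.log"
    # Absolute filename: the directory is ignored.
    assert os.path.join("/var/log", "/tmp/other.log") == "/tmp/other.log"
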
diff --git a/lib/cylc/gui/option_group.py b/lib/cylc/gui/option_group.py
index 28941e5..83739b8 100644
--- a/lib/cylc/gui/option_group.py
+++ b/lib/cylc/gui/option_group.py
@@ -18,67 +18,68 @@
 
 import gtk
 
+
 class controlled_option_group(object):
-    def __init__( self, title, option=None, reverse=False ):
+    def __init__(self, title, option=None, reverse=False):
         self.title = title
         self.option = option
-        self.entries = {}        # name -> ( entry, label, option )
-        self.arg_entries = {}    # name -> ( entry, label )
-        self.checkbutton = gtk.CheckButton( title )
-        self.checkbutton.connect( "toggled", self.greyout )
+        self.entries = {}        # name -> (entry, label, option)
+        self.arg_entries = {}    # name -> (entry, label)
+        self.checkbutton = gtk.CheckButton(title)
+        self.checkbutton.connect("toggled", self.greyout)
         if reverse:
             self.checkbutton.set_active(True)
             self.greyout()
 
-    def greyout( self, data=None ):
+    def greyout(self, data=None):
         if self.checkbutton.get_active():
             for name in self.entries:
-                (entry,label,option) = self.entries[name]
+                entry, label, option = self.entries[name]
                 entry.set_sensitive(True)
                 label.set_sensitive(True)
         else:
             for name in self.entries:
-                (entry,label,option) = self.entries[name]
+                entry, label, option = self.entries[name]
                 entry.set_sensitive(False)
                 label.set_sensitive(False)
 
-    def add_arg_entry( self, name, max_chars=None, default=None ):
-        label = gtk.Label( name )
+    def add_arg_entry(self, name, max_chars=None, default=None):
+        label = gtk.Label(name)
         entry = gtk.Entry()
         if max_chars:
-            entry.set_max_length( max_chars )
+            entry.set_max_length(max_chars)
         if default:
-            entry.set_text( default )
-        entry.set_sensitive( False )
-        self.arg_entries[ name ] = ( entry, label )
+            entry.set_text(default)
+        entry.set_sensitive(False)
+        self.arg_entries[name] = (entry, label)
 
-    def add_entry( self, name, option, max_chars=None, default=None ):
-        label = gtk.Label( name )
+    def add_entry(self, name, option, max_chars=None, default=None):
+        label = gtk.Label(name)
         entry = gtk.Entry()
         if max_chars:
-            entry.set_max_length( max_chars )
+            entry.set_max_length(max_chars)
         if default:
-            entry.set_text( default )
-        entry.set_sensitive( False )
-        self.entries[ name ] = ( entry, label, option )
+            entry.set_text(default)
+        entry.set_sensitive(False)
+        self.entries[name] = (entry, label, option)
 
-    def pack( self, vbox ):
-        vbox.pack_start( self.checkbutton )
+    def pack(self, vbox):
+        vbox.pack_start(self.checkbutton)
         for name in self.entries:
-            ( entry, label, option ) = self.entries[name]
+            (entry, label, option) = self.entries[name]
             box = gtk.HBox()
-            box.pack_start( label, True )
-            box.pack_start( entry, True )
-            vbox.pack_start( box )
+            box.pack_start(label, True)
+            box.pack_start(entry, True)
+            vbox.pack_start(box)
         for name in self.arg_entries:
-            ( entry, label ) = self.entries[name]
+            (entry, label) = self.arg_entries[name]
             box = gtk.HBox()
-            box.pack_start( label, True )
-            box.pack_start( entry, True )
-            vbox.pack_start( box )
+            box.pack_start(label, True)
+            box.pack_start(entry, True)
+            vbox.pack_start(box)
         self.greyout()
 
-    def get_options( self ):
+    def get_options(self):
         if not self.checkbutton.get_active():
             return ''
         if self.option:
@@ -97,46 +98,46 @@ class controlled_option_group(object):
 
 
 class option_group(object):
-    def __init__( self ):
-        self.entries = {}        # name -> ( entry, label, option )
-        self.arg_entries = {}    # name -> ( entry, label )
+    def __init__(self):
+        self.entries = {}        # name -> (entry, label, option)
+        self.arg_entries = {}    # name -> (entry, label)
 
-    def add_arg_entry( self, name, max_chars=None, default=None ):
-        label = gtk.Label( name )
+    def add_arg_entry(self, name, max_chars=None, default=None):
+        label = gtk.Label(name)
         entry = gtk.Entry()
         if max_chars:
-            entry.set_max_length( max_chars )
+            entry.set_max_length(max_chars)
         if default:
-            entry.set_text( default )
-        self.arg_entries[ name ] = ( entry, label )
+            entry.set_text(default)
+        self.arg_entries[name] = (entry, label)
 
-    def add_entry( self, name, option, max_chars=None, default=None ):
-        label = gtk.Label( name )
+    def add_entry(self, name, option, max_chars=None, default=None):
+        label = gtk.Label(name)
         entry = gtk.Entry()
         if max_chars:
-            entry.set_max_length( max_chars )
+            entry.set_max_length(max_chars)
         if default:
-            entry.set_text( default )
-        self.entries[ name ] = ( entry, label, option )
+            entry.set_text(default)
+        self.entries[name] = (entry, label, option)
 
-    def pack( self, vbox ):
+    def pack(self, vbox):
         for name in self.entries:
-            ( entry, label, option ) = self.entries[name]
+            (entry, label, option) = self.entries[name]
             box = gtk.HBox()
-            box.pack_start( label, True )
-            box.pack_start( entry, True )
-            vbox.pack_start( box )
+            box.pack_start(label, True)
+            box.pack_start(entry, True)
+            vbox.pack_start(box)
         for name in self.arg_entries:
-            ( entry, label ) = self.arg_entries[name]
+            (entry, label) = self.arg_entries[name]
             box = gtk.HBox()
-            box.pack_start( label, True )
-            box.pack_start( entry, True )
-            vbox.pack_start( box )
+            box.pack_start(label, True)
+            box.pack_start(entry, True)
+            vbox.pack_start(box)
 
-    def get_entries( self ):
+    def get_entries(self):
         return self.entries + self.arg_entries
 
-    def get_options( self ):
+    def get_options(self):
         options = ''
         for name in self.entries:
             (entry, label, option) = self.entries[name]
diff --git a/lib/cylc/gui/tailer.py b/lib/cylc/gui/tailer.py
index 720eaff..0c148bc 100644
--- a/lib/cylc/gui/tailer.py
+++ b/lib/cylc/gui/tailer.py
@@ -15,111 +15,163 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Logic to tail follow a log file for a GUI viewer."""
 
 import gobject
-import threading, subprocess
-import os, sys, re, time
-from cylc import tail
-from warning_dialog import warning_dialog
-
-class tailer(threading.Thread):
-    def __init__( self, logview, log, proc=None, tag=None, warning_re=None, critical_re=None ):
-        super( tailer, self).__init__()
+import os
+from pipes import quote
+import re
+import select
+import shlex
+import signal
+from subprocess import Popen, PIPE, STDOUT
+import threading
+from time import sleep
+
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.gui.warning_dialog import warning_dialog
+
+
+class Tailer(threading.Thread):
+    """Logic to tail follow a log file for a GUI viewer.
+
+    logview -- A GUI view to display the content of the log file.
+    filename -- The name of the log file.
+    cmd_tmpl -- The command template used to follow the log file.
+                (global cfg '[hosts][HOST]remote/local tail command template')
+    pollable -- If specified, it must implement a pollable.poll() method,
+                which is called at regular intervals.
+    """
+
+    READ_SIZE = 4096
+    TAGS = {
+        "CRITICAL": [re.compile(r"\b(?:CRITICAL|ERROR)\b"), "red"],
+        "WARNING": [re.compile(r"\bWARNING\b"), "#a83fd3"]}
+
+    def __init__(self, logview, filename, cmd_tmpl=None, pollable=None,
+                 filters=None):
+        super(Tailer, self).__init__()
+
         self.logview = logview
+        self.filename = filename
+        self.cmd_tmpl = cmd_tmpl
+        self.pollable = pollable
+        self.filters = filters
+
         self.logbuffer = logview.get_buffer()
-        self.logfile = log
         self.quit = False
-        self.tag = tag
-        self.proc = proc
+        self.proc = None
         self.freeze = False
-        self.warning_re = warning_re
-        self.critical_re = critical_re
-        self.warning_tag = self.logbuffer.create_tag( None, foreground = "#a83fd3" )
-        self.critical_tag = self.logbuffer.create_tag( None, foreground = "red" )
-
-    def clear( self ):
-        s,e = self.logbuffer.get_bounds()
-        self.logbuffer.delete( s,e )
-
-    def run( self ):
-        #gobject.idle_add( self.clear )
-        #print "Starting tailer thread"
-
-        if ":" in self.logfile:
-            # Handle remote task output statically - can't get a live
-            # feed using 'ssh owner at host tail -f file' in a subprocess
-            # because p.stdout.readline() blocks waiting for more output.
-            #   Use shell=True in case the task owner is defined by
-            # environment variable (e.g. owner=nwp_$SYS, where
-            # SYS=${HOME##*_} for usernames like nwp_oper, nwp_test)
-            #   But quote the remote command so that '$HOME' in it is
-            # interpreted on the remote machine.
-            loc, file = self.logfile.split(':')
-            command = ["ssh -oBatchMode=yes " + loc + " 'cat " + file + "'"]
-            try:
-                p = subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True )
-            except OSError, x:
-                # Probably: ssh command not found
-                out = str(x)
-                out += "\nERROR: failed to invoke ssh to cat the remote log file."
+        self.has_warned_corrupt = False
+        self.tags = {}
+
+    def clear(self):
+        """Clear the log buffer."""
+        pos_start, pos_end = self.logbuffer.get_bounds()
+        self.logbuffer.delete(pos_start, pos_end)
+
+    def run(self):
+        """Invoke the tailer."""
+        command = []
+        if ":" in self.filename:  # remote
+            user_at_host, filename = self.filename.split(':')
+            if "@" in user_at_host:
+                owner, host = user_at_host.split("@", 1)
             else:
-                # Success, or else problems reported by ssh (e.g. host
-                # not found or passwordless access  not configured) go
-                # to stdout/stderr.
-                out = ' '.join(command) + '\n'
-                out += p.communicate()[0]
-
-                out += """
-!!! gcylc WARNING: REMOTE TASK OUTPUT IS NOT LIVE, OPEN THE VIEWER AGAIN TO UPDATE !!!
-"""
-            gobject.idle_add( self.update_gui, out )
-            if self.proc != None:
-                # See comment below
-                self.proc.poll()
+                owner, host = (None, user_at_host)
+            ssh = str(GLOBAL_CFG.get_host_item(
+                "remote shell template", host, owner)).replace(" %s", "")
+            command = shlex.split(ssh) + ["-n", user_at_host]
+            cmd_tmpl = str(GLOBAL_CFG.get_host_item(
+                "remote tail command template", host, owner))
         else:
-            # Live feed (pythonic 'tail -f') for local job submission.
-            #if not os.path.exists( self.logfile ):
-            #    #gobject.idle_add( self.warn, "File not found: " + self.logfile )
-            #    print "File not found: " + self.logfile
-            #    #print "Disconnecting from tailer thread"
-            #    return
+            filename = self.filename
+            cmd_tmpl = str(GLOBAL_CFG.get_host_item(
+                "local tail command template"))
+
+        if self.cmd_tmpl:
+            cmd_tmpl = self.cmd_tmpl
+        command += shlex.split(cmd_tmpl % {"filename": filename})
+        try:
+            self.proc = Popen(
+                command, stdout=PIPE, stderr=STDOUT, preexec_fn=os.setpgrp)
+        except OSError as exc:
+            # E.g. ssh command not found
+            dialog = warning_dialog("%s: %s" % (
+                exc, " ".join([quote(item) for item in command])))
+            gobject.idle_add(dialog.warn)
+            return
+        poller = select.poll()
+        poller.register(self.proc.stdout.fileno())
+
+        buf = ""
+        while not self.quit and self.proc.poll() is None:
+            try:
+                self.pollable.poll()
+            except (TypeError, AttributeError):
+                pass
+            if self.freeze or not poller.poll(100):  # 100 ms timeout
+                sleep(1)
+                continue
+            # Both self.proc.stdout.read(SIZE) and self.proc.stdout.readline()
+            # can block. However os.read(FILENO, SIZE) should be fine after a
+            # poller.poll().
             try:
-                gen = tail.tail( open( self.logfile ))
-            except Exception as x:
-                # e.g. file not found
-                dialog = warning_dialog( type(x).__name__ + ": " + str(x) )
+                data = os.read(self.proc.stdout.fileno(), self.READ_SIZE)
+            except (IOError, OSError) as exc:
+                dialog = warning_dialog("%s: %s" % (
+                    exc, " ".join([quote(item) for item in command])))
                 gobject.idle_add(dialog.warn)
-                return
-
-            while not self.quit:
-                if not self.freeze:
-                    line = gen.next()
-                    if line:
-                        gobject.idle_add( self.update_gui, line )
-                if self.proc != None:
-                    # poll the subprocess; this reaps its exit code and thus
-                    # prevents the pid of the finished process staying in
-                    # the OS process table (a "defunct process") until the
-                    # parent process exits.
-                    self.proc.poll()
-                # The following doesn't work, not sure why, perhaps because
-                # the top level subprocess finishes before the next one
-                # (shows terminated too soon).
-                #    if self.proc.poll() != None:
-                #        (poll() returns None if process hasn't finished yet.)
-                #        #print 'process terminated'
-                #        gobject.idle_add( self.update_gui, '(PROCESS COMPLETED)\n' )
-                #        break
-            #print "Disconnecting from tailer thread"
-
-    def update_gui( self, line ):
-        if self.critical_re and re.search( self.critical_re, line ):
-            self.logbuffer.insert_with_tags( self.logbuffer.get_end_iter(), line, self.critical_tag )
-        elif self.warning_re and re.search( self.warning_re, line ):
-            self.logbuffer.insert_with_tags( self.logbuffer.get_end_iter(), line, self.warning_tag )
-        elif self.tag:
-            self.logbuffer.insert_with_tags( self.logbuffer.get_end_iter(), line, self.tag )
+                break
+            if data:
+                # Manage buffer, only add full lines to display to ensure
+                # filtering and tagging work
+                for line in data.splitlines(True):
+                    if not line.endswith("\n"):
+                        buf += line
+                        continue
+                    elif buf:
+                        line = buf + line
+                        buf = ""
+                    if (not self.filters or
+                            all([re.search(f, line) for f in self.filters])):
+                        gobject.idle_add(self.update_gui, line)
+            sleep(0.01)
+        self.stop()
+
+    def stop(self):
+        """Stop the tailer."""
+        self.quit = True
+        try:
+            # It is important to kill follower processes like "tail -F",
+            # otherwise they will hang the GUI.
+            os.killpg(self.proc.pid, signal.SIGTERM)
+            self.proc.wait()
+        except (AttributeError, OSError):
+            pass
+
+    def update_gui(self, line):
+        """Update the GUI viewer."""
+        try:
+            line.decode('utf-8')
+        except UnicodeDecodeError as exc:
+            if self.has_warned_corrupt:
+                return False
+            self.has_warned_corrupt = True
+            dialog = warning_dialog("Problem reading file:\n    %s: %s" %
+                                    (type(exc).__name__, exc))
+            gobject.idle_add(dialog.warn)
+            return False
+        for word, setting in self.TAGS.items():
+            rec, colour = setting
+            if rec.match(line):
+                if word not in self.tags:
+                    self.tags[word] = self.logbuffer.create_tag(
+                        None, foreground=colour)
+                self.logbuffer.insert_with_tags(
+                    self.logbuffer.get_end_iter(), line, self.tags[word])
+                break
         else:
-            self.logbuffer.insert( self.logbuffer.get_end_iter(), line )
-        self.logview.scroll_to_iter( self.logbuffer.get_end_iter(), 0 )
+            self.logbuffer.insert(self.logbuffer.get_end_iter(), line)
+        self.logview.scroll_to_iter(self.logbuffer.get_end_iter(), 0)
         return False
diff --git a/lib/cylc/gui/updater.py b/lib/cylc/gui/updater.py
index e8c3854..cf5bea6 100644
--- a/lib/cylc/gui/updater.py
+++ b/lib/cylc/gui/updater.py
@@ -16,25 +16,30 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from cylc import cylc_pyro_client, dump
+import re
+import sys
+import gtk
+import Pyro
+import atexit
+import gobject
+import threading
+from time import sleep, time, ctime
+
+import cylc.flags
+from cylc.dump import get_stop_state_summary
+from cylc.network.suite_state import (
+    StateSummaryClient, SuiteStillInitialisingError)
+from cylc.network.suite_info import SuiteInfoClient
+from cylc.network.suite_log import SuiteLogClient
+from cylc.network.suite_command import SuiteCommandClient
+from cylc.network.port_file import PortFileError
 from cylc.task_state import task_state
 from cylc.gui.dot_maker import DotMaker
-from cylc.state_summary import get_id_summary
-from cylc.strftime import strftime
 from cylc.wallclock import get_time_string_from_unix_time
 from cylc.task_id import TaskID
-from cylc.version import CYLC_VERSION  # Warning: will SystemExit on failure.
-from warning_dialog import warning_dialog
-import gobject
-import gtk
-import Pyro
-import re
-import string
-import sys
-import threading
-from time import sleep, time
+from cylc.version import CYLC_VERSION
+from cylc.gui.warning_dialog import warning_dialog
 
-from cylc import cylc_pyro_client, dump
 
 class PollSchd(object):
     """Keep information on whether the updater should poll or not."""
@@ -57,6 +62,13 @@ class PollSchd(object):
             self.start()
 
     def ready(self):
+        is_ready = self._ready()
+        if cylc.flags.debug:
+            if not is_ready:
+                print >> sys.stderr, "  PollSchd not ready"
+        return is_ready
+
+    def _ready(self):
         """Return True if a poll is ready."""
         if self.t_init is None:
             return True
@@ -68,7 +80,7 @@ class PollSchd(object):
         for k, v in self.DELAYS.items():
             lower, upper = k
             if ((lower is None or dt_init >= lower) and
-                (upper is None or dt_init < upper)):
+                    (upper is None or dt_init < upper)):
                 if dt_prev > v:
                     self.t_prev = time()
                     return True
@@ -78,12 +90,16 @@ class PollSchd(object):
 
     def start(self):
         """Start keeping track of latest poll, if not already started."""
+        if cylc.flags.debug:
+            print >> sys.stderr, '  PollSchd start'
         if self.t_init is None:
             self.t_init = time()
             self.t_prev = None
 
     def stop(self):
         """Stop keeping track of latest poll."""
+        if cylc.flags.debug:
+            print >> sys.stderr, '  PollSchd stop'
         self.t_init = None
         self.t_prev = None
 
@@ -106,18 +122,14 @@ class Updater(threading.Thread):
         self._err_num_log_lines = 10
         self.err_log_size = 0
         self.task_list = []
-        self.state_summary = {}
-        self.full_state_summary = {}
-        self.fam_state_summary = {}
-        self.full_fam_state_summary = {}
-        self.all_families = {}
-        self.triggering_families = {}
-        self.global_summary = {}
+
+        self.clear_data()
+        self.daemon_version = None
+
         self.stop_summary = None
         self.ancestors = {}
         self.ancestors_pruned = {}
-        self.descendants = []
-        self.god = None
+        self.descendants = {}
         self.mode = "waiting..."
         self.dt = "waiting..."
         self.dt_date = None
@@ -126,7 +138,6 @@ class Updater(threading.Thread):
         self._no_update_event = threading.Event()
         self.poll_schd = PollSchd()
         self._flag_new_update()
-        self._reconnect()
         self.ns_defn_order = []
         self.dict_ns_defn_order = {}
         self.restricted_display = restricted_display
@@ -135,185 +146,308 @@ class Updater(threading.Thread):
         self.kept_task_ids = set()
         self.filt_task_ids = set()
 
-    def _flag_new_update( self ):
+        self.connect_fail_warned = False
+        self.version_mismatch_warned = False
+
+        client_args = (self.cfg.suite, self.cfg.owner, self.cfg.host,
+                       self.cfg.pyro_timeout, self.cfg.port, self.cfg.db,
+                       self.cfg.my_uuid)
+        self.state_summary_client = StateSummaryClient(*client_args)
+        self.suite_info_client = SuiteInfoClient(*client_args)
+        self.suite_log_client = SuiteLogClient(*client_args)
+        self.suite_command_client = SuiteCommandClient(*client_args)
+        # Report sign-out on exit.
+        atexit.register(self.state_summary_client.signout)
+
+    def _flag_new_update(self):
         self.last_update_time = time()
 
-    def _retrieve_hierarchy_info(self):
-        self.ancestors = self.sinfo.get('first-parent ancestors')
-        self.ancestors_pruned = self.sinfo.get('first-parent ancestors', True)
-        self.descendants = self.sinfo.get('first-parent descendants')
-        self.all_families = self.sinfo.get('all families')
-        self.triggering_families = self.sinfo.get('triggering families')
-
-    def _reconnect(self):
-        """Connect to the suite daemon and get Pyro client proxies."""
-        self.god = None
-        self.sinfo = None
-        self.log = None
+    def reconnect(self):
+        """Try to reconnect to the suite daemon."""
+        if cylc.flags.debug:
+            print >> sys.stderr, "  reconnection...",
+        # Reset Pyro clients.
+        self.suite_log_client.reset()
+        self.state_summary_client.reset()
+        self.suite_info_client.reset()
+        self.suite_command_client.reset()
         try:
-            client = cylc_pyro_client.client(
-                    self.cfg.suite,
-                    self.cfg.pphrase,
-                    self.cfg.owner,
-                    self.cfg.host,
-                    self.cfg.pyro_timeout,
-                    self.cfg.port )
-            self.god = client.get_proxy( 'state_summary' )
-            self.sinfo = client.get_proxy( 'suite-info' )
-            self.log = client.get_proxy( 'log' )
-            self._retrieve_hierarchy_info()
-        except Exception, x:
-            # (port file not found, if suite not running)
+            self.daemon_version = self.suite_info_client.get_info(
+                'get_cylc_version')
+        except KeyError:
+            self.daemon_version = "??? (pre 6.1.2?)"
+            if cylc.flags.debug:
+                print >> sys.stderr, "succeeded (old daemon)"
+        except PortFileError as exc:
+            # Failed to (re)connect.
+            # Probably normal shutdown; get a stop summary if available.
+            if not self.connect_fail_warned:
+                self.connect_fail_warned = True
+                gobject.idle_add(self.warn, str(exc))
             if self.stop_summary is None:
-                self.stop_summary = dump.get_stop_state_summary(
-                                                       self.cfg.suite,
-                                                       self.cfg.owner,
-                                                       self.cfg.host)
+                self.stop_summary = get_stop_state_summary(
+                    self.cfg.suite, self.cfg.owner, self.cfg.host)
                 self._flag_new_update()
-            return False
-        else:
-            try:
-                daemon_version = self.sinfo.get('get cylc version')
-            except KeyError:
-                daemon_version = "??? (pre 6.1.2?)"
-            if daemon_version != CYLC_VERSION:
-                warning_dialog(
-                    "Warning: cylc version mismatch!\n\n" +
-                    "Suite running with %r.\n" % daemon_version +
-                    "gcylc at %r.\n" % CYLC_VERSION,
-                    self.info_bar.get_toplevel()
-                ).warn()
-            self.stop_summary = None
-            self.err_log_lines = []
-            self.err_log_size = 0
-            self.status = "connected"
-            self.connected = True
-            self.poll_schd.stop()
-            self._flag_new_update()
-            return True
-
-    def connection_lost( self ):
-        self._summary_update_time = None
-        self.state_summary = {}
-        self.full_state_summary = {}
-        self.fam_state_summary = {}
-        self.full_fam_state_summary = {}
-        self.status = "stopped"
-        self.connected = False
+            if self.stop_summary is not None and any(self.stop_summary):
+                gobject.idle_add(
+                    self.info_bar.set_stop_summary, self.stop_summary)
+            return
+        except Exception as exc:
+            if cylc.flags.debug:
+                print >> sys.stderr, "failed: %s" % str(exc)
+            if not self.connect_fail_warned:
+                self.connect_fail_warned = True
+                if isinstance(exc, Pyro.errors.ConnectionDeniedError):
+                    gobject.idle_add(
+                        self.warn,
+                        "ERROR: %s\n\nIncorrect suite passphrase?" % exc)
+                else:
+                    gobject.idle_add(self.warn, str(exc))
+            return
+
+        if cylc.flags.debug:
+            print >> sys.stderr, "succeeded"
+        # Connected.
+        self.connected = True
+        self.set_status("connected")
+        self.connect_fail_warned = False
+
+        self.poll_schd.stop()
+        if cylc.flags.debug:
+            print >> sys.stderr, (
+                "succeeded: daemon v %s" % self.daemon_version)
+        if (self.daemon_version != CYLC_VERSION and
+                not self.version_mismatch_warned):
+            # (Warn only once: reconnect() will be called multiple times
+            # during initialisation of daemons at <= 6.4.0, for which the state
+            # summary object is not connected until all tasks are loaded.)
+            gobject.idle_add(
+                self.warn,
+                "Warning: cylc version mismatch!\n\n" +
+                "Suite running with %r.\n" % self.daemon_version +
+                "gcylc at %r.\n" % CYLC_VERSION)
+            self.version_mismatch_warned = True
+        self.stop_summary = None
+        self.err_log_lines = []
+        self.err_log_size = 0
         self._flag_new_update()
-        self.poll_schd.start()
-        self.info_bar.set_state( [] )
-        self.info_bar.set_status( self.status )
-        if self.stop_summary is not None and any(self.stop_summary):
-            self.info_bar.set_stop_summary(self.stop_summary)
-        # GTK IDLE FUNCTIONS MUST RETURN FALSE OR WILL BE CALLED MULTIPLE TIMES
-        self._reconnect()
-        return False
 
-    def set_update( self, should_update ):
+    def set_update(self, should_update):
         if should_update:
             self._no_update_event.clear()
         else:
             self._no_update_event.set()
 
-    def update(self):
-        if self.god is None:
-            gobject.idle_add( self.connection_lost )
-            return False
-
+    def retrieve_err_log(self):
+        """Retrieve suite err log; return True if it has changed."""
         try:
-            new_err_content, new_err_size = self.log.get_err_content(
-                prev_size=self.err_log_size,
-                max_lines=self._err_num_log_lines)
-        except (AttributeError, Pyro.errors.NamingError):
-            # TODO: post-backwards compatibility concerns, remove this handling.
+            new_err_content, new_err_size = (
+                self.suite_log_client.get_err_content(
+                    self.err_log_size, self._err_num_log_lines))
+        except AttributeError:
+            # TODO: post-backwards compatibility concerns, remove this handling
             new_err_content = ""
             new_err_size = self.err_log_size
-        except Pyro.errors.ProtocolError:
-            gobject.idle_add( self.connection_lost )
-            return False
 
         err_log_changed = (new_err_size != self.err_log_size)
         if err_log_changed:
             self.err_log_lines += new_err_content.splitlines()
             self.err_log_lines = self.err_log_lines[-self._err_num_log_lines:]
             self.err_log_size = new_err_size
+        return err_log_changed
 
-        update_summaries = False
+    def retrieve_summary_update_time(self):
+        """Retrieve suite summary update time; return True if changed."""
+        do_update = False
         try:
-            summary_update_time = self.god.get_summary_update_time()
+            summary_update_time = (
+                self.state_summary_client.get_suite_state_summary_update_time()
+            )
             if (summary_update_time is None or
                     self._summary_update_time is None or
                     summary_update_time != self._summary_update_time):
                 self._summary_update_time = summary_update_time
-                update_summaries = True
+                do_update = True
         except AttributeError as e:
-            # TODO: post-backwards compatibility concerns, remove this handling.
-            # Force an update for daemons using the old API.
-            update_summaries = True
-        except (Pyro.errors.ProtocolError, Pyro.errors.NamingError):
-            gobject.idle_add( self.connection_lost )
-            return False
-
-        if update_summaries:
-            try:
-                [glbl, states, fam_states] = self.god.get_state_summary()
-                self._retrieve_hierarchy_info() # may change on reload
-            except (Pyro.errors.ProtocolError, Pyro.errors.NamingError):
-                gobject.idle_add( self.connection_lost )
-                return False
-
-            if not glbl:
-                self.task_list = []
-                return False
-
-            if glbl['stopping']:
-                self.status = 'stopping'
-            elif glbl['paused']:
-                self.status = 'held'
-            elif glbl['will_pause_at']:
-                self.status = 'hold at ' + glbl[ 'will_pause_at' ]
-            elif glbl['will_stop_at']:
-                self.status = 'running to ' + glbl[ 'will_stop_at' ]
-            else:
-                self.status = 'running'
-            self.mode = glbl['run_mode']
+            # TODO: post-backwards compatibility concerns, remove this handling
+            # Force an update for daemons using the old API
+            do_update = True
+        return do_update
+
+    def retrieve_state_summaries(self):
+        glbl, states, fam_states = (
+            self.state_summary_client.get_suite_state_summary())
+        self.ancestors = self.suite_info_client.get_info(
+            'get_first_parent_ancestors')
+        self.ancestors_pruned = self.suite_info_client.get_info(
+            'get_first_parent_ancestors', True)
+        self.descendants = self.suite_info_client.get_info(
+            'get_first_parent_descendants')
+        self.all_families = self.suite_info_client.get_info('get_all_families')
+        self.triggering_families = self.suite_info_client.get_info(
+            'get_triggering_families')
+
+        self.mode = glbl['run_mode']
+
+        if self.cfg.use_defn_order and 'namespace definition order' in glbl:
+            # (protect for compat with old suite daemons)
+            nsdo = glbl['namespace definition order']
+            if self.ns_defn_order != nsdo:
+                self.ns_defn_order = nsdo
+                self.dict_ns_defn_order = dict(zip(nsdo, range(0, len(nsdo))))
+        try:
+            self.dt = get_time_string_from_unix_time(glbl['last_updated'])
+        except (TypeError, ValueError):
+            # Older suite...
+            self.dt = glbl['last_updated'].isoformat()
+        self.global_summary = glbl
+
+        if self.restricted_display:
+            states = self.filter_for_restricted_display(states)
+
+        self.full_state_summary = states
+        self.full_fam_state_summary = fam_states
+        self.refilter()
+
+        # Prioritise which suite state string to display.
+        # 1. Are we stopping, or some variant of 'running'?
+        if glbl['stopping']:
+            self.status = 'stopping'
+        elif glbl['will_pause_at']:
+            self.status = 'running to hold at ' + glbl['will_pause_at']
+        elif glbl['will_stop_at']:
+            self.status = 'running to ' + glbl['will_stop_at']
+        else:
+            self.status = 'running'
 
-            if self.cfg.use_defn_order and 'namespace definition order' in glbl: 
-                # (protect for compat with old suite daemons)
-                nsdo = glbl['namespace definition order']
-                if self.ns_defn_order != nsdo:
-                    self.ns_defn_order = nsdo
-                    self.dict_ns_defn_order = dict(zip(nsdo, range(0,len(nsdo))))
+        # 2. Override with temporary held status.
+        if glbl['paused']:
+            self.status = 'held'
 
+        # 3. Override running or held with reloading.
+        if not self.status == 'stopping':
             try:
-                self.dt = get_time_string_from_unix_time(glbl['last_updated'])
-            except (TypeError, ValueError):
-                # Older suite...
-                self.dt = glbl['last_updated'].isoformat()
-            self.global_summary = glbl
+                if glbl['reloading']:
+                    self.status = 'reloading'
+            except KeyError:
+                # Back compat.
+                pass
 
-            if self.restricted_display:
-                states = self.filter_for_restricted_display(states)
+    def set_stopped(self):
+        self.connected = False
+        self.set_status("stopped")
+        self.poll_schd.start()
+        self._summary_update_time = None
+        self.clear_data()
 
-            self.full_state_summary = states
-            self.full_fam_state_summary = fam_states
-            self.refilter()
+    def set_status(self, status):
+        self.status = status
+        self.info_bar.set_status(self.status)
 
-        if update_summaries or err_log_changed:
-            return True
+    def clear_data(self):
+        self.state_summary = {}
+        self.full_state_summary = {}
+        self.fam_state_summary = {}
+        self.full_fam_state_summary = {}
+        self.all_families = {}
+        self.triggering_families = {}
+        self.global_summary = {}
+
+    def warn(self, msg):
+        """Pop up a warning dialog; call on idle_add!"""
+        warning_dialog(msg, self.info_bar.get_toplevel()).warn()
         return False
 
+    def update(self):
+        if cylc.flags.debug:
+            print >> sys.stderr, "UPDATE", ctime().split()[3],
+        if not self.connected:
+            # Only reconnect via self.reconnect().
+            self.reconnect()
+            if cylc.flags.debug:
+                print >> sys.stderr, "(not connected)"
+            return False
+        if cylc.flags.debug:
+            print >> sys.stderr, "(connected)"
+        try:
+            err_log_changed = self.retrieve_err_log()
+            summaries_changed = self.retrieve_summary_update_time()
+            if summaries_changed:
+                self.retrieve_state_summaries()
+        except SuiteStillInitialisingError:
+            # Connection achieved but state summary data not available yet.
+            if cylc.flags.debug:
+                print >> sys.stderr, "  connected, suite initializing ..."
+            self.set_status("initialising")
+            if self.info_bar.prog_bar_can_start():
+                gobject.idle_add(
+                    self.info_bar.prog_bar_start, "suite initialising...")
+                self.info_bar.set_state([])
+            return False
+        except Pyro.errors.NamingError as exc:
+            if self.daemon_version is not None:
+                # Back compat: in cylc <= 6.4.0 the state summary object was
+                # not connected to Pyro until initialisation had completed.
+                if cylc.flags.debug:
+                    print >> sys.stderr, (
+                        "  daemon <= 6.4.0, suite initializing ...")
+                self.set_status("initialising")
+                if self.info_bar.prog_bar_can_start():
+                    gobject.idle_add(
+                        self.info_bar.prog_bar_start, "suite initialising...")
+                    self.info_bar.set_state([])
+                # Reconnect until we get the suite state object.
+                self.reconnect()
+                return False
+            else:
+                if cylc.flags.debug:
+                    print >> sys.stderr, "  CONNECTION LOST", str(exc)
+                self.set_stopped()
+                if self.info_bar.prog_bar_active():
+                    gobject.idle_add(self.info_bar.prog_bar_stop)
+                self.reconnect()
+                return False
+        except Exception as exc:
+            if self.status == "stopping":
+                # Expected stop: prevent the reconnection warning dialog.
+                self.connect_fail_warned = True
+            if cylc.flags.debug:
+                print >> sys.stderr, "  CONNECTION LOST", str(exc)
+            self.set_stopped()
+            if self.info_bar.prog_bar_active():
+                gobject.idle_add(self.info_bar.prog_bar_stop)
+            self.reconnect()
+            return False
+        else:
+            # Got suite data.
+            self.version_mismatch_warned = False
+            if (self.status == "stopping" and
+                    self.info_bar.prog_bar_can_start()):
+                gobject.idle_add(
+                    self.info_bar.prog_bar_start, "suite stopping...")
+            if (self.status == "reloading" and
+                    self.info_bar.prog_bar_can_start()):
+                gobject.idle_add(
+                    self.info_bar.prog_bar_start, "suite reloading...")
+            if (self.info_bar.prog_bar_active() and
+                    self.status not in
+                    ["stopping", "initialising", "reloading"]):
+                gobject.idle_add(self.info_bar.prog_bar_stop)
+            if summaries_changed or err_log_changed:
+                return True
+            else:
+                return False
+
     def filter_by_name(self, states):
         return dict(
-                (i, j) for i, j in states.items() if
-                self.filter_name_string in j['name'] or
-                re.search(self.filter_name_string, j['name']))
+            (i, j) for i, j in states.items() if
+            self.filter_name_string in j['name'] or
+            re.search(self.filter_name_string, j['name']))
 
     def filter_by_state(self, states):
         return dict(
-                (i, j) for i, j in states.items() if
-                j['state'] not in self.filter_states_excl)
+            (i, j) for i, j in states.items() if
+            j['state'] not in self.filter_states_excl)
 
     def filter_families(self, families):
         """Remove family summaries if no members are present."""
@@ -333,8 +467,8 @@ class Updater(threading.Thread):
 
     def filter_for_restricted_display(self, states):
         return dict(
-                (i, j) for i, j in states.items() if j['state'] in
-                task_state.legal_for_restricted_monitoring)
+            (i, j) for i, j in states.items() if j['state'] in
+            task_state.legal_for_restricted_monitoring)
 
     def refilter(self):
         """filter from the full state summary"""
@@ -356,16 +490,17 @@ class Updater(threading.Thread):
             self.fam_state_summary = self.full_fam_state_summary
             self.filt_task_ids = set()
             self.kept_task_ids = set(self.state_summary.keys())
-        self.task_list = list(set([t['name'] for t in self.state_summary.values()]))
+        self.task_list = list(
+            set([t['name'] for t in self.state_summary.values()]))
         self.task_list.sort()
 
-    def update_globals( self ):
-        self.info_bar.set_state( self.global_summary.get( "states", [] ) )
-        self.info_bar.set_mode( self.mode )
-        self.info_bar.set_time( self.dt )
-        self.info_bar.set_status( self.status )
-        self.info_bar.set_log( "\n".join(self.err_log_lines),
-                               self.err_log_size )
+    def update_globals(self):
+        self.info_bar.set_state(self.global_summary.get("states", []))
+        self.info_bar.set_mode(self.mode)
+        self.info_bar.set_time(self.dt)
+        self.info_bar.set_status(self.status)
+        self.info_bar.set_log("\n".join(self.err_log_lines),
+                              self.err_log_size)
         return False
 
     def stop(self):
@@ -373,11 +508,10 @@ class Updater(threading.Thread):
 
     def run(self):
         while not self.quit:
-            if (not self._no_update_event.is_set()
-                and self.poll_schd.ready()
-                and self.update()):
+            if (not self._no_update_event.is_set() and
+                    self.poll_schd.ready() and self.update()):
                 self._flag_new_update()
-                gobject.idle_add( self.update_globals )
+                gobject.idle_add(self.update_globals)
             sleep(1)
         else:
             pass
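
The retrieve_state_summaries() changes above resolve a single display status
from several daemon flags in a fixed priority order (stopping, then the
running variants, then a 'held' override, then 'reloading'). A minimal
standalone sketch of that ordering, assuming a plain dict with the same keys
as the daemon's global summary:

    def resolve_status(glbl):
        """Pick one display status from the daemon's global summary flags."""
        # 1. Stopping beats every 'running' variant.
        if glbl['stopping']:
            status = 'stopping'
        elif glbl['will_pause_at']:
            status = 'running to hold at ' + glbl['will_pause_at']
        elif glbl['will_stop_at']:
            status = 'running to ' + glbl['will_stop_at']
        else:
            status = 'running'
        # 2. A temporary hold overrides the running variants.
        if glbl['paused']:
            status = 'held'
        # 3. Reloading overrides running/held, but never stopping
        #    ('reloading' may be absent from older daemons, hence .get()).
        if status != 'stopping' and glbl.get('reloading'):
            status = 'reloading'
        return status

    # resolve_status({'stopping': False, 'will_pause_at': '',
    #                 'will_stop_at': '20160101T00Z', 'paused': False})
    # returns 'running to 20160101T00Z'.
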
diff --git a/lib/cylc/gui/updater_dot.py b/lib/cylc/gui/updater_dot.py
index 923215f..8207cf9 100644
--- a/lib/cylc/gui/updater_dot.py
+++ b/lib/cylc/gui/updater_dot.py
@@ -25,7 +25,7 @@ from time import sleep
 
 from cylc.task_id import TaskID
 from cylc.gui.dot_maker import DotMaker
-from cylc.state_summary import get_id_summary
+from cylc.network.suite_state import get_id_summary
 from copy import deepcopy
 
 
@@ -79,9 +79,9 @@ class DotUpdater(threading.Thread):
     def _set_tooltip(self, widget, tip_text):
         tip = gtk.Tooltips()
         tip.enable()
-        tip.set_tip( widget, tip_text )
+        tip.set_tip(widget, tip_text)
 
-    def clear_list( self ):
+    def clear_list(self):
         self.led_liststore.clear()
         # gtk idle functions must return False or they will be called repeatedly
         return False
@@ -96,7 +96,7 @@ class DotUpdater(threading.Thread):
 
         if not self.action_required and (
                 self.last_update_time is not None and
-                self.last_update_time >= self.updater.last_update_time ):
+                self.last_update_time >= self.updater.last_update_time):
             return False
 
         self.last_update_time = self.updater.last_update_time
@@ -133,14 +133,16 @@ class DotUpdater(threading.Thread):
                 if item not in self.task_list:
                     self.task_list.append(item)
 
-        if self.cfg.use_defn_order and self.updater.ns_defn_order and self.defn_order_on:
-            self.task_list = [ i for i in self.updater.ns_defn_order if i in self.task_list ]
+        if (self.cfg.use_defn_order and self.updater.ns_defn_order and
+                self.defn_order_on):
+            self.task_list = [
+                i for i in self.updater.ns_defn_order if i in self.task_list]
         else:
             self.task_list.sort()
 
         return True
 
-    def set_led_headings( self ):
+    def set_led_headings(self):
         if not self.should_transpose_view:
             new_headings = ['Name'] + self.point_strings
         else:
@@ -161,10 +163,10 @@ class DotUpdater(threading.Thread):
             label.show()
             labels.append(label)
             label_box = gtk.VBox()
-            label_box.pack_start( label, expand=False, fill=False )
+            label_box.pack_start(label, expand=False, fill=False)
             label_box.show()
-            self._set_tooltip( label_box, tip )
-            tvcs[n].set_widget( label_box )
+            self._set_tooltip(label_box, tip)
+            tvcs[n].set_widget(label_box)
         max_pixel_length = -1
         for label in labels:
             x, y = label.get_layout().get_size()
@@ -174,14 +176,14 @@ class DotUpdater(threading.Thread):
             while label.get_layout().get_size()[0] < max_pixel_length:
                 label.set_text(label.get_text() + ' ')
 
-    def ledview_widgets( self ):
+    def ledview_widgets(self):
         if not self.should_transpose_view:
-            types = [str] + [gtk.gdk.Pixbuf] * len( self.point_strings )
+            types = [str] + [gtk.gdk.Pixbuf] * len(self.point_strings)
             num_new_columns = len(types)
         else:
-            types = [str] + [gtk.gdk.Pixbuf] * len( self.task_list) + [str]
+            types = [str] + [gtk.gdk.Pixbuf] * len(self.task_list) + [str]
             num_new_columns = 1 + len(self.task_list)
-        new_led_liststore = gtk.ListStore( *types )
+        new_led_liststore = gtk.ListStore(*types)
         old_types = []
         for i in range(self.led_liststore.get_n_columns()):
             old_types.append(self.led_liststore.get_column_type(i))
@@ -202,7 +204,7 @@ class DotUpdater(threading.Thread):
                 self.is_transposed == self.should_transpose_view):
 
             tvcs_for_removal = self.led_treeview.get_columns()[
-                 num_new_columns:]
+                num_new_columns:]
 
             for tvc in tvcs_for_removal:
                 self.led_treeview.remove_column(tvc)
@@ -212,12 +214,11 @@ class DotUpdater(threading.Thread):
             for model_col_num in range(num_columns, num_new_columns):
                 # Add newly-needed columns.
                 cr = gtk.CellRendererPixbuf()
-                #cr.set_property( 'cell_background', 'black' )
-                cr.set_property( 'xalign', 0 )
-                tvc = gtk.TreeViewColumn( ""  )
-                tvc.pack_end( cr, True )
-                tvc.set_attributes( cr, pixbuf=model_col_num )
-                self.led_treeview.append_column( tvc )
+                cr.set_property('xalign', 0)
+                tvc = gtk.TreeViewColumn("")
+                tvc.pack_end(cr, True)
+                tvc.set_attributes(cr, pixbuf=model_col_num)
+                self.led_treeview.append_column(tvc)
             self.set_led_headings()
             return False
 
@@ -225,7 +226,7 @@ class DotUpdater(threading.Thread):
         for tvc in tvcs:
             self.led_treeview.remove_column(tvc)
 
-        self.led_treeview.set_model( self.led_liststore )
+        self.led_treeview.set_model(self.led_liststore)
 
         if not self.should_transpose_view:
             tvc = gtk.TreeViewColumn('Name')
@@ -233,24 +234,23 @@ class DotUpdater(threading.Thread):
             tvc = gtk.TreeViewColumn('Point')
 
         cr = gtk.CellRendererText()
-        tvc.pack_start( cr, False )
-        tvc.set_attributes( cr, text=0 )
+        tvc.pack_start(cr, False)
+        tvc.set_attributes(cr, text=0)
 
-        self.led_treeview.append_column( tvc )
+        self.led_treeview.append_column(tvc)
 
         if not self.should_transpose_view:
-            data_range = range(1, len( self.point_strings ) + 1)
+            data_range = range(1, len(self.point_strings) + 1)
         else:
-            data_range = range(1, len( self.task_list ) + 1)
+            data_range = range(1, len(self.task_list) + 1)
 
         for n in data_range:
             cr = gtk.CellRendererPixbuf()
-            #cr.set_property( 'cell_background', 'black' )
-            cr.set_property( 'xalign', 0 )
-            tvc = gtk.TreeViewColumn( ""  )
-            tvc.pack_end( cr, True )
-            tvc.set_attributes( cr, pixbuf=n )
-            self.led_treeview.append_column( tvc )
+            cr.set_property('xalign', 0)
+            tvc = gtk.TreeViewColumn("")
+            tvc.pack_end(cr, True)
+            tvc.set_attributes(cr, pixbuf=n)
+            self.led_treeview.append_column(tvc)
 
         self.set_led_headings()
         self.is_transposed = self.should_transpose_view
@@ -296,27 +296,27 @@ class DotUpdater(threading.Thread):
         if col_index == 0:
             tooltip.set_text(task_id)
             return True
-        text = get_id_summary( task_id, self.state_summary,
-                               self.fam_state_summary, self.descendants )
+        text = get_id_summary(task_id, self.state_summary,
+                              self.fam_state_summary, self.descendants)
         if text == task_id:
             return False
         tooltip.set_text(text)
         return True
 
-    def update_gui( self ):
+    def update_gui(self):
         new_data = {}
         state_summary = {}
-        state_summary.update( self.state_summary )
-        state_summary.update( self.fam_state_summary )
+        state_summary.update(self.state_summary)
+        state_summary.update(self.fam_state_summary)
         self.ledview_widgets()
 
         tasks_by_point_string = {}
         tasks_by_name = {}
         for id_ in state_summary:
             name, point_string = TaskID.split(id_)
-            tasks_by_point_string.setdefault( point_string, [] )
+            tasks_by_point_string.setdefault(point_string, [])
             tasks_by_point_string[point_string].append(name)
-            tasks_by_name.setdefault( name, [] )
+            tasks_by_name.setdefault(name, [])
             tasks_by_name[name].append(point_string)
 
         # flat (a liststore would do)
@@ -376,9 +376,8 @@ class DotUpdater(threading.Thread):
         states = {}
         while not self.quit:
             if self.update() or self.action_required:
-                gobject.idle_add( self.update_gui )
+                gobject.idle_add(self.update_gui)
                 self.action_required = False
             sleep(0.2)
         else:
             pass
-            ####print "Disconnecting task state info thread"
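
The reflowed condition in DotUpdater.update() controls how the dot view
orders its task list: by the suite's namespace definition order when that
option is enabled, alphabetically otherwise. A small sketch of just that
ordering step, with hypothetical standalone argument names:

    def order_task_list(task_list, ns_defn_order, use_defn_order):
        """Order tasks by suite definition order if requested, else sort."""
        if use_defn_order and ns_defn_order:
            # Keep only the tasks present in the view, in definition order.
            return [name for name in ns_defn_order if name in task_list]
        return sorted(task_list)

    # order_task_list(['post', 'model'], ['model', 'post', 'archive'], True)
    # returns ['model', 'post'].
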
diff --git a/lib/cylc/gui/updater_graph.py b/lib/cylc/gui/updater_graph.py
index 718371b..8a49ce4 100644
--- a/lib/cylc/gui/updater_graph.py
+++ b/lib/cylc/gui/updater_graph.py
@@ -16,9 +16,9 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from cylc import cylc_pyro_client, dump, graphing
+from cylc import dump, graphing
 from cylc.mkdir_p import mkdir_p
-from cylc.state_summary import get_id_summary
+from cylc.network.suite_state import get_id_summary
 from cylc.task_id import TaskID
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.gui.warning_dialog import warning_dialog
@@ -60,7 +60,7 @@ class GraphUpdater(threading.Thread):
 
         self.quit = False
         self.cleared = False
-        self.ignore_suicide = False
+        self.ignore_suicide = True
         self.focus_start_point_string = None
         self.focus_stop_point_string = None
         self.xdot = xdot
@@ -132,10 +132,8 @@ class GraphUpdater(threading.Thread):
                 mkdir_p(self.suite_share_dir)
             except Exception as exc:
                 gobject.idle_add(warning_dialog(
-                    "%s\nCannot create graph frames directory." % (
-                        str(exc)).warn
-                    )
-                )
+                    "%s\nCannot create graph frames directory." % (str(exc))
+                ).warn)
                 self.write_dot_frames = False
 
     def clear_graph(self):
@@ -216,7 +214,6 @@ class GraphUpdater(threading.Thread):
             return False
         elif not compare_dict_of_dict(states, self.state_summary):
             # state changed - implicitly includes family state change.
-            #print 'STATE CHANGED'
             self.state_summary = states
             self.fam_state_summary = f_states
             return True
@@ -232,12 +229,10 @@ class GraphUpdater(threading.Thread):
                 # Do not use gobject.idle_add() here - it drastically
                 # affects performance for large suites, and appears to
                 # be unnecessary anyway (due to xdot internals?).
-                ################ gobject.idle_add(self.update_xdot)
                     self.update_xdot(no_zoom=needed_no_redraw)
             sleep(0.2)
         else:
             pass
-            ####print "Disconnecting task state info thread"
 
     def update_xdot(self, no_zoom=False):
         self.xdot.set_dotcode(self.graphw.to_string(), no_zoom=True)
@@ -304,17 +299,16 @@ class GraphUpdater(threading.Thread):
             oldest = self.oldest_point_string
             newest = self.newest_point_string
 
-        start_time = self.global_summary['start time']
-
         try:
-            res = self.updater.sinfo.get(
-                'graph raw', oldest, newest, self.group, self.ungroup,
+            res = self.updater.suite_info_client.get_info(
+                'get_graph_raw', oldest, newest, self.group, self.ungroup,
                 self.ungroup_recursive, self.group_all, self.ungroup_all)
         except TypeError:
             # Back compat with pre cylc-6 suite daemons.
-            res = self.updater.sinfo.get(
-                'graph raw', oldest, newest, False, self.group, self.ungroup,
-                self.ungroup_recursive, self.group_all, self.ungroup_all)
+            res = self.updater.suite_info_client.get(
+                'get_graph_raw', oldest, newest, False, self.group,
+                self.ungroup, self.ungroup_recursive, self.group_all,
+                self.ungroup_all)
         except Exception as exc:  # PyroError?
             print >> sys.stderr, str(exc)
             return False
@@ -357,8 +351,17 @@ class GraphUpdater(threading.Thread):
                     # Don't need to guard against special nodes here (yet).
                     name, point_string = TaskID.split(id)
                     if name not in self.all_families:
+                        # This node is a task, not a family.
                         if id in self.updater.filt_task_ids:
                             nodes_to_remove.add(node)
+                        elif id not in self.updater.kept_task_ids:
+                            # A base node - these only appear in the graph.
+                            filter_string = self.updater.filter_name_string
+                            if (filter_string and
+                                    filter_string not in name and
+                                    not re.search(filter_string, name)):
+                                # A base node that fails the name filter.
+                                nodes_to_remove.add(node)
                     elif id in self.fam_state_summary:
                         # Remove family nodes if all members filtered out.
                         remove = True
@@ -369,6 +372,9 @@ class GraphUpdater(threading.Thread):
                                 break
                         if remove:
                             nodes_to_remove.add(node)
+                    elif id in self.updater.full_fam_state_summary:
+                        # An updater-filtered-out family.
+                        nodes_to_remove.add(node)
 
             # Base node cropping.
             if self.crop:
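
The new elif branch in updater_graph.py applies the gcylc name filter to
'base' graph nodes (nodes that appear only in the graph, not in the state
summary), using the same substring-or-regex test as the task filter. The
test in isolation, under those assumptions:

    import re

    def fails_name_filter(name, filter_string):
        """True if a node name matches neither substring nor regex filter."""
        if not filter_string:
            return False  # No filter active: keep every node.
        return (filter_string not in name and
                not re.search(filter_string, name))

    # fails_name_filter('post_proc', 'proc')  -> False (kept in the graph)
    # fails_name_filter('housekeep', '^post') -> True  (cropped from the graph)
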
diff --git a/lib/cylc/gui/updater_tree.py b/lib/cylc/gui/updater_tree.py
index 7166adb..825a8fe 100644
--- a/lib/cylc/gui/updater_tree.py
+++ b/lib/cylc/gui/updater_tree.py
@@ -18,6 +18,7 @@
 
 from copy import deepcopy
 import datetime
+import time
 import gobject
 import itertools
 import threading
@@ -25,12 +26,12 @@ from time import sleep
 
 from cylc.task_id import TaskID
 from cylc.gui.dot_maker import DotMaker
-from cylc.state_summary import get_id_summary
+from cylc.network.suite_state import get_id_summary
 from cylc.strftime import isoformat_strftime
 from cylc.wallclock import (
-        get_current_time_string,
-        get_time_string_from_unix_time,
-        TIME_ZONE_STRING_LOCAL_BASIC
+    get_current_time_string,
+    get_time_string_from_unix_time,
+    TIME_ZONE_STRING_LOCAL_BASIC
 )
 
 
@@ -42,7 +43,8 @@ def _time_trim(time_value):
 
 class TreeUpdater(threading.Thread):
 
-    def __init__(self, cfg, updater, ttreeview, ttree_paths, info_bar, theme, dot_size):
+    def __init__(self, cfg, updater, ttreeview, ttree_paths, info_bar, theme,
+                 dot_size):
 
         super(TreeUpdater, self).__init__()
 
@@ -64,9 +66,11 @@ class TreeUpdater(threading.Thread):
         self._prev_data = {}
         self._prev_fam_data = {}
 
-        self.autoexpand_states = [ 'queued', 'ready', 'submitted', 'running', 'failed' ]
+        self.autoexpand_states = [
+            'queued', 'ready', 'expired', 'submitted', 'running', 'failed']
         self._last_autoexpand_me = []
-        self.ttree_paths = ttree_paths  # Dict of paths vs all descendant node states
+        # Dict of paths vs all descendant node states
+        self.ttree_paths = ttree_paths
         self.should_group_families = ("text" not in self.cfg.ungrouped_views)
         self.ttreeview = ttreeview
         # Hierarchy of models: view <- sorted <- filtered <- base model
@@ -88,7 +92,7 @@ class TreeUpdater(threading.Thread):
         dotm = DotMaker(theme, size=dot_size)
         self.dots = dotm.get_dots()
 
-    def clear_tree( self ):
+    def clear_tree(self):
         self.ttreestore.clear()
         # gtk idle functions must return False or they will be called repeatedly
         return False
@@ -103,7 +107,7 @@ class TreeUpdater(threading.Thread):
 
         if not self.action_required and (
                 self.last_update_time is not None and
-                self.last_update_time >= self.updater.last_update_time ):
+                self.last_update_time >= self.updater.last_update_time):
             return False
 
         self.last_update_time = self.updater.last_update_time
@@ -116,26 +120,27 @@ class TreeUpdater(threading.Thread):
         self.updater.set_update(True)
         return True
 
-    def search_level( self, model, iter, func, data ):
+    def search_level(self, model, iter, func, data):
         while iter:
-            if func( model, iter, data):
+            if func(model, iter, data):
                 return iter
             iter = model.iter_next(iter)
         return None
 
-    def search_treemodel( self, model, iter, func, data ):
+    def search_treemodel(self, model, iter, func, data):
         while iter:
-            if func( model, iter, data):
+            if func(model, iter, data):
                 return iter
-            result = self.search_treemodel( model, model.iter_children(iter), func, data)
+            result = self.search_treemodel(
+                model, model.iter_children(iter), func, data)
             if result:
                 return result
             iter = model.iter_next(iter)
         return None
 
-    def match_func( self, model, iter, data ):
+    def match_func(self, model, iter, data):
         column, key = data
-        value = model.get_value( iter, column )
+        value = model.get_value(iter, column)
         return value == key
 
     def on_query_tooltip(self, widget, x, y, kbd_ctx, tooltip):
@@ -162,14 +167,14 @@ class TreeUpdater(threading.Thread):
             self._prev_tooltip_task_id = task_id
             tooltip.set_text(None)
             return False
-        text = get_id_summary( task_id, self.state_summary,
-                               self.fam_state_summary, self.descendants )
+        text = get_id_summary(task_id, self.state_summary,
+                              self.fam_state_summary, self.descendants)
         if text == task_id:
             return False
         tooltip.set_text(text)
         return True
 
-    def update_gui( self ):
+    def update_gui(self):
         """Update the treeview with new task and family information.
 
         This redraws the treeview, but keeps a memory of user-expanded
@@ -201,7 +206,7 @@ class TreeUpdater(threading.Thread):
             last_update_date = None
 
         tetc_cached_ids_left = set(self._id_tetc_cache)
-        
+
         # Start figuring out if we can get away with not rebuilding the tree.
         id_named_paths = {}
         should_rebuild_tree = False
@@ -220,19 +225,20 @@ class TreeUpdater(threading.Thread):
             for id in summary:
                 name, point_string = TaskID.split(id)
                 if point_string not in dest:
-                    dest[ point_string ] = {}
-                state = summary[ id ].get('state')
+                    dest[point_string] = {}
+                state = summary[id].get('state')
 
                 # Populate task timing slots.
                 t_info = {}
                 tkeys = ['submitted_time_string', 'started_time_string',
-                        'finished_time_string']
+                         'finished_time_string']
 
                 if id in self.fam_state_summary:
                     # Family timing currently left empty.
                     for dt in tkeys:
                         t_info[dt] = ""
                         t_info['mean_total_elapsed_time_string'] = ""
+                        t_info['progress'] = 0
                 else:
                     meant = summary[id].get('mean total elapsed time')
                     tstart = summary[id].get('started_time')
@@ -242,36 +248,49 @@ class TreeUpdater(threading.Thread):
                         try:
                             t_info[dt] = summary[id][dt]
                         except KeyError:
-                            # Pre cylc-6 back compat: no special "_string" items,
-                            # and the data was in string form already.
+                            # Pre cylc-6 back compat: no special "_string"
+                            # items, and the data was in string form already.
                             odt = dt.replace("_string", "")
                             try:
                                 t_info[dt] = summary[id][odt]
                             except KeyError:
                                 if dt == 'finished_time_string':
                                     # Was succeeded_time.
-                                    t_info[dt] = summary[id].get('succeeded_time')
+                                    t_info[dt] = summary[id].get(
+                                        'succeeded_time')
                                 else:
                                     t_info[dt] = None
                             if isinstance(t_info[dt], str):
                                 # Remove decimal fraction seconds.
                                 t_info[dt] = t_info[dt].split('.')[0]
 
+                    # Compute percent progress.
+                    if (isinstance(tstart, float) and (
+                            isinstance(meant, float) or
+                            isinstance(meant, int))):
+                        tetc_unix = tstart + meant
+                        tnow = time.time()
+                        if tnow > tetc_unix:
+                            t_info['progress'] = 100
+                        else:
+                            t_info['progress'] = int(
+                                100 * (tnow - tstart) / (tetc_unix - tstart))
+                    else:
+                        t_info['progress'] = 0
+
                     if (t_info['finished_time_string'] is None and
                             isinstance(tstart, float) and
                             (isinstance(meant, float) or
                              isinstance(meant, int))):
                         # Task not finished, but has started and has a meant;
                         # so we can compute an expected time of completion.
-                        tetc_unix = tstart + meant
                         tetc_string = (
                             self._id_tetc_cache.get(id, {}).get(tetc_unix))
                         if tetc_string is None:
                             # We have to calculate it.
                             tetc_string = get_time_string_from_unix_time(
                                 tetc_unix,
-                                custom_time_zone_info=daemon_time_zone_info
-                            )
+                                custom_time_zone_info=daemon_time_zone_info)
                             self._id_tetc_cache[id] = {tetc_unix: tetc_string}
                         t_info['finished_time_string'] = tetc_string
                         estimated_t_finish = True
@@ -289,7 +308,7 @@ class TreeUpdater(threading.Thread):
                                 meant_minutes, meant_seconds)
                         else:
                             meant_string = "PT%dS" % meant_seconds
-                    elif isinstance(meant,str):
+                    elif isinstance(meant, str):
                         meant_string = meant
                     else:
                         meant_string = "*"
@@ -301,16 +320,15 @@ class TreeUpdater(threading.Thread):
                             t_info[dt] = "*"
 
                     if estimated_t_finish:
-                        # TODO - this markup probably affects sort order?
-                        t_info['finished_time_string'] = "<i>%s?</i>" % (
-                                t_info['finished_time_string'])
-    
+                        t_info['finished_time_string'] = "%s?" % (
+                            t_info['finished_time_string'])
+
                 # Use "*" (or "" for family rows) until slot is populated
                 # and for pre cylc-6 back compat for host and job ID cols.
                 job_id = summary[id].get('submit_method_id')
                 batch_sys_name = summary[id].get('batch_sys_name')
                 host = summary[id].get('host')
-                message = summary[ id ].get('latest_message')
+                message = summary[id].get('latest_message')
                 if message is not None:
                     if last_update_date is not None:
                         message = message.replace(
@@ -339,7 +357,7 @@ class TreeUpdater(threading.Thread):
                     t_info['started_time_string'],
                     t_info['finished_time_string'],
                     t_info['mean_total_elapsed_time_string'],
-                    message, icon
+                    message, icon, t_info['progress']
                 ]
                 dest[point_string][name] = new_info
 
@@ -355,7 +373,7 @@ class TreeUpdater(threading.Thread):
                         name = point_string
                     update_row_ids.append((point_string, name, is_fam))
 
-                if not is_fam:
+                if not is_fam and name in self.ancestors:
                     # Calculate the family nesting for tasks.
                     families = list(self.ancestors[name])
                     families.sort(lambda x, y: (y in self.ancestors[x]) -
@@ -386,7 +404,7 @@ class TreeUpdater(threading.Thread):
         # Cache the current row point-string and names.
         row_id_iters_left = {}
         self.ttreestore.foreach(self._cache_row_id_iters, row_id_iters_left)
-        
+
         point_strings = new_data.keys()
         point_strings.sort()  # This basic sort is not always desirable.
 
@@ -398,9 +416,10 @@ class TreeUpdater(threading.Thread):
             # For each id, calculate the new path and add or replace that path
             # in the self.ttreestore.
             for i, point_string in enumerate(point_strings):
-                p_data = [ None ] * 7
-                if "root" in new_fam_data[point_string]:
+                try:
                     p_data = new_fam_data[point_string]["root"]
+                except KeyError:
+                    p_data = [None] * 7
                 p_path = (i,)
                 p_row_id = (point_string, point_string)
                 p_data = list(p_row_id) + p_data
@@ -417,8 +436,7 @@ class TreeUpdater(threading.Thread):
                 if self.cfg.use_defn_order and self.updater.ns_defn_order:
                     task_named_paths.sort(
                         key=lambda x: map(
-                            self.updater.dict_ns_defn_order.get, x)
-                    )
+                            self.updater.dict_ns_defn_order.get, x))
                 else:
                     task_named_paths.sort()
 
@@ -433,7 +451,7 @@ class TreeUpdater(threading.Thread):
                     # and simply ["foo_bar"] in non-grouped mode.
                     name = named_path[-1]
                     state = new_data[point_string][name][0]
-                    self._update_path_info( p_iter, state, name )
+                    self._update_path_info(p_iter, state, name)
 
                     f_iter = p_iter
                     f_path = p_path
@@ -446,9 +464,10 @@ class TreeUpdater(threading.Thread):
                             f_path = family_paths[fam]
                         else:
                             # Add family to tree
-                            f_data = [ None ] * 7
-                            if fam in new_fam_data[point_string]:
+                            try:
                                 f_data = new_fam_data[point_string][fam]
+                            except KeyError:
+                                f_data = [None] * 7
                             if i > 0:
                                 parent_fam = named_path[i - 1]
                             else:
@@ -456,33 +475,29 @@ class TreeUpdater(threading.Thread):
                                 parent_fam = point_string
                             family_num_children.setdefault(parent_fam, 0)
                             family_num_children[parent_fam] += 1
-                            f_row_id = ( point_string, fam )
+                            f_row_id = (point_string, fam)
                             f_data = list(f_row_id) + f_data
                             # New path is parent_path + (siblings + 1).
                             f_path = tuple(
                                 list(family_paths[parent_fam]) +
-                                [family_num_children[parent_fam] - 1]
-                            )
+                                [family_num_children[parent_fam] - 1])
                             f_iter = self._update_model(
                                 self.ttreestore, columns, f_path, f_row_id,
-                                f_data, row_id_iters_left
-                            )
+                                f_data, row_id_iters_left)
                             family_iters[fam] = f_iter
                             family_paths[fam] = f_path
-                        self._update_path_info( f_iter, state, name )
+                        self._update_path_info(f_iter, state, name)
                     # Add task to tree using the family path we just found.
                     parent_fam = fam
                     family_num_children.setdefault(parent_fam, 0)
                     family_num_children[parent_fam] += 1
                     t_path = tuple(
-                        list(f_path) + [family_num_children[parent_fam] - 1]
-                    )
+                        list(f_path) + [family_num_children[parent_fam] - 1])
                     t_row_id = (point_string, name)
                     t_data = list(t_row_id) + new_data[point_string][name]
                     self._update_model(
                         self.ttreestore, columns, t_path, t_row_id, t_data,
-                        row_id_iters_left
-                    )
+                        row_id_iters_left)
             # Adding and updating finished - now we need to delete left overs.
             delete_items = row_id_iters_left.items()
             # Sort reversed by path, to get children before parents.
@@ -506,14 +521,13 @@ class TreeUpdater(threading.Thread):
             # Update the tree in place - no row has been added or deleted.
             # Our row_id_iters_left cache is still valid.
             for point_string, name, is_fam in sorted(update_row_ids):
-                if is_fam:
-                    if name == point_string:
+                try:
+                    if is_fam and name == point_string:
                         data = new_fam_data[point_string]["root"]
-                    else:
+                    elif is_fam:
                         data = new_fam_data[point_string][name]
-                else:
-                    data = new_data[point_string][name]
-                try:
+                    else:
+                        data = new_data[point_string][name]
                     iter_, path = row_id_iters_left[(point_string, name)]
                 except KeyError:
                     if not is_fam:
@@ -539,19 +553,19 @@ class TreeUpdater(threading.Thread):
         model.sort_column_changed()
 
         # Expand all the rows that were user-expanded or need auto-expansion.
-        model.foreach( self._expand_row, expand_me )
+        model.foreach(self._expand_row, expand_me)
         self._prev_id_named_paths = id_named_paths
         self._prev_data = new_data
         self._prev_fam_data = new_fam_data
         return False
 
-    def _cache_row_id_iters( self, model, path, iter_, row_id_iters ):
+    def _cache_row_id_iters(self, model, path, iter_, row_id_iters):
         # Cache a row id and its TreeIter and path in row_id_iters.
         row_id = self._get_row_id(model, path)
         row_id_iters[row_id] = (iter_, path)
 
-    def _update_model( self, model, columns, path, row_id, data,
-                       old_row_id_iters ):
+    def _update_model(self, model, columns, path, row_id, data,
+                      old_row_id_iters):
         # Given a path, make sure the model has 'data' at that path.
         old_row_id_iters.pop(row_id, None)
 
@@ -589,7 +603,7 @@ class TreeUpdater(threading.Thread):
         parent_iter = model.iter_parent(iter_)
         next_iter = iter_.copy()
         old_row_id_iters.pop(dest_row_id, None)
-        
+
         while model.remove(next_iter):
             # next_iter is silently updated each time by model.remove.
             next_path = model.get_path(next_iter)
@@ -603,70 +617,69 @@ class TreeUpdater(threading.Thread):
         iter_ = model.append(parent_iter, data)
         return iter_
 
-    def _get_row_id( self, model, rpath ):
-        # Record a row's first two values.
-        riter = model.get_iter( rpath )
-        point_string = model.get_value( riter, 0 )
-        name = model.get_value( riter, 1 )
+    def _get_row_id(self, model, rpath):
+        """Record a row's first two values."""
+        riter = model.get_iter(rpath)
+        point_string = model.get_value(riter, 0)
+        name = model.get_value(riter, 1)
         return (point_string, name)
 
-    def _add_expanded_row( self, view, rpath, expand_me ):
-        # Add user-expanded rows to a list of rows to be expanded.
+    def _add_expanded_row(self, view, rpath, expand_me):
+        """Add user-expanded rows to a list of rows to be expanded."""
         model = view.get_model()
-        row_iter = model.get_iter( rpath )
-        row_id = self._get_row_id( model, rpath )
-        if (not self.autoexpand or
-            row_id not in self._last_autoexpand_me):
-            expand_me.append( row_id )
+        row_iter = model.get_iter(rpath)
+        row_id = self._get_row_id(model, rpath)
+        if not self.autoexpand or row_id not in self._last_autoexpand_me:
+            expand_me.append(row_id)
         return False
 
-    def _get_user_expanded_row_ids( self ):
+    def _get_user_expanded_row_ids(self):
         """Return a list of user-expanded row point_strings and names."""
         names = []
         model = self.ttreeview.get_model()
         if model is None or model.get_iter_first() is None:
             return names
-        self.ttreeview.map_expanded_rows( self._add_expanded_row, names )
+        self.ttreeview.map_expanded_rows(self._add_expanded_row, names)
         return names
 
-    def _expand_row( self, model, rpath, riter, expand_me ):
+    def _expand_row(self, model, rpath, riter, expand_me):
         """Expand a row if it matches expand_me point_strings and names."""
-        point_string_name_tuple = self._get_row_id( model, rpath )
+        point_string_name_tuple = self._get_row_id(model, rpath)
         if point_string_name_tuple in expand_me:
-            self.ttreeview.expand_to_path( rpath )
+            self.ttreeview.expand_to_path(rpath)
         return False
 
-    def _update_path_info( self, row_iter, descendant_state, descendant_name ):
+    def _update_path_info(self, row_iter, descendant_state, descendant_name):
         # Cache states and names from the subtree below this row.
-        path = self.ttreestore.get_path( row_iter )
-        self.ttree_paths.setdefault( path, {})
-        self.ttree_paths[path].setdefault( 'states', [] )
-        self.ttree_paths[path]['states'].append( descendant_state )
-        self.ttree_paths[path].setdefault( 'names', [] )
-        self.ttree_paths[path]['names'].append( descendant_name )
-
-    def _get_autoexpand_rows( self ):
+        path = self.ttreestore.get_path(row_iter)
+        self.ttree_paths.setdefault(path, {})
+        self.ttree_paths[path].setdefault('states', [])
+        self.ttree_paths[path]['states'].append(descendant_state)
+        self.ttree_paths[path].setdefault('names', [])
+        self.ttree_paths[path]['names'].append(descendant_name)
+
+    def _get_autoexpand_rows(self):
         # Return a list of rows that meet the auto-expansion criteria.
         autoexpand_me = []
         r_iter = self.ttreestore.get_iter_first()
         while r_iter is not None:
-            point_string = self.ttreestore.get_value( r_iter, 0 )
-            name = self.ttreestore.get_value( r_iter, 1 )
-            if (( point_string, name ) not in autoexpand_me and
-                self._calc_autoexpand_row( r_iter )):
+            point_string = self.ttreestore.get_value(r_iter, 0)
+            name = self.ttreestore.get_value(r_iter, 1)
+            if ((point_string, name) not in autoexpand_me and
+                    self._calc_autoexpand_row(r_iter)):
                 # This row should be auto-expanded.
-                autoexpand_me.append( ( point_string, name ) )
+                autoexpand_me.append((point_string, name))
                 # Now check whether the child rows also need this.
-                new_iter = self.ttreestore.iter_children( r_iter )
+                new_iter = self.ttreestore.iter_children(r_iter)
             else:
                 # This row shouldn't be auto-expanded, move on.
-                new_iter = self.ttreestore.iter_next( r_iter )
+                new_iter = self.ttreestore.iter_next(r_iter)
                 if new_iter is None:
-                    new_iter = self.ttreestore.iter_parent( r_iter )
+                    new_iter = self.ttreestore.iter_parent(r_iter)
             r_iter = new_iter
         return autoexpand_me
 
-    def _calc_autoexpand_row( self, row_iter ):
+    def _calc_autoexpand_row(self, row_iter):
         """Calculate whether a row meets the auto-expansion criteria.
 
         Currently, a family row with tasks in the right states will not
@@ -674,25 +687,24 @@ class TreeUpdater(threading.Thread):
         will.
 
         """
-        path = self.ttreestore.get_path( row_iter )
-        sub_st = self.ttree_paths.get( path, {} ).get( 'states', [] )
-        point_string = self.ttreestore.get_value( row_iter, 0 )
-        name = self.ttreestore.get_value( row_iter, 1 )
-        if any( [ s in self.autoexpand_states for s in sub_st ] ):
+        path = self.ttreestore.get_path(row_iter)
+        sub_st = self.ttree_paths.get(path, {}).get('states', [])
+        point_string = self.ttreestore.get_value(row_iter, 0)
+        name = self.ttreestore.get_value(row_iter, 1)
+        if any([s in self.autoexpand_states for s in sub_st]):
             # return True  # TODO: Option for different expansion rules?
             if point_string == name:
                 # Expand cycle points if any child states comply.
                 return True
-            child_iter = self.ttreestore.iter_children( row_iter )
+            child_iter = self.ttreestore.iter_children(row_iter)
             while child_iter is not None:
-                c_path = self.ttreestore.get_path( child_iter )
-                c_sub_st = self.ttree_paths.get( c_path,
-                                                 {} ).get('states', [] )
-                if any( [s in self.autoexpand_states for s in c_sub_st ] ):
-                     # Expand if there are sub-families with valid states.
-                     # Do not expand if it's just tasks with valid states.
-                     return True
-                child_iter = self.ttreestore.iter_next( child_iter )
+                c_path = self.ttreestore.get_path(child_iter)
+                c_sub_st = self.ttree_paths.get(c_path, {}).get('states', [])
+                if any([s in self.autoexpand_states for s in c_sub_st]):
+                    # Expand if there are sub-families with valid states.
+                    # Do not expand if it's just tasks with valid states.
+                    return True
+                child_iter = self.ttreestore.iter_next(child_iter)
             return False
         return False
 
@@ -701,8 +713,7 @@ class TreeUpdater(threading.Thread):
         states = {}
         while not self.quit:
             if self.update():
-                gobject.idle_add( self.update_gui )
+                gobject.idle_add(self.update_gui)
             sleep(0.2)
         else:
             pass
-            ####print "Disconnecting task state info thread"
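
The updater_tree.py hunk above adds a percent-progress figure for running
tasks, derived from the recorded start time and the mean total elapsed time,
and clamped at 100 once the expected finish time has passed. A standalone
sketch of that calculation (the zero-mean guard is an addition here, not
part of the diff):

    import time

    def percent_progress(started_time, mean_elapsed_time):
        """Estimate task progress from its start time and mean run time."""
        if not isinstance(started_time, float):
            return 0
        if not isinstance(mean_elapsed_time, (int, float)):
            return 0
        if mean_elapsed_time <= 0:
            return 0  # Guard against division by zero (assumption).
        expected_finish = started_time + mean_elapsed_time
        now = time.time()
        if now > expected_finish:
            return 100  # Past the expected finish: clamp to 100%.
        return int(100 * (now - started_time) / (expected_finish - started_time))

    # A task that started 30 seconds ago with a 60 second mean elapsed
    # time reports roughly 50.
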
diff --git a/lib/cylc/gui/util.py b/lib/cylc/gui/util.py
index 5776529..6efae2a 100644
--- a/lib/cylc/gui/util.py
+++ b/lib/cylc/gui/util.py
@@ -55,6 +55,7 @@ class EntryTempText(gtk.Entry):
             return ""
         return text
 
+
 class EntryDialog(gtk.MessageDialog):
     def __init__(self, *args, **kwargs):
         '''
@@ -70,14 +71,18 @@ class EntryDialog(gtk.MessageDialog):
         super(EntryDialog, self).__init__(*args, **kwargs)
         entry = gtk.Entry()
         entry.set_text(str(default_value))
-        entry.connect("activate",
-                lambda ent, dlg, resp: dlg.response(resp),
-                self, gtk.RESPONSE_OK)
+        entry.connect(
+            "activate",
+            lambda ent, dlg, resp: dlg.response(resp),
+            self,
+            gtk.RESPONSE_OK)
         self.vbox.pack_end(entry, True, True, 0)
         self.vbox.show_all()
         self.entry = entry
+
     def set_value(self, text):
         self.entry.set_text(text)
+
     def run(self):
         result = super(EntryDialog, self).run()
         if result == gtk.RESPONSE_OK:
@@ -101,11 +106,11 @@ def get_icon():
     """Return the gcylc icon as a gtk.gdk.Pixbuf."""
     try:
         icon_path = os.path.join(get_image_dir(), "icon.svg")
-        icon      = gtk.gdk.pixbuf_new_from_file(icon_path)
+        icon = gtk.gdk.pixbuf_new_from_file(icon_path)
     except:
         # SVG error? Try loading it the old way.
         icon_path = os.path.join(get_image_dir(), "icon.png")
-        icon      = gtk.gdk.pixbuf_new_from_file(icon_path)
+        icon = gtk.gdk.pixbuf_new_from_file(icon_path)
     return icon
 
 
@@ -136,8 +141,7 @@ def set_exception_hook_dialog(program_name=None):
     """Set a custom uncaught exception hook for launching an error dialog."""
     old_hook = sys.excepthook
     sys.excepthook = lambda *a: _launch_exception_hook_dialog(
-                                                  *a, old_hook=old_hook,
-                                                  program_name=program_name)
+        *a, old_hook=old_hook, program_name=program_name)
 
 
 def setup_icons():
@@ -146,15 +150,16 @@ def setup_icons():
     root_img_dir = get_image_dir()
     pixbuf = get_icon()
     gcylc_iconset = gtk.IconSet(pixbuf)
-    pixbuf = gtk.gdk.pixbuf_new_from_file( root_img_dir + '/icons/group.png' )
+    pixbuf = gtk.gdk.pixbuf_new_from_file(root_img_dir + '/icons/group.png')
     grp_iconset = gtk.IconSet(pixbuf)
-    pixbuf = gtk.gdk.pixbuf_new_from_file( root_img_dir + '/icons/ungroup.png' )
+    pixbuf = gtk.gdk.pixbuf_new_from_file(root_img_dir + '/icons/ungroup.png')
     ungrp_iconset = gtk.IconSet(pixbuf)
-    pixbuf = gtk.gdk.pixbuf_new_from_file( root_img_dir + '/icons/transpose.png' )
+    pixbuf = gtk.gdk.pixbuf_new_from_file(
+        root_img_dir + '/icons/transpose.png')
     transpose_iconset = gtk.IconSet(pixbuf)
     factory = gtk.IconFactory()
-    factory.add( 'gcylc', gcylc_iconset )
-    factory.add( 'group', grp_iconset )
-    factory.add( 'ungroup', ungrp_iconset )
-    factory.add( 'transpose', transpose_iconset )
+    factory.add('gcylc', gcylc_iconset)
+    factory.add('group', grp_iconset)
+    factory.add('ungroup', ungrp_iconset)
+    factory.add('transpose', transpose_iconset)
     factory.add_default()
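
The set_exception_hook_dialog() reflow in util.py keeps the lambda that
passes the previous sys.excepthook along to the dialog launcher, so the old
hook can still run after the error dialog is shown. The general chaining
pattern, sketched without the gtk dialog (install_exception_hook and handler
are hypothetical names, not part of the cylc code):

    import sys

    def install_exception_hook(handler):
        """Chain a custom handler in front of the current sys.excepthook."""
        old_hook = sys.excepthook

        def hook(exc_type, exc_value, exc_tb):
            handler(exc_type, exc_value, exc_tb)   # e.g. show an error dialog
            old_hook(exc_type, exc_value, exc_tb)  # then run the old hook

        sys.excepthook = hook
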
diff --git a/lib/cylc/gui/view_dot.py b/lib/cylc/gui/view_dot.py
index 146234f..f7fa629 100644
--- a/lib/cylc/gui/view_dot.py
+++ b/lib/cylc/gui/view_dot.py
@@ -17,21 +17,21 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import gtk
-import os, re
+import os
 import gobject
 from updater_dot import DotUpdater
 from gcapture import gcapture_tmpfile
-from cylc import cylc_pyro_client
 from cylc.task_id import TaskID
 from util import EntryTempText
 from warning_dialog import warning_dialog
 
+
 class ControlLED(object):
     """
 LED suite control interface.
     """
-    def __init__(self, cfg, updater, theme, dot_size, info_bar, get_right_click_menu,
-                 log_colors, insert_task_popup):
+    def __init__(self, cfg, updater, theme, dot_size, info_bar,
+                 get_right_click_menu, log_colors, insert_task_popup):
 
         self.cfg = cfg
         self.updater = updater
@@ -44,46 +44,46 @@ LED suite control interface.
 
         self.gcapture_windows = []
 
-    def get_control_widgets( self ):
+    def get_control_widgets(self):
 
         main_box = gtk.VBox()
 
         sw = gtk.ScrolledWindow()
-        sw.set_policy( gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC )
+        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
 
-        types = tuple( [gtk.gdk.Pixbuf]* (10 ))
+        types = tuple([gtk.gdk.Pixbuf] * 10)
         liststore = gtk.ListStore(*types)
-        treeview = gtk.TreeView( liststore )
-        treeview.connect( 'button_press_event', self.on_treeview_button_pressed )
-        sw.add( treeview )
+        treeview = gtk.TreeView(liststore)
+        treeview.connect('button_press_event', self.on_treeview_button_pressed)
+        sw.add(treeview)
 
-        main_box.pack_start( sw, expand=True, fill=True )
+        main_box.pack_start(sw, expand=True, fill=True)
 
         self.t = DotUpdater(
-                self.cfg, self.updater, treeview, self.info_bar, self.theme,
-                self.dot_size
+            self.cfg, self.updater, treeview, self.info_bar, self.theme,
+            self.dot_size
         )
         self.t.start()
 
         return main_box
 
-    def on_treeview_button_pressed( self, treeview, event ):
+    def on_treeview_button_pressed(self, treeview, event):
         # Display the menu on right click only.
         if event.button != 3:
             return False
         # Set the selection to the position of the right click
         # (otherwise the selection lags behind the click):
-        x = int( event.x )
-        y = int( event.y )
+        x = int(event.x)
+        y = int(event.y)
         time = event.time
-        pth = treeview.get_path_at_pos(x,y)
+        pth = treeview.get_path_at_pos(x, y)
 
         if pth is None:
             return False
 
         path, col, cellx, celly = pth
-        r_iter = treeview.get_model().get_iter( path )
+        r_iter = treeview.get_model().get_iter(path)
 
         column_index = treeview.get_columns().index(col)
         if column_index == 0:
@@ -91,33 +91,33 @@ LED suite control interface.
 
         if not self.t.is_transposed:
             point_string = self.t.led_headings[column_index]
-            name = treeview.get_model().get_value( r_iter, 0 )
+            name = treeview.get_model().get_value(r_iter, 0)
         else:
             name = self.t.led_headings[column_index]
             point_string_column = treeview.get_model().get_n_columns() - 1
             point_string = treeview.get_model().get_value(
-                r_iter, point_string_column )
+                r_iter, point_string_column)
 
         task_id = TaskID.get(name, point_string)
 
         is_fam = (name in self.t.descendants)
 
-        menu = self.get_right_click_menu( task_id, task_is_family=is_fam )
+        menu = self.get_right_click_menu(task_id, task_is_family=is_fam)
 
         sep = gtk.SeparatorMenuItem()
         sep.show()
-        menu.append( sep )
+        menu.append(sep)
 
-        toggle_item = gtk.CheckMenuItem( 'Toggle Hide Task Headings' )
-        toggle_item.set_active( self.t.should_hide_headings )
-        menu.append( toggle_item )
-        toggle_item.connect( 'toggled', self.toggle_headings )
+        toggle_item = gtk.CheckMenuItem('Toggle Hide Task Headings')
+        toggle_item.set_active(self.t.should_hide_headings)
+        menu.append(toggle_item)
+        toggle_item.connect('toggled', self.toggle_headings)
         toggle_item.show()
 
-        group_item = gtk.CheckMenuItem( 'Toggle Family Grouping' )
-        group_item.set_active( self.t.should_group_families )
-        menu.append( group_item )
-        group_item.connect( 'toggled', self.toggle_grouping )
+        group_item = gtk.CheckMenuItem('Toggle Family Grouping')
+        group_item.set_active(self.t.should_group_families)
+        menu.append(group_item)
+        group_item.connect('toggled', self.toggle_grouping)
         group_item.show()
 
         transpose_menu_item = gtk.CheckMenuItem('Toggle _Transpose View')
@@ -127,13 +127,14 @@ LED suite control interface.
         transpose_menu_item.show()
 
         if self.cfg.use_defn_order:
-            defn_order_menu_item = gtk.CheckMenuItem( 'Toggle _Definition Order' )
-            defn_order_menu_item.set_active( self.t.defn_order_on )
-            menu.append( defn_order_menu_item )
-            defn_order_menu_item.connect( 'toggled', self.toggle_defn_order )
+            defn_order_menu_item = gtk.CheckMenuItem(
+                'Toggle _Definition Order')
+            defn_order_menu_item.set_active(self.t.defn_order_on)
+            menu.append(defn_order_menu_item)
+            defn_order_menu_item.connect('toggled', self.toggle_defn_order)
             defn_order_menu_item.show()
 
-        menu.popup( None, None, None, event.button, event.time )
+        menu.popup(None, None, None, event.button, event.time)
 
         # TODO - popup menus are not automatically destroyed and can be
         # reused if saved; however, we need to reconstruct or at least
@@ -142,7 +143,7 @@ LED suite control interface.
 
         return True
 
-    def toggle_grouping( self, toggle_item ):
+    def toggle_grouping(self, toggle_item):
         """Toggle grouping by visualisation families."""
         group_on = toggle_item.get_active()
         if group_on == self.t.should_group_families:
@@ -153,17 +154,17 @@ LED suite control interface.
         elif "dot" not in self.cfg.ungrouped_views:
             self.cfg.ungrouped_views.append("dot")
         self.t.should_group_families = group_on
-        if isinstance( toggle_item, gtk.ToggleToolButton ):
+        if isinstance(toggle_item, gtk.ToggleToolButton):
             if group_on:
                 tip_text = "Dot View - Click to ungroup families"
             else:
                 tip_text = "Dot View - Click to group tasks by families"
-            self._set_tooltip( toggle_item, tip_text )
-            self.group_menu_item.set_active( group_on )
+            self._set_tooltip(toggle_item, tip_text)
+            self.group_menu_item.set_active(group_on)
         else:
             if toggle_item != self.group_menu_item:
-                self.group_menu_item.set_active( group_on )
-            self.group_toolbutton.set_active( group_on )
+                self.group_menu_item.set_active(group_on)
+            self.group_toolbutton.set_active(group_on)
         self.t.action_required = True
         return False
 
@@ -173,7 +174,7 @@ LED suite control interface.
             return False
         self.t.should_hide_headings = headings_off
         if toggle_item != self.headings_menu_item:
-            self.headings_menu_item.set_active( headings_off )
+            self.headings_menu_item.set_active(headings_off)
         self.t.action_required = True
 
     def toggle_transpose(self, toggle_item):
@@ -187,48 +188,49 @@ LED suite control interface.
         self.t.action_required = True
         return False
 
-    def toggle_defn_order( self, toggle_item ):
+    def toggle_defn_order(self, toggle_item):
         """Toggle definition vs alphabetic ordering of namespaces"""
         defn_order_on = toggle_item.get_active()
         if defn_order_on == self.t.defn_order_on:
             return False
         self.t.defn_order_on = defn_order_on
         if toggle_item != self.defn_order_menu_item:
-            self.defn_order_menu_item.set_active( defn_order_on )
+            self.defn_order_menu_item.set_active(defn_order_on)
         self.t.action_required = True
         return False
 
     def stop(self):
         self.t.quit = True
 
-    def on_popup_quit( self, b, lv, w ):
+    def on_popup_quit(self, b, lv, w):
         lv.quit()
-        self.quitters.remove( lv )
+        self.quitters.remove(lv)
         w.destroy()
 
     def refresh(self):
         self.t.update()
         self.t.action_required = True
 
-    def _set_tooltip( self, widget, tip_text ):
+    def _set_tooltip(self, widget, tip_text):
         # Convenience function to add hover over text to a widget.
         tip = gtk.Tooltips()
         tip.enable()
-        tip.set_tip( widget, tip_text )
+        tip.set_tip(widget, tip_text)
 
-    def get_menuitems( self ):
+    def get_menuitems(self):
         """Return the menuitems specific to this view."""
         items = []
-        self.headings_menu_item = gtk.CheckMenuItem( 'Toggle _Hide Task Headings' )
-        self.headings_menu_item.set_active( self.t.should_hide_headings )
-        items.append( self.headings_menu_item )
+        self.headings_menu_item = gtk.CheckMenuItem(
+            'Toggle _Hide Task Headings')
+        self.headings_menu_item.set_active(self.t.should_hide_headings)
+        items.append(self.headings_menu_item)
         self.headings_menu_item.show()
-        self.headings_menu_item.connect( 'toggled', self.toggle_headings )
+        self.headings_menu_item.connect('toggled', self.toggle_headings)
 
-        self.group_menu_item = gtk.CheckMenuItem( 'Toggle _Family Grouping' )
-        self.group_menu_item.set_active( self.t.should_group_families )
-        items.append( self.group_menu_item )
-        self.group_menu_item.connect( 'toggled', self.toggle_grouping )
+        self.group_menu_item = gtk.CheckMenuItem('Toggle _Family Grouping')
+        self.group_menu_item.set_active(self.t.should_group_families)
+        items.append(self.group_menu_item)
+        self.group_menu_item.connect('toggled', self.toggle_grouping)
 
         self.transpose_menu_item = gtk.CheckMenuItem('Toggle _Transpose View')
         self.transpose_menu_item.set_active(self.t.should_transpose_view)
@@ -236,33 +238,40 @@ LED suite control interface.
         self.transpose_menu_item.connect('toggled', self.toggle_transpose)
 
         if self.cfg.use_defn_order:
-            self.defn_order_menu_item = gtk.CheckMenuItem( 'Toggle _Definition Order' )
-            self.defn_order_menu_item.set_active( self.t.defn_order_on )
-            items.append( self.defn_order_menu_item )
-            self.defn_order_menu_item.connect( 'toggled', self.toggle_defn_order )
- 
+            self.defn_order_menu_item = gtk.CheckMenuItem(
+                'Toggle _Definition Order')
+            self.defn_order_menu_item.set_active(self.t.defn_order_on)
+            items.append(self.defn_order_menu_item)
+            self.defn_order_menu_item.connect(
+                'toggled', self.toggle_defn_order)
+
         return items
 
-    def get_toolitems( self ):
+    def get_toolitems(self):
         """Return the tool bar items specific to this view."""
         items = []
 
         self.group_toolbutton = gtk.ToggleToolButton()
-        self.group_toolbutton.set_active( self.t.should_group_families )
-        g_image = gtk.image_new_from_stock( 'group', gtk.ICON_SIZE_SMALL_TOOLBAR )
-        self.group_toolbutton.set_icon_widget( g_image )
-        self.group_toolbutton.set_label( "Group" )
-        self.group_toolbutton.connect( 'toggled', self.toggle_grouping )
-        items.append( self.group_toolbutton )
-        self._set_tooltip( self.group_toolbutton, "Dot View - Click to group tasks by families" )
+        self.group_toolbutton.set_active(self.t.should_group_families)
+        g_image = gtk.image_new_from_stock(
+            'group', gtk.ICON_SIZE_SMALL_TOOLBAR)
+        self.group_toolbutton.set_icon_widget(g_image)
+        self.group_toolbutton.set_label("Group")
+        self.group_toolbutton.connect('toggled', self.toggle_grouping)
+        items.append(self.group_toolbutton)
+        self._set_tooltip(
+            self.group_toolbutton,
+            "Dot View - Click to group tasks by families")
 
         self.transpose_toolbutton = gtk.ToggleToolButton()
         self.transpose_toolbutton.set_active(False)
-        g_image = gtk.image_new_from_stock('transpose', gtk.ICON_SIZE_SMALL_TOOLBAR)
+        g_image = gtk.image_new_from_stock(
+            'transpose', gtk.ICON_SIZE_SMALL_TOOLBAR)
         self.transpose_toolbutton.set_icon_widget(g_image)
         self.transpose_toolbutton.set_label("Transpose")
         self.transpose_toolbutton.connect('toggled', self.toggle_transpose)
         items.append(self.transpose_toolbutton)
-        self._set_tooltip(self.transpose_toolbutton, "Dot View - Click to transpose view")
+        self._set_tooltip(
+            self.transpose_toolbutton, "Dot View - Click to transpose view")
 
         return items
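
Editor's note: the hunks above are mostly PEP8 whitespace cleanup around an existing pattern - a 'button_press_event' handler that builds and pops up a context menu on right click. A minimal PyGTK sketch of that pattern (not part of the patch; the menu contents are illustrative):

    import gtk

    def on_button_press(treeview, event):
        # React to right clicks (button 3) only, as in the handler above.
        if event.button != 3:
            return False
        if treeview.get_path_at_pos(int(event.x), int(event.y)) is None:
            return False
        menu = gtk.Menu()
        item = gtk.CheckMenuItem('Toggle Hide Task Headings')
        menu.append(item)
        item.show()
        menu.popup(None, None, None, event.button, event.time)
        return True  # swallow the event so the selection is not clobbered

    view = gtk.TreeView(gtk.ListStore(str))
    view.connect('button_press_event', on_button_press)
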
diff --git a/lib/cylc/gui/view_graph.py b/lib/cylc/gui/view_graph.py
index b1e9c03..3a75ee5 100644
--- a/lib/cylc/gui/view_graph.py
+++ b/lib/cylc/gui/view_graph.py
@@ -107,13 +107,11 @@ Dependency graph suite control interface.
         url = unicode(url.url)
         m = re.match('base:(.*)', url)
         if m:
-            #print 'BASE GRAPH'
             task_id = m.groups()[0]
             self.xdot.widget.set_tooltip_text(self.t.get_summary(task_id))
             return False
 
         # URL is task ID
-        #print 'LIVE TASK'
         self.xdot.widget.set_tooltip_text(self.t.get_summary(url))
         return False
 
@@ -132,9 +130,9 @@ Dependency graph suite control interface.
             'activate', self.focused_timezoom_direct, point_string)
 
         # TODO - pre cylc-6 could focus on a range of points (was hours-based).
-        #timezoom_item = gtk.MenuItem('Focus on Range')
-        #timezoom_item.connect(
-        #    'activate', self.focused_timezoom_popup, task_id)
+        # timezoom_item = gtk.MenuItem('Focus on Range')
+        # timezoom_item.connect(
+        #     'activate', self.focused_timezoom_popup, task_id)
 
         timezoom_reset_item = gtk.MenuItem('Focus Reset')
         timezoom_reset_item.connect('activate', self.focused_timezoom_direct,
@@ -179,7 +177,6 @@ Dependency graph suite control interface.
             menu.append(gtk.SeparatorMenuItem())
 
         menu.append(timezoom_item_direct)
-        #menu.append(timezoom_item)
         menu.append(timezoom_reset_item)
 
         menu.append(gtk.SeparatorMenuItem())
@@ -189,7 +186,8 @@ Dependency graph suite control interface.
 
         if type == 'live task':
             is_fam = (name in self.t.descendants)
-            default_menu = self.get_right_click_menu(task_id, task_is_family=is_fam)
+            default_menu = self.get_right_click_menu(
+                task_id, task_is_family=is_fam)
             dm_kids = default_menu.get_children()
             for item in reversed(dm_kids[:2]):
                 # Put task name and URL at the top.
@@ -544,7 +542,6 @@ Dependency graph suite control interface.
         hbox.pack_start(apply_button, False)
         hbox.pack_start(reset_button, False)
         hbox.pack_end(cancel_button, False)
-        #hbox.pack_end(help_button, False)
         vbox.pack_start(hbox)
 
         window.add(vbox)
diff --git a/lib/cylc/gui/view_tree.py b/lib/cylc/gui/view_tree.py
index 056515f..6c1997b 100644
--- a/lib/cylc/gui/view_tree.py
+++ b/lib/cylc/gui/view_tree.py
@@ -17,7 +17,8 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import gtk
-import os, re
+import os
+import re
 import gobject
 from updater_tree import TreeUpdater
 from gcapture import gcapture_tmpfile
@@ -27,11 +28,9 @@ from isodatetime.parsers import DurationParser
 
 
 class ControlTree(object):
-    """
-Text Treeview suite control interface.
-    """
-    def __init__(self, cfg, updater, theme, dot_size, info_bar, get_right_click_menu,
-                 log_colors, insert_task_popup ):
+    """Text Treeview suite control interface."""
+    def __init__(self, cfg, updater, theme, dot_size, info_bar,
+                 get_right_click_menu, log_colors, insert_task_popup):
 
         self.cfg = cfg
         self.updater = updater
@@ -47,18 +46,18 @@ Text Treeview suite control interface.
 
         self.ttree_paths = {}  # Cache dict of tree paths & states, names.
 
-    def get_control_widgets( self ):
+    def get_control_widgets(self):
         main_box = gtk.VBox()
-        main_box.pack_start( self.treeview_widgets(), expand=True, fill=True )
+        main_box.pack_start(self.treeview_widgets(), expand=True, fill=True)
 
         self.t = TreeUpdater(
-                self.cfg, self.updater, self.ttreeview, self.ttree_paths,
-                self.info_bar, self.theme, self.dot_size
+            self.cfg, self.updater, self.ttreeview, self.ttree_paths,
+            self.info_bar, self.theme, self.dot_size
         )
         self.t.start()
         return main_box
 
-    def toggle_grouping( self, toggle_item ):
+    def toggle_grouping(self, toggle_item):
         """Toggle grouping by visualisation families."""
         group_on = toggle_item.get_active()
         if group_on == self.t.should_group_families:
@@ -69,42 +68,46 @@ Text Treeview suite control interface.
         elif "text" not in self.cfg.ungrouped_views:
             self.cfg.ungrouped_views.append("text")
         self.t.should_group_families = group_on
-        if isinstance( toggle_item, gtk.ToggleToolButton ):
+        if isinstance(toggle_item, gtk.ToggleToolButton):
             if group_on:
                 tip_text = "Tree View - Click to ungroup families"
             else:
                 tip_text = "Tree View - Click to group tasks by families"
-            self._set_tooltip( toggle_item, tip_text )
-            self.group_menu_item.set_active( group_on )
+            self._set_tooltip(toggle_item, tip_text)
+            self.group_menu_item.set_active(group_on)
         else:
             if toggle_item != self.group_menu_item:
-                self.group_menu_item.set_active( group_on )
-            self.group_toolbutton.set_active( group_on )
+                self.group_menu_item.set_active(group_on)
+            self.group_toolbutton.set_active(group_on)
         self.t.update_gui()
         return False
 
     def stop(self):
         self.t.quit = True
 
-    def toggle_autoexpand( self, w ):
+    def toggle_autoexpand(self, w):
         self.t.autoexpand = not self.t.autoexpand
 
-    def treeview_widgets( self ):
+    def treeview_widgets(self):
         self.sort_col_num = 0
-        self.ttreestore = gtk.TreeStore(str, str, str, str, str, str, str, str, str, str, str, gtk.gdk.Pixbuf)
+        self.ttreestore = gtk.TreeStore(
+            str, str, str, str, str, str, str, str, str, str, str,
+            gtk.gdk.Pixbuf, int)
         self.ttreeview = gtk.TreeView()
         self.ttreeview.set_rules_hint(True)
-        self.tmodelfilter = self.ttreestore.filter_new() # TODO - REMOVE FILTER HERE?
+        # TODO - REMOVE FILTER HERE?
+        self.tmodelfilter = self.ttreestore.filter_new()
         self.tmodelsort = gtk.TreeModelSort(self.tmodelfilter)
         self.ttreeview.set_model(self.tmodelsort)
 
         ts = self.ttreeview.get_selection()
-        ts.set_mode( gtk.SELECTION_SINGLE )
+        ts.set_mode(gtk.SELECTION_SINGLE)
 
-        self.ttreeview.connect('button_press_event', self.on_treeview_button_pressed)
+        self.ttreeview.connect(
+            'button_press_event', self.on_treeview_button_pressed)
         headings = [
-                None, 'task', 'state', 'host', 'job system', 'job ID', 'T-submit', 'T-start',
-                'T-finish', 'dT-mean', 'latest message'
+            None, 'task', 'state', 'host', 'job system', 'job ID', 'T-submit',
+            'T-start', 'T-finish', 'dT-mean', 'latest message',
         ]
 
         for n in range(1, len(headings)):
@@ -114,6 +117,11 @@ Text Treeview suite control interface.
                 crp = gtk.CellRendererPixbuf()
                 tvc.pack_start(crp, False)
                 tvc.set_attributes(crp, pixbuf=11)
+            if n == 8:
+                # Pack in progress and text cell renderers.
+                prog_cr = gtk.CellRendererProgress()
+                tvc.pack_start(prog_cr, True)
+                tvc.set_cell_data_func(prog_cr, self._set_cell_text_time, n)
             cr = gtk.CellRendererText()
             tvc.pack_start(cr, True)
             if n == 6 or n == 7 or n == 8:
@@ -134,7 +142,7 @@ Text Treeview suite control interface.
 
         return vbox
 
-    def on_treeview_button_pressed( self, treeview, event ):
+    def on_treeview_button_pressed(self, treeview, event):
         # DISPLAY MENU ON RIGHT CLICK ONLY
         if event.button != 3:
             return False
@@ -142,22 +150,22 @@ Text Treeview suite control interface.
         # the following sets selection to the position at which the
         # right click was done (otherwise selection lags behind the
         # right click):
-        x = int( event.x )
-        y = int( event.y )
+        x = int(event.x)
+        y = int(event.y)
         time = event.time
-        pth = treeview.get_path_at_pos(x,y)
+        pth = treeview.get_path_at_pos(x, y)
 
         if pth is None:
             return False
 
         treeview.grab_focus()
         path, col, cellx, celly = pth
-        treeview.set_cursor( path, col, 0 )
+        treeview.set_cursor(path, col, 0)
 
         selection = treeview.get_selection()
         treemodel, iter = selection.get_selected()
-        point_string = treemodel.get_value( iter, 0 )
-        name = treemodel.get_value( iter, 1 )
+        point_string = treemodel.get_value(iter, 0)
+        name = treemodel.get_value(iter, 1)
         if point_string == name:
             # must have clicked on the top level point_string
             return
@@ -166,19 +174,19 @@ Text Treeview suite control interface.
 
         is_fam = (name in self.t.descendants)
 
-        menu = self.get_right_click_menu( task_id, task_is_family=is_fam )
+        menu = self.get_right_click_menu(task_id, task_is_family=is_fam)
 
         sep = gtk.SeparatorMenuItem()
         sep.show()
-        menu.append( sep )
+        menu.append(sep)
 
-        group_item = gtk.CheckMenuItem( 'Toggle Family Grouping' )
-        group_item.set_active( self.t.should_group_families )
-        menu.append( group_item )
-        group_item.connect( 'toggled', self.toggle_grouping )
+        group_item = gtk.CheckMenuItem('Toggle Family Grouping')
+        group_item.set_active(self.t.should_group_families)
+        menu.append(group_item)
+        group_item.connect('toggled', self.toggle_grouping)
         group_item.show()
 
-        menu.popup( None, None, None, event.button, event.time )
+        menu.popup(None, None, None, event.button, event.time)
 
         # TODO - popup menus are not automatically destroyed and can be
         # reused if saved; however, we need to reconstruct or at least
@@ -218,104 +226,128 @@ Text Treeview suite control interface.
             secsout = seconds
         return secsout
 
-    def change_sort_order( self, col, event=None, n=0 ):
+    def change_sort_order(self, col, event=None, n=0):
         if hasattr(event, "button") and event.button != 1:
             return False
         cols = self.ttreeview.get_columns()
         self.sort_col_num = n
         if cols[n].get_sort_order() == gtk.SORT_ASCENDING:
-            cols[n].set_sort_order( gtk.SORT_DESCENDING )
+            cols[n].set_sort_order(gtk.SORT_DESCENDING)
         else:
-            cols[n].set_sort_order( gtk.SORT_ASCENDING )
+            cols[n].set_sort_order(gtk.SORT_ASCENDING)
         return False
 
-    def on_popup_quit( self, b, lv, w ):
+    def on_popup_quit(self, b, lv, w):
         lv.quit()
-        self.quitters.remove( lv )
+        self.quitters.remove(lv)
         w.destroy()
 
     def refresh(self):
         self.t.update_gui()
 
-    def get_menuitems( self ):
+    def get_menuitems(self):
         """Return the menu items specific to this view."""
         items = []
-        autoex_item = gtk.CheckMenuItem( 'Toggle _Auto-Expand Tree' )
-        autoex_item.set_active( self.t.autoexpand )
-        items.append( autoex_item )
-        autoex_item.connect( 'activate', self.toggle_autoexpand )
-
-        self.group_menu_item = gtk.CheckMenuItem( 'Toggle _Family Grouping' )
-        self.group_menu_item.set_active( self.t.should_group_families )
-        items.append( self.group_menu_item )
-        self.group_menu_item.connect( 'toggled', self.toggle_grouping )
+        autoex_item = gtk.CheckMenuItem('Toggle _Auto-Expand Tree')
+        autoex_item.set_active(self.t.autoexpand)
+        items.append(autoex_item)
+        autoex_item.connect('activate', self.toggle_autoexpand)
+
+        self.group_menu_item = gtk.CheckMenuItem('Toggle _Family Grouping')
+        self.group_menu_item.set_active(self.t.should_group_families)
+        items.append(self.group_menu_item)
+        self.group_menu_item.connect('toggled', self.toggle_grouping)
         return items
 
-    def _set_tooltip( self, widget, tip_text ):
-        # Convenience function to add hover over text to a widget.
+    def _set_tooltip(self, widget, tip_text):
+        """Convenience function to add hover over text to a widget."""
         tip = gtk.Tooltips()
         tip.enable()
-        tip.set_tip( widget, tip_text )
+        tip.set_tip(widget, tip_text)
 
     def _set_cell_text_time(self, column, cell, model, iter_, n):
         """Remove the date part if it matches the last update date."""
         date_time_string = model.get_value(iter_, n)
         if "T" in self.updater.dt:
             last_update_date = self.updater.dt.split("T")[0]
-            date_time_string = date_time_string.replace(last_update_date
-                                                        + "T", "", 1)
+            date_time_string = date_time_string.replace(
+                last_update_date + "T", "", 1)
         if n == 8:
-            return cell.set_property("markup", date_time_string)
-        else:
-            return cell.set_property("text", date_time_string)
-
-    def get_toolitems( self ):
+            # Progress bar for estimated completion time.
+            if isinstance(cell, gtk.CellRendererText):
+                if date_time_string.endswith("?"):
+                    # Task running - show progress bar instead.
+                    cell.set_property('visible', False)
+                else:
+                    # Task not running - just show text
+                    cell.set_property('visible', True)
+                    cell.set_property('text', date_time_string)
+            if isinstance(cell, gtk.CellRendererProgress):
+                if date_time_string.endswith("?"):
+                    # Task running - show progress bar to estimated finish time.
+                    cell.set_property('visible', True)
+                    percent = model.get_value(iter_, 12)
+                    cell.set_property('value', percent)
+                else:
+                    # Task not running - show text cell instead.
+                    cell.set_property('visible', False)
+                    cell.set_property('value', 0)
+        cell.set_property("text", date_time_string)
+
+    def get_toolitems(self):
         """Return the tool bar items specific to this view."""
         items = []
 
         expand_button = gtk.ToolButton()
-        image = gtk.image_new_from_stock( gtk.STOCK_ADD, gtk.ICON_SIZE_SMALL_TOOLBAR )
-        expand_button.set_icon_widget( image )
-        expand_button.set_label( "Expand" )
-        self._set_tooltip( expand_button, "Tree View - Expand all" )
-        expand_button.connect( 'clicked', lambda x: self.ttreeview.expand_all() )
-        items.append( expand_button )
+        image = gtk.image_new_from_stock(
+            gtk.STOCK_ADD, gtk.ICON_SIZE_SMALL_TOOLBAR)
+        expand_button.set_icon_widget(image)
+        expand_button.set_label("Expand")
+        self._set_tooltip(expand_button, "Tree View - Expand all")
+        expand_button.connect('clicked', lambda x: self.ttreeview.expand_all())
+        items.append(expand_button)
 
         collapse_button = gtk.ToolButton()
-        image = gtk.image_new_from_stock( gtk.STOCK_REMOVE, gtk.ICON_SIZE_SMALL_TOOLBAR )
-        collapse_button.set_icon_widget( image )
-        collapse_button.set_label( "Collapse" )
-        collapse_button.connect( 'clicked', lambda x: self.ttreeview.collapse_all() )
-        self._set_tooltip( collapse_button, "Tree View - Collapse all" )
-        items.append( collapse_button )
+        image = gtk.image_new_from_stock(
+            gtk.STOCK_REMOVE, gtk.ICON_SIZE_SMALL_TOOLBAR)
+        collapse_button.set_icon_widget(image)
+        collapse_button.set_label("Collapse")
+        collapse_button.connect(
+            'clicked', lambda x: self.ttreeview.collapse_all())
+        self._set_tooltip(collapse_button, "Tree View - Collapse all")
+        items.append(collapse_button)
 
         self.group_toolbutton = gtk.ToggleToolButton()
-        self.group_toolbutton.set_active( self.t.should_group_families )
-        g_image = gtk.image_new_from_stock( 'group', gtk.ICON_SIZE_SMALL_TOOLBAR )
-        self.group_toolbutton.set_icon_widget( g_image )
-        self.group_toolbutton.set_label( "Group" )
-        self.group_toolbutton.connect( 'toggled', self.toggle_grouping )
-        self._set_tooltip( self.group_toolbutton, "Tree View - Click to group tasks by families" )
-        items.append( self.group_toolbutton )
+        self.group_toolbutton.set_active(self.t.should_group_families)
+        g_image = gtk.image_new_from_stock(
+            'group', gtk.ICON_SIZE_SMALL_TOOLBAR)
+        self.group_toolbutton.set_icon_widget(g_image)
+        self.group_toolbutton.set_label("Group")
+        self.group_toolbutton.connect('toggled', self.toggle_grouping)
+        self._set_tooltip(
+            self.group_toolbutton,
+            "Tree View - Click to group tasks by families")
+        items.append(self.group_toolbutton)
 
         return items
 
-class StandaloneControlTreeApp( ControlTree ):
-    def __init__(self, suite, owner, host, port ):
+
+class StandaloneControlTreeApp(ControlTree):
+    def __init__(self, suite, owner, host, port):
         gobject.threads_init()
-        ControlTree.__init__(self, suite, owner, host, port )
+        ControlTree.__init__(self, suite, owner, host, port)
 
-    def quit_gcapture( self ):
+    def quit_gcapture(self):
         for gwindow in self.gcapture_windows:
             if not gwindow.quit_already:
-                gwindow.quit( None, None )
+                gwindow.quit(None, None)
 
     def delete_event(self, widget, event, data=None):
         self.quit_gcapture()
-        ControlTree.delete_event( self, widget, event, data )
+        ControlTree.delete_event(self, widget, event, data)
         gtk.main_quit()
 
-    def click_exit( self, foo ):
+    def click_exit(self, foo):
         self.quit_gcapture()
-        ControlTree.click_exit( self, foo )
+        ControlTree.click_exit(self, foo)
         gtk.main_quit()
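
Editor's note: beyond the PEP8 cleanup, this file gains a real feature - the T-finish column now packs a gtk.CellRendererProgress alongside the text renderer, and the cell data function shows the progress bar (driven by a new integer model column) while a task is running, or plain text once it has finished. A minimal sketch of that technique (not part of the patch; the column layout and sample values are illustrative):

    import gtk

    def set_finish_cell(column, cell, model, iter_, col):
        text = model.get_value(iter_, col)
        running = text.endswith("?")  # estimated finish time, task still running
        if isinstance(cell, gtk.CellRendererProgress):
            cell.set_property('visible', running)
            cell.set_property(
                'value', model.get_value(iter_, col + 1) if running else 0)
        else:
            cell.set_property('visible', not running)
            cell.set_property('text', text)

    store = gtk.ListStore(str, int)            # finish time text, percent complete
    store.append(["2015-12-12T15:00Z?", 40])   # running task: show progress bar
    store.append(["2015-12-12T12:00Z", 0])     # finished task: show the time
    view = gtk.TreeView(store)
    column = gtk.TreeViewColumn('T-finish')
    prog_cr = gtk.CellRendererProgress()
    text_cr = gtk.CellRendererText()
    column.pack_start(prog_cr, True)
    column.pack_start(text_cr, True)
    column.set_cell_data_func(prog_cr, set_finish_cell, 0)
    column.set_cell_data_func(text_cr, set_finish_cell, 0)
    view.append_column(column)
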
diff --git a/lib/cylc/gui/warning_dialog.py b/lib/cylc/gui/warning_dialog.py
index 52ba067..1f5674a 100644
--- a/lib/cylc/gui/warning_dialog.py
+++ b/lib/cylc/gui/warning_dialog.py
@@ -18,39 +18,41 @@
 
 import gtk
 import pygtk
-####pygtk.require('2.0')
 from util import get_icon
 
+
 class warning_dialog(object):
-    def __init__( self, msg, parent=None ):
-        self.dialog = gtk.MessageDialog( parent,
-                gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_WARNING,
-                gtk.BUTTONS_CLOSE, msg )
+    def __init__(self, msg, parent=None):
+        self.dialog = gtk.MessageDialog(
+            parent, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_WARNING,
+            gtk.BUTTONS_CLOSE, msg)
         self.dialog.set_icon(get_icon())
 
-    def warn( self ):
+    def warn(self):
         self.dialog.run()
         self.dialog.destroy()
 
+
 class info_dialog(object):
-    def __init__( self, msg, parent=None ):
-        self.dialog = gtk.MessageDialog( parent,
-                gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,
-                gtk.BUTTONS_OK, msg )
+    def __init__(self, msg, parent=None):
+        self.dialog = gtk.MessageDialog(
+            parent, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO,
+            gtk.BUTTONS_OK, msg)
         self.dialog.set_icon(get_icon())
 
-    def inform( self ):
+    def inform(self):
         self.dialog.run()
         self.dialog.destroy()
 
+
 class question_dialog(object):
-    def __init__( self, msg, parent=None ):
-        self.dialog = gtk.MessageDialog( parent,
-                gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION,
-                gtk.BUTTONS_YES_NO, msg )
+    def __init__(self, msg, parent=None):
+        self.dialog = gtk.MessageDialog(
+            parent, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION,
+            gtk.BUTTONS_YES_NO, msg)
         self.dialog.set_icon(get_icon())
 
-    def ask( self ):
+    def ask(self):
         response = self.dialog.run()
         self.dialog.destroy()
         return response
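
Editor's note: these dialog wrappers are unchanged in behaviour (PEP8 cleanup plus removal of the stale pygtk.require call). Typical usage from gcylc code, assuming lib/ is on PYTHONPATH and a GTK display is available:

    import gtk
    from cylc.gui.warning_dialog import (
        warning_dialog, info_dialog, question_dialog)

    warning_dialog("Suite connection lost").warn()
    info_dialog("Suite reloaded").inform()
    if question_dialog("Stop the suite now?").ask() == gtk.RESPONSE_YES:
        pass  # proceed with shutdown
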
diff --git a/lib/cylc/job_file.py b/lib/cylc/job_file.py
index 554f8ca..662fd08 100644
--- a/lib/cylc/job_file.py
+++ b/lib/cylc/job_file.py
@@ -24,6 +24,7 @@ import StringIO
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.task_id import TaskID
 from cylc.batch_sys_manager import BATCH_SYS_MANAGER
+from cylc.task_message import TaskMessage
 
 
 class JobFile(object):
@@ -41,12 +42,12 @@ class JobFile(object):
     def write(self, job_conf):
         """Write each job script section in turn."""
 
-        ############# !!!!!!!! WARNING !!!!!!!!!!! #####################
+        # ########### !!!!!!!! WARNING !!!!!!!!!!! #####################
         # BE EXTREMELY WARY OF CHANGING THE ORDER OF JOB SCRIPT SECTIONS
         # Users may be relying on the existing order (see for example
         # the comment below on suite bin path being required before
         # task runtime environment setup).
-        ################################################################
+        # ##############################################################
 
         # Access to cylc must be configured before user environment so
         # that cylc commands can be used in defining user environment
@@ -85,6 +86,8 @@ class JobFile(object):
         for prefix, value in [
                 ("# Suite: ", job_conf['suite name']),
                 ("# Task: ", job_conf['task id']),
+                (BATCH_SYS_MANAGER.LINE_PREFIX_JOB_LOG_DIR,
+                 os.path.dirname(job_conf['common job log path'])),
                 (BATCH_SYS_MANAGER.LINE_PREFIX_BATCH_SYS_NAME,
                  job_conf['batch system name']),
                 (BATCH_SYS_MANAGER.LINE_PREFIX_BATCH_SUBMIT_CMD_TMPL,
@@ -137,13 +140,17 @@ prelude''')
         "typeset" below instead of the more sensible but bash-specific "local".
 
         """
-        fail_signals_string = " ".join(
-            BATCH_SYS_MANAGER.get_fail_signals(job_conf))
+        args = {
+            "signals_str": " ".join(
+                BATCH_SYS_MANAGER.get_fail_signals(job_conf)),
+            "priority": TaskMessage.CRITICAL,
+            "message1": TaskMessage.FAILED,
+            "message2": TaskMessage.FAIL_MESSAGE_PREFIX}
         handle.write(r"""
 
 # TRAP ERROR SIGNALS:
 set -u # Fail when using an undefined variable
-FAIL_SIGNALS='""" + fail_signals_string + """'
+FAIL_SIGNALS='%(signals_str)s'
 TRAP_FAIL_SIGNAL() {
     typeset SIGNAL=$1
     echo "Received signal $SIGNAL" >&2
@@ -151,26 +158,27 @@ TRAP_FAIL_SIGNAL() {
     for S in ${VACATION_SIGNALS:-} $FAIL_SIGNALS; do
         trap "" $S
     done
-    if [[ -n ${CYLC_TASK_LOG_ROOT:-} ]]; then
-        {
-            echo "CYLC_JOB_EXIT=$SIGNAL"
-            date -u +'CYLC_JOB_EXIT_TIME=%FT%H:%M:%SZ'
-        } >>$CYLC_TASK_LOG_ROOT.status
+    if [[ -n "${CYLC_TASK_MESSAGE_STARTED_PID:-}" ]]; then
+        wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
     fi
-    cylc task failed "Task job script received signal $@"
+    cylc task message -p '%(priority)s' "%(message2)s$SIGNAL" '%(message1)s'
     exit 1
 }
 for S in $FAIL_SIGNALS; do
     trap "TRAP_FAIL_SIGNAL $S" $S
 done
-unset S""")
+unset S""" % args)
 
         vacation_signal = BATCH_SYS_MANAGER.get_vacation_signal(job_conf)
         if vacation_signal:
+            args = {
+                "signals_str": vacation_signal,
+                "priority": TaskMessage.WARNING,
+                "message": TaskMessage.VACATION_MESSAGE_PREFIX}
             handle.write(r"""
 
 # TRAP VACATION SIGNALS:
-VACATION_SIGNALS='""" + vacation_signal + r"""'
+VACATION_SIGNALS='%(signals_str)s'
 TRAP_VACATION_SIGNAL() {
     typeset SIGNAL=$1
     echo "Received signal $SIGNAL" >&2
@@ -178,17 +186,17 @@ TRAP_VACATION_SIGNAL() {
     for S in $VACATION_SIGNALS $FAIL_SIGNALS; do
         trap "" $S
     done
-    if [[ -n ${CYLC_TASK_LOG_ROOT:-} && -f $CYLC_TASK_LOG_ROOT.status ]]; then
-        rm -f $CYLC_TASK_LOG_ROOT.status
+    if [[ -n "${CYLC_TASK_MESSAGE_STARTED_PID:-}" ]]; then
+        wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
     fi
-    cylc task message -p WARNING "Task job script vacated by signal $@"
+    cylc task message -p '%(priority)s' "%(message)s$SIGNAL"
     exit 1
 }
 S=
 for S in $VACATION_SIGNALS; do
     trap "TRAP_VACATION_SIGNAL $S" $S
 done
-unset S""")
+unset S""" % args)
 
     @classmethod
     def _write_init_script(cls, handle, job_conf):
@@ -283,6 +291,7 @@ unset S""")
         handle.write("\nexport CYLC_TASK_WORK_DIR=" + task_work_dir)
         # DEPRECATED
         handle.write("\nexport CYLC_TASK_WORK_PATH=$CYLC_TASK_WORK_DIR")
+        handle.write("\nexport CYLC_JOB_PID=$$")
 
     @classmethod
     def _write_env_script(cls, handle, job_conf):
@@ -344,11 +353,11 @@ unset S""")
 
         # NOTE ON TILDE EXPANSION:
         # The code above handles the following correctly:
-        #| ~foo/bar
-        #| ~/bar
-        #| ~/filename with spaces
-        #| ~foo
-        #| ~
+        # | ~foo/bar
+        # | ~/bar
+        # | ~/filename with spaces
+        # | ~foo
+        # | ~
 
         # NOTE: the reason for separate export of user-specified
         # variables is this: inline export does not activate the
@@ -365,11 +374,8 @@ unset S""")
         handle.write(r"""
 
 # SEND TASK STARTED MESSAGE:
-{
-    echo "CYLC_JOB_PID=$$"
-    date -u +'CYLC_JOB_INIT_TIME=%FT%H:%M:%SZ'
-} >>$CYLC_TASK_LOG_ROOT.status
-cylc task started
+cylc task message '%(message)s' &
+CYLC_TASK_MESSAGE_STARTED_PID=$!
 
 # SHARE DIRECTORY CREATE:
 mkdir -p $CYLC_SUITE_SHARE_DIR || true
@@ -377,7 +383,7 @@ mkdir -p $CYLC_SUITE_SHARE_DIR || true
 # WORK DIRECTORY CREATE:
 mkdir -p $(dirname $CYLC_TASK_WORK_DIR) || true
 mkdir -p $CYLC_TASK_WORK_DIR
-cd $CYLC_TASK_WORK_DIR""")
+cd $CYLC_TASK_WORK_DIR""" % {"message": TaskMessage.STARTED})
 
     def _write_manual_environment(self, handle, job_conf):
         """Write a transferable environment for detaching tasks."""
@@ -441,7 +447,7 @@ echo""")
 echo 'JOB SCRIPT EXITING: THIS TASK HANDLES ITS OWN COMPLETION MESSAGING'
 trap '' EXIT
 
-#EOF""")
+""")
         else:
             handle.write(r"""
 
@@ -450,16 +456,19 @@ cd
 rmdir $CYLC_TASK_WORK_DIR 2>/dev/null || true
 
 # SEND TASK SUCCEEDED MESSAGE:
-{
-    echo 'CYLC_JOB_EXIT=SUCCEEDED'
-    date -u +'CYLC_JOB_EXIT_TIME=%FT%H:%M:%SZ'
-} >>$CYLC_TASK_LOG_ROOT.status
-cylc task succeeded
+wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
+cylc task message '%(message)s'
 
 echo 'JOB SCRIPT EXITING (TASK SUCCEEDED)'
 trap '' EXIT
 
-#EOF""")
+""" % {"message": TaskMessage.SUCCEEDED})
+
+        task_name, point_string = TaskID.split(job_conf['task id'])
+        job_conf['absolute submit number']
+        handle.write("%s%s\n" % (
+            BATCH_SYS_MANAGER.LINE_PREFIX_EOF,
+            os.path.dirname(job_conf['common job log path'])))
 
 
 JOB_FILE = JobFile()
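
Editor's note: the trap handlers are now built from %-format templates filled by an args dict, and task messaging replaces the old direct writes to $CYLC_TASK_LOG_ROOT.status. A minimal sketch of the templating step (not part of the patch; the signal list and the TaskMessage string values shown are illustrative stand-ins, since their definitions are not part of this diff):

    # Illustrative stand-ins for the TaskMessage constants.
    args = {
        "signals_str": "EXIT ERR TERM XCPU",
        "priority": "CRITICAL",
        "message1": "failed",
        "message2": "Task job script received signal ",
    }
    script = r"""
    FAIL_SIGNALS='%(signals_str)s'
    TRAP_FAIL_SIGNAL() {
        typeset SIGNAL=$1
        cylc task message -p '%(priority)s' "%(message2)s$SIGNAL" '%(message1)s'
        exit 1
    }
    for S in $FAIL_SIGNALS; do
        trap "TRAP_FAIL_SIGNAL $S" $S
    done
    """ % args
    print script
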
diff --git a/lib/cylc/job_host.py b/lib/cylc/job_host.py
index 13b2229..6ba99bc 100644
--- a/lib/cylc/job_host.py
+++ b/lib/cylc/job_host.py
@@ -18,7 +18,8 @@
 """Manage a remote job host."""
 
 import os
-from subprocess import check_call
+from pipes import quote
+from subprocess import Popen, PIPE
 from logging import getLogger, INFO
 import shlex
 
@@ -30,7 +31,18 @@ class RemoteJobHostInitError(Exception):
     """Cannot initialise suite run directory of remote job host."""
 
     def __str__(self):
-        return "%s: initialisation did not complete" % self.args[0]
+        user_at_host, cmd_str, ret_code, out, err = self.args
+        ret = (
+            # user_at_host
+            "%s: initialisation did not complete:\n" +
+            # return code  # command
+            "COMMAND FAILED (%d): %s\n"
+        ) % (user_at_host, ret_code, cmd_str)
+        for label, item in ("STDOUT", out), ("STDERR", err):
+            if item:
+                for line in item.splitlines(True):  # keep newline chars
+                    ret += "COMMAND %s: %s" % (label, line)
+        return ret
 
 
 class RemoteJobHostManager(object):
@@ -74,26 +86,28 @@ class RemoteJobHostManager(object):
         suite_run_py = os.path.join(suite_run_dir, "python")
         if os.path.isdir(suite_run_py):
             sources.append(suite_run_py)
-        try:
-            r_suite_run_dir = GLOBAL_CFG.get_derived_host_item(
-                suite_name, 'suite run directory', host, owner)
-            r_log_job_dir = GLOBAL_CFG.get_derived_host_item(
-                suite_name, 'suite job log directory', host, owner)
-            getLogger('main').log(INFO, 'Initialising %s:%s' % (
-                user_at_host, r_suite_run_dir))
-
-            ssh_tmpl = GLOBAL_CFG.get_host_item(
-                'remote shell template', host, owner).replace(" %s", "")
-            scp_tmpl = GLOBAL_CFG.get_host_item(
-                'remote copy template', host, owner)
-
-            cmd1 = shlex.split(ssh_tmpl) + [
-                user_at_host,
-                'mkdir -p "%s" "%s"' % (r_suite_run_dir, r_log_job_dir)]
-            cmd2 = shlex.split(scp_tmpl) + ["-r"] + sources + [
-                user_at_host + ":" + r_suite_run_dir + "/"]
-            for cmd in [cmd1, cmd2]:
-                check_call(cmd)
-        except Exception:
-            raise RemoteJobHostInitError(user_at_host)
+        r_suite_run_dir = GLOBAL_CFG.get_derived_host_item(
+            suite_name, 'suite run directory', host, owner)
+        r_log_job_dir = GLOBAL_CFG.get_derived_host_item(
+            suite_name, 'suite job log directory', host, owner)
+        getLogger('main').log(INFO, 'Initialising %s:%s' % (
+            user_at_host, r_suite_run_dir))
+
+        ssh_tmpl = GLOBAL_CFG.get_host_item(
+            'remote shell template', host, owner).replace(" %s", "")
+        scp_tmpl = GLOBAL_CFG.get_host_item(
+            'remote copy template', host, owner)
+
+        cmd1 = shlex.split(ssh_tmpl) + [
+            "-n", user_at_host,
+            'mkdir', '-p', r_suite_run_dir, r_log_job_dir]
+        cmd2 = shlex.split(scp_tmpl) + ["-r"] + sources + [
+            user_at_host + ":" + r_suite_run_dir + "/"]
+        for cmd in [cmd1, cmd2]:
+            proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
+            out, err = proc.communicate()
+            if proc.wait():
+                raise RemoteJobHostInitError(
+                    user_at_host, " ".join([quote(item) for item in cmd]),
+                    proc.returncode, out, err)
         self.initialised_hosts.append(user_at_host)
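
Editor's note: remote-host initialisation now captures each command's output instead of using check_call, so a failure reports the exact command plus its stdout and stderr. A minimal sketch of that pattern (not part of the patch; the helper name and example command are illustrative):

    from pipes import quote
    from subprocess import Popen, PIPE

    from cylc.job_host import RemoteJobHostInitError

    def run_or_raise(cmd, user_at_host):
        # Run a command, capturing output; raise a detailed error on failure.
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        out, err = proc.communicate()
        if proc.returncode:
            raise RemoteJobHostInitError(
                user_at_host, " ".join(quote(item) for item in cmd),
                proc.returncode, out, err)
        return out

    # e.g.:
    # run_or_raise(
    #     ["ssh", "-n", "user@host", "mkdir", "-p", "cylc-run/my-suite"],
    #     "user@host")
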
diff --git a/lib/cylc/job_logs.py b/lib/cylc/job_logs.py
deleted file mode 100644
index 9b139cc..0000000
--- a/lib/cylc/job_logs.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""Logging of output from job activities."""
-
-import os
-import logging
-from shutil import rmtree
-
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.mkdir_p import mkdir_p
-from cylc.wallclock import get_current_time_string
-
-
-class CommandLogger(object):
-    """Log daemon-invoked command output to the job log dir."""
-
-    LOGGING_PRIORITY = {
-        "INFO": logging.INFO,
-        "NORMAL": logging.INFO,
-        "WARNING": logging.WARNING,
-        "ERROR": logging.ERROR,
-        "CRITICAL": logging.CRITICAL,
-        "DEBUG": logging.DEBUG,
-    }
-
-    # Format string for single line output
-    JOB_LOG_FMT_1 = "%(timestamp)s %(mesg_type)s: %(mesg)s"
-    # Format string for multi-line output
-    JOB_LOG_FMT_M = "%(timestamp)s %(mesg_type)s:\n\n%(mesg)s\n"
-
-    @classmethod
-    def get_latest_job_log(cls, suite, task_name, task_point):
-        """Return the latest job log path on the suite host."""
-
-        suite_job_log_dir = GLOBAL_CFG.get_derived_host_item(
-            suite, "suite job log directory")
-        the_rest = os.path.join(str(task_point), task_name, "NN", "job")
-        return os.path.join(suite_job_log_dir, the_rest)
- 
-    @classmethod
-    def get_create_job_log_path(
-            cls, suite, task_name, task_point, submit_num, new_mode=False):
-        """Return a new job log path on the suite host, in two parts.
-
-        /part1/part2
-
-        * part1: the top level job log directory on the suite host.
-        * part2: the rest, which is also used on remote task hosts.
-
-        The full local job log directory is created if necessary, and its
-        parent symlinked to NN (submit number).
-
-        """
-
-        suite_job_log_dir = GLOBAL_CFG.get_derived_host_item(
-            suite, "suite job log directory")
-
-        the_rest_dir = os.path.join(
-            str(task_point), task_name, "%02d" % int(submit_num))
-        the_rest = os.path.join(the_rest_dir, "job")
-
-        local_log_dir = os.path.join(suite_job_log_dir, the_rest_dir)
-
-        if new_mode:
-            try:
-                rmtree(local_log_dir)
-            except OSError:
-                pass
-
-        mkdir_p(local_log_dir)
-        target = os.path.join(os.path.dirname(local_log_dir), "NN")
-        try:
-            os.unlink(target)
-        except OSError:
-            pass
-        try:
-            os.symlink(os.path.basename(local_log_dir), target)
-        except OSError as exc:
-            if not exc.filename:
-                exc.filename = target
-            raise exc
-        return suite_job_log_dir, the_rest
-
-    def __init__(self, suite, task_name, task_point):
-        dir_ = GLOBAL_CFG.get_derived_host_item(
-            suite, "suite job log directory")
-        self.base_path = os.path.join(dir_, str(task_point), task_name)
-        self.suite_logger = logging.getLogger("main")
-
-    def append_to_log(self, submit_num, log_type, out=None, err=None):
-        """Write new command output to the appropriate log file."""
-        sub_num = "%02d" % int(submit_num)
-        dir_ = os.path.join(self.base_path, sub_num)
-        mkdir_p(dir_)
-        job_log_handle = open(os.path.join(dir_, "job-activity.log"), "a")
-        timestamp = get_current_time_string()
-        self._write_to_log(job_log_handle, timestamp, log_type + "-OUT", out)
-        self._write_to_log(job_log_handle, timestamp, log_type + "-ERR", err)
-        job_log_handle.close()
-
-    def _write_to_log(self, job_log_handle, timestamp, mesg_type, mesg):
-        """Write message to the logs."""
-        if mesg:
-            if mesg_type.endswith("-ERR"):
-                self.suite_logger.warning(mesg)
-            else:
-                self.suite_logger.info(mesg)
-            if len(mesg.splitlines()) > 1:
-                fmt = self.JOB_LOG_FMT_M
-            else:
-                fmt = self.JOB_LOG_FMT_1
-            if not mesg.endswith("\n"):
-                mesg += "\n"
-            job_log_handle.write(fmt % {
-                "timestamp": timestamp,
-                "mesg_type": mesg_type,
-                "mesg": mesg})
diff --git a/lib/cylc/mkdir_p.py b/lib/cylc/mkdir_p.py
index cf801c7..a61f853 100644
--- a/lib/cylc/mkdir_p.py
+++ b/lib/cylc/mkdir_p.py
@@ -28,18 +28,20 @@
 # Judging from discussion on the Python dev list in 2010, this problem
 # will be fixed in Python 3.?.  For now we have to roll our own ...
 
-import os, errno
+import errno
+import os
 
-def mkdir_p( path, mode=None ):
+
+def mkdir_p(path, mode=None):
     if mode:
         # reset mode and get current value
-        old_mode = os.umask( 0 )
+        old_mode = os.umask(0)
 
     try:
         if mode:
-            os.makedirs( path, int(mode, 8) )
+            os.makedirs(path, int(mode, 8))
         else:
-            os.makedirs( path )
+            os.makedirs(path)
 
     except OSError, err:
         if err.errno != errno.EEXIST:
@@ -49,4 +51,4 @@ def mkdir_p( path, mode=None ):
             pass
 
     if mode:
-        os.umask( old_mode )
+        os.umask(old_mode)
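
Editor's note: behaviour is unchanged by the cleanup above; usage remains as before, with the mode given as an octal string and applied via a temporary zero umask:

    from cylc.mkdir_p import mkdir_p

    mkdir_p("/tmp/cylc-demo/a/b/c")           # like `mkdir -p`; no error if it exists
    mkdir_p("/tmp/cylc-demo/shared", "0775")  # explicit group-writable mode
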
diff --git a/lib/cylc/mp_pool.py b/lib/cylc/mp_pool.py
index 29b25ba..39158fc 100644
--- a/lib/cylc/mp_pool.py
+++ b/lib/cylc/mp_pool.py
@@ -29,90 +29,127 @@ Some notes:
   (early versions of this module gave a choice of process or thread).
 """
 
-import time
+import fileinput
 import logging
 from pipes import quote
 from subprocess import Popen, PIPE
 import multiprocessing
+from tempfile import TemporaryFile
+import time
+import traceback
 
 from cylc.batch_sys_manager import BATCH_SYS_MANAGER
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 import cylc.flags
+from cylc.wallclock import get_current_time_string
 
 
-CMD_TYPE_JOB_SUBMISSION = 0
-CMD_TYPE_JOB_POLL_KILL = 1
-CMD_TYPE_EVENT_HANDLER = 2
-
-TRUE = 1
-FALSE = 0
-JOB_SKIPPED_FLAG = 999
-
-# Shared memory flag.
-STOP_JOB_SUBMISSION = multiprocessing.Value('i', FALSE)
-
-
-def _run_command(
-        cmd_type, cmd, is_bg_submit=None, stdin_file_path=None, env=None,
-        shell=False):
+def _run_command(ctx):
     """Execute a shell command and capture its output and exit status."""
 
-    cmd_result = {'CMD': cmd, 'EXIT': None, 'OUT': None, 'ERR': None}
-
     if cylc.flags.debug:
-        if shell:
-            print cmd
+        if ctx.cmd_kwargs.get('shell'):
+            print ctx.cmd
         else:
-            print ' '.join([quote(cmd_str) for cmd_str in cmd])
+            print ' '.join([quote(cmd_str) for cmd_str in ctx.cmd])
 
-    if (STOP_JOB_SUBMISSION.value == TRUE
-            and cmd_type == CMD_TYPE_JOB_SUBMISSION):
-        cmd_result['OUT'] = "job submission skipped (suite stopping)"
-        cmd_result['EXIT'] = JOB_SKIPPED_FLAG
-        return cmd_result
+    if (SuiteProcPool.STOP_JOB_SUBMISSION.value and
+            ctx.cmd_key == SuiteProcPool.JOB_SUBMIT):
+        ctx.err = "job submission skipped (suite stopping)"
+        ctx.ret_code = SuiteProcPool.JOB_SKIPPED_FLAG
+        ctx.timestamp = get_current_time_string()
+        return ctx
 
     try:
         stdin_file = None
-        if stdin_file_path:
-            stdin_file = open(stdin_file_path)
+        if ctx.cmd_kwargs.get('stdin_file_paths'):
+            stdin_file = TemporaryFile()
+            for file_path in ctx.cmd_kwargs['stdin_file_paths']:
+                for line in open(file_path):
+                    stdin_file.write(line)
+            stdin_file.seek(0)
+        elif ctx.cmd_kwargs.get('stdin_str'):
+            stdin_file = PIPE
         proc = Popen(
-            cmd, stdin=stdin_file, stdout=PIPE, stderr=PIPE,
-            env=env, shell=shell)
-    except (IOError, OSError) as exc:
-        cmd_result['EXIT'] = 1
-        cmd_result['ERR'] = str(exc)
+            ctx.cmd, stdin=stdin_file, stdout=PIPE, stderr=PIPE,
+            env=ctx.cmd_kwargs.get('env'), shell=ctx.cmd_kwargs.get('shell'))
+    except IOError as exc:
+        if cylc.flags.debug:
+            traceback.print_exc()
+        ctx.ret_code = 1
+        ctx.err = str(exc)
+    except OSError as exc:
+        if exc.filename is None:
+            exc.filename = ctx.cmd[0]
+        if cylc.flags.debug:
+            traceback.print_exc()
+        ctx.ret_code = 1
+        ctx.err = str(exc)
     else:
-        # Does this command behave like a background job submit where:
-        # 1. The process should print its job ID to STDOUT.
-        # 2. The process should then continue in background.
-        if is_bg_submit:  # behave like background job submit?
-            # Capture just the echoed PID then move on.
-            # N.B. Some hosts print garbage to STDOUT when going through a
-            # login shell, so we want to try a few lines
-            cmd_result['EXIT'] = 0
-            cmd_result['OUT'] = ""
-            for _ in range(10):  # Try 10 lines
-                line = proc.stdout.readline()
-                cmd_result['OUT'] += line
-                if line.startswith(BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID):
-                    break
-            # Check if submission is OK or not
-            if not cmd_result['OUT'].rstrip():
-                ret_code = proc.poll()
-                if ret_code is not None:
-                    cmd_result['OUT'], cmd_result['ERR'] = proc.communicate()
-                    cmd_result['EXIT'] = ret_code
-        else:
-            cmd_result['EXIT'] = proc.wait()
-            if cmd_result['EXIT'] is not None:
-                cmd_result['OUT'], cmd_result['ERR'] = proc.communicate()
-
-    return cmd_result
+        ctx.out, ctx.err = proc.communicate(ctx.cmd_kwargs.get('stdin_str'))
+        ctx.ret_code = proc.wait()
+
+    ctx.timestamp = get_current_time_string()
+    return ctx
+
+
+class SuiteProcContext(object):
+    """Represent the context of a command to run."""
+
+    # Format string for single line output
+    JOB_LOG_FMT_1 = "%(timestamp)s [%(cmd_key)s %(attr)s] %(mesg)s"
+    # Format string for multi-line output
+    JOB_LOG_FMT_M = "%(timestamp)s [%(cmd_key)s %(attr)s]\n\n%(mesg)s\n"
+
+    def __init__(self, cmd_key, cmd, **cmd_kwargs):
+        self.timestamp = get_current_time_string()
+        self.cmd_key = cmd_key
+        self.cmd = cmd
+        self.cmd_kwargs = cmd_kwargs
+
+        self.err = cmd_kwargs.get('err')
+        self.ret_code = cmd_kwargs.get('ret_code')
+        self.out = cmd_kwargs.get('out')
+
+    def __str__(self):
+        ret = ""
+        for attr in "cmd", "ret_code", "out", "err":
+            value = getattr(self, attr, None)
+            if value is not None and str(value).strip():
+                mesg = ""
+                if attr == "cmd" and self.cmd_kwargs.get("stdin_file_paths"):
+                    mesg += "cat"
+                    for file_path in self.cmd_kwargs.get("stdin_file_paths"):
+                        mesg += " " + quote(file_path)
+                    mesg += " | "
+                if attr == "cmd" and isinstance(value, list):
+                    mesg += " ".join(quote(item) for item in value)
+                else:
+                    mesg = str(value).strip()
+                if attr == "cmd" and self.cmd_kwargs.get("stdin_str"):
+                    mesg += " <<<%s" % quote(self.cmd_kwargs.get("stdin_str"))
+                if len(mesg.splitlines()) > 1:
+                    fmt = self.JOB_LOG_FMT_M
+                else:
+                    fmt = self.JOB_LOG_FMT_1
+                if not mesg.endswith("\n"):
+                    mesg += "\n"
+                ret += fmt % {
+                    "timestamp": self.timestamp,
+                    "cmd_key": self.cmd_key,
+                    "attr": attr,
+                    "mesg": mesg}
+        return ret
 
 
 class SuiteProcPool(object):
     """Use a process pool to execute shell commands."""
 
+    JOB_SUBMIT = "job-submit"
+    JOB_SKIPPED_FLAG = 999
+    # Shared memory flag.
+    STOP_JOB_SUBMISSION = multiprocessing.Value('i', 0)
+
     _INSTANCE = None
 
     @classmethod
@@ -138,41 +175,34 @@ class SuiteProcPool(object):
         self.log.debug(
             "Initializing process pool, size %d" % self.pool_size)
         self.pool = multiprocessing.Pool(processes=self.pool_size)
-        self.unhandled_results = []
+        self.results = {}
 
-    def put_command(
-            self, cmd_type, cmd, callback, is_bg_submit=False,
-            stdin_file_path=None, env=None, shell=False):
+    def put_command(self, ctx, callback):
         """Queue a new shell command to execute."""
         try:
-            result = self.pool.apply_async(
-                _run_command,
-                (cmd_type, cmd, is_bg_submit, stdin_file_path, env, shell))
+            result = self.pool.apply_async(_run_command, [ctx])
         except AssertionError as exc:
             self.log.warning("%s\n  %s\n %s" % (
                 str(exc),
                 "Rejecting command (pool closed)",
-                cmd))
+                ctx.cmd))
         else:
-            if callback:
-                self.unhandled_results.append((result, callback))
+            self.results[id(result)] = (result, callback)
 
     def handle_results_async(self):
         """Pass any available results to their associated callback."""
-        still_to_do = []
-        for item in self.unhandled_results:
-            res, callback = item
-            if res.ready():
-                val = res.get()
-                callback(val)
-            else:
-                still_to_do.append((res, callback))
-        self.unhandled_results = still_to_do
+        for result_id, item in self.results.items():
+            result, callback = item
+            if result.ready():
+                self.results.pop(result_id)
+                value = result.get()
+                if callable(callback):
+                    callback(value)
 
     @classmethod
     def stop_job_submission(cls):
         """Set STOP_JOB_SUBMISSION flag."""
-        STOP_JOB_SUBMISSION.value = TRUE
+        cls.STOP_JOB_SUBMISSION.value = 1
 
     def close(self):
         """Close the pool to new commands."""
@@ -226,13 +256,13 @@ def main():
 
     for i in range(3):
         com = "sleep 5 && echo Hello from JOB " + str(i)
-        pool.put_command(CMD_TYPE_JOB_SUBMISSION, com, print_result)
+        pool.put_command(SuiteProcPool.JOB_SUBMIT, com, print_result)
         com = "sleep 5 && echo Hello from POLL " + str(i)
-        pool.put_command(CMD_TYPE_JOB_POLL_KILL, com, print_result)
+        pool.put_command("poll", com, print_result)
         com = "sleep 5 && echo Hello from HANDLER " + str(i)
-        pool.put_command(CMD_TYPE_EVENT_HANDLER, com, print_result)
+        pool.put_command("event-handler", com, print_result)
         com = "sleep 5 && echo Hello from HANDLER && badcommand"
-        pool.put_command(CMD_TYPE_EVENT_HANDLER, com, print_result)
+        pool.put_command("event-handler", com, print_result)
 
     log.info('  sleeping')
     time.sleep(3)
@@ -240,7 +270,7 @@ def main():
     log.info('  sleeping')
     time.sleep(3)
     pool.close()
-    #pool.terminate()
+    # pool.terminate()
     pool.handle_results_async()
     log.info('  sleeping')
     time.sleep(3)
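
Editor's note: the pool now works on SuiteProcContext objects rather than loose keyword arguments - put_command(ctx, callback) queues the context and handle_results_async() later passes the completed context to the callback. A minimal synchronous sketch using the pieces shown above (not part of the patch; it calls _run_command directly, which the pool normally dispatches via apply_async):

    from cylc.mp_pool import SuiteProcContext, _run_command

    ctx = SuiteProcContext('event-handler', 'echo hello', shell=True)
    _run_command(ctx)  # fills in ctx.out, ctx.err, ctx.ret_code, ctx.timestamp
    print ctx          # timestamped "[cmd_key attr]" lines for cmd, ret_code, out
    print ctx.ret_code, ctx.out.strip()
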
diff --git a/lib/cylc/multisubprocess.py b/lib/cylc/multisubprocess.py
index 3b71e7d..5910b24 100644
--- a/lib/cylc/multisubprocess.py
+++ b/lib/cylc/multisubprocess.py
@@ -21,15 +21,17 @@ from cylc.wallclock import get_current_time_string
 
 
 class multisubprocess:
-    def __init__( self, commandlist, shell=True ):
+    def __init__(self, commandlist, shell=True):
         self.shell = shell
         self.commandlist = commandlist
 
-    def execute( self ):
+    def execute(self):
         procs = []
         for command in self.commandlist:
-            proc = subprocess.Popen( command, shell=self.shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
-            procs.append( proc )
+            proc = subprocess.Popen(
+                command, shell=self.shell,
+                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            procs.append(proc)
 
         out = []
         err = []
@@ -38,21 +40,24 @@ class multisubprocess:
             out.append(o)
             err.append(e)
 
-        return ( out, err )
+        return (out, err)
+
 
 if __name__ == "__main__":
     commands = []
-    for i in range(1,5):
+    for i in range(1, 5):
         if i == 4:
-            command = "echoX hello from " + str(i) + "; sleep 10; echo bye from " + str(i)
+            command = ("echoX hello from " + str(i) +
+                       "; sleep 10; echo bye from " + str(i))
         else:
-            command = "echo hello from " + str(i) + "; sleep 10; echo bye from " + str(i)
+            command = ("echo hello from " + str(i) +
+                       "; sleep 10; echo bye from " + str(i))
 
-        commands.append( command )
+        commands.append(command)
 
     print 'SRT:', get_current_time_string(display_sub_seconds=True)
 
-    mp = multisubprocess( commands )
+    mp = multisubprocess(commands)
     out, err = mp.execute()
 
     count = 1
diff --git a/lib/cylc/network/__init__.py b/lib/cylc/network/__init__.py
new file mode 100644
index 0000000..fdec509
--- /dev/null
+++ b/lib/cylc/network/__init__.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""Package for network interfaces to cylc suite server objects."""
+
+import threading
+from logging import getLogger
+
+# Names for network-connected objects.
+# WARNING: these names don't have consistent formatting, but changing them
+# will break backward compatibility with older cylc clients!
+PYRO_SUITEID_OBJ_NAME = 'cylcid'
+PYRO_EXT_TRIG_OBJ_NAME = 'ext-trigger-interface'
+PYRO_BCAST_OBJ_NAME = 'broadcast_receiver'
+PYRO_CMD_OBJ_NAME = 'command-interface'
+PYRO_INFO_OBJ_NAME = 'suite-info'
+PYRO_LOG_OBJ_NAME = 'log'
+PYRO_STATE_OBJ_NAME = 'state_summary'
+
+# Ordered privilege levels for authenticated users.
+PRIVILEGE_LEVELS = [
+    "identity",
+    "description",
+    "state-totals",
+    "full-read",
+    "shutdown",  # (Not used yet - for the post-passhprase era.)
+    "full-control"
+]
+
+CONNECT_DENIED_PRIV_TMPL = (
+    "[client-connect] DENIED (privilege '%s' < '%s') %s@%s:%s %s")
+
+# Dummy passphrase for client access from users without the suite passphrase.
+NO_PASSPHRASE = 'the quick brown fox'
+
+
+def access_priv_ok(server_obj, required_privilege_level):
+    """Return True if a client is allowed access to info from server_obj.
+
+    The required privilege level is compared to the level granted to the
+    client by the connection validator (held in thread local storage).
+
+    """
+    if threading.current_thread().__class__.__name__ == '_MainThread':
+        # Server methods may be called internally as well as by clients.
+        return True
+    caller = server_obj.getLocalStorage().caller
+    client_privilege_level = caller.privilege_level
+    return (PRIVILEGE_LEVELS.index(client_privilege_level) >=
+            PRIVILEGE_LEVELS.index(required_privilege_level))
+
+
+def check_access_priv(server_obj, required_privilege_level):
+    """Raise an exception if client privilege is insufficient for server_obj.
+
+    (See access_priv_ok above for the boolean version of this function.)
+
+    """
+    if threading.current_thread().__class__.__name__ == '_MainThread':
+        # Server methods may be called internally as well as by clients.
+        return
+    caller = server_obj.getLocalStorage().caller
+    client_privilege_level = caller.privilege_level
+    if not (PRIVILEGE_LEVELS.index(client_privilege_level) >=
+            PRIVILEGE_LEVELS.index(required_privilege_level)):
+        err = CONNECT_DENIED_PRIV_TMPL % (
+            client_privilege_level, required_privilege_level,
+            caller.user, caller.host, caller.prog_name, caller.uuid)
+        getLogger("main").warn(err)
+        # Raise an exception to be sent back to the client.
+        raise Exception(err)
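
For reference, the access checks above reduce to an index comparison over the
ordered PRIVILEGE_LEVELS list; a minimal sketch, with illustrative granted and
required levels:

    # Ordered privileges, lowest to highest (mirrors cylc.network above).
    PRIVILEGE_LEVELS = [
        "identity", "description", "state-totals",
        "full-read", "shutdown", "full-control",
    ]

    def is_allowed(granted, required):
        """True if the granted level is at least the required level."""
        return (PRIVILEGE_LEVELS.index(granted) >=
                PRIVILEGE_LEVELS.index(required))

    assert is_allowed("full-control", "full-read")      # suite owner
    assert not is_allowed("state-totals", "full-read")  # public client
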
diff --git a/lib/cylc/network/client_reporter.py b/lib/cylc/network/client_reporter.py
new file mode 100644
index 0000000..41f6318
--- /dev/null
+++ b/lib/cylc/network/client_reporter.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import datetime
+import threading
+import time
+import cylc.flags
+
+
+class PyroClientReporter(object):
+    """For logging cylc client requests with identifying information."""
+
+    _INSTANCE = None
+    CLIENT_FORGET_SEC = 60
+    CLIENT_ID_MIN_REPORT_RATE = 1.0  # 1 Hz
+    CLIENT_ID_REPORT_SECONDS = 3600  # Report every 1 hour.
+    LOG_COMMAND_TMPL = '[client-command] %s %s@%s:%s %s'
+    LOG_IDENTIFY_TMPL = '[client-identify] %d id requests in PT%dS'
+    LOG_SIGNOUT_TMPL = '[client-sign-out] %s@%s:%s %s'
+    LOG_FORGET_TMPL = '[client-forget] %s'
+
+    @classmethod
+    def get_inst(cls):
+        """Return a singleton instance."""
+        if cls._INSTANCE is None:
+            cls._INSTANCE = cls()
+        return cls._INSTANCE
+
+    def __init__(self):
+        self.clients = {}  # {uuid: time-of-last-connect}
+        self._id_start_time = time.time()  # Start of id requests measurement.
+        self._num_id_requests = 0  # Number of client id requests.
+
+    def report(self, request, server_obj):
+        """Log client requests with identifying information.
+
+        In debug mode log all requests including task messages. Otherwise log
+        all user commands, and just the first info request from each client.
+
+        """
+        if threading.current_thread().__class__.__name__ == '_MainThread':
+            # Server methods may be called internally as well as by clients.
+            return
+        name = server_obj.__class__.__name__
+        caller = server_obj.getLocalStorage().caller
+        log_me = (
+            cylc.flags.debug or
+            name in ["SuiteCommandServer",
+                     "ExtTriggerServer",
+                     "BroadcastServer"] or
+            (name not in ["SuiteIdServer", "TaskMessageServer"] and
+             caller.uuid not in self.clients))
+        if log_me:
+            logging.getLogger("main").info(
+                self.__class__.LOG_COMMAND_TMPL % (
+                    request, caller.user, caller.host, caller.prog_name,
+                    caller.uuid))
+        if name == "SuiteIdServer":
+            self._num_id_requests += 1
+            self.report_id_requests()
+        self.clients[caller.uuid] = datetime.datetime.utcnow()
+        self._housekeep()
+
+    def report_id_requests(self):
+        """Report the frequency of identification (scan) requests."""
+        current_time = time.time()
+        interval = current_time - self._id_start_time
+        if interval > self.CLIENT_ID_REPORT_SECONDS:
+            rate = float(self._num_id_requests) / interval
+            if rate > self.CLIENT_ID_MIN_REPORT_RATE:
+                logging.getLogger("main").warning(
+                    self.__class__.LOG_IDENTIFY_TMPL % (
+                        self._num_id_requests, interval)
+                )
+            elif cylc.flags.debug:
+                logging.getLogger("main").info(
+                    self.__class__.LOG_IDENTIFY_TMPL % (
+                        self._num_id_requests, interval)
+                )
+            self._id_start_time = current_time
+            self._num_id_requests = 0
+
+    def signout(self, server_obj):
+        """Force forget this client (for use by GUI etc.)."""
+
+        caller = server_obj.getLocalStorage().caller
+        logging.getLogger("main").info(
+            self.__class__.LOG_SIGNOUT_TMPL % (
+                caller.user, caller.host, caller.prog_name, caller.uuid))
+        try:
+            del self.clients[caller.uuid]
+        except KeyError:
+            # Already forgotten.
+            pass
+        self._housekeep()
+
+    def _housekeep(self):
+        """Forget inactive clients."""
+
+        for uuid in self.clients.keys():
+            dtime = self.clients[uuid]
+            if (self._total_seconds(datetime.datetime.utcnow() - dtime) >
+                    self.__class__.CLIENT_FORGET_SEC):
+                del self.clients[uuid]
+                logging.getLogger("main").debug(
+                    self.__class__.LOG_FORGET_TMPL % uuid)
+
+    def _total_seconds(self, td):
+        """Return total seconds as a datetime.timedelta object.
+
+        For back compat - timedelta.total_seconds() in Pyton >= 2.7.
+
+        """
+        return (td.microseconds + (
+                td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6
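
For reference, a minimal sketch of the pre-Python-2.7 arithmetic used by
_total_seconds() above; under Python 2 integer division the sub-second part is
truncated, which the comparison below tolerates:

    import datetime

    def total_seconds(td):
        # Same arithmetic as PyroClientReporter._total_seconds() above.
        return (td.microseconds +
                (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10 ** 6

    td = datetime.timedelta(days=1, seconds=30, microseconds=500000)
    # timedelta.total_seconds() (Python >= 2.7) agrees to within truncation.
    assert abs(td.total_seconds() - total_seconds(td)) < 1
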
diff --git a/lib/cylc/network/connection_validator.py b/lib/cylc/network/connection_validator.py
new file mode 100644
index 0000000..5ef0443
--- /dev/null
+++ b/lib/cylc/network/connection_validator.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+try:
+    import Pyro.core
+except ImportError, x:
+    raise SystemExit("ERROR: Pyro is not installed")
+import logging
+import os
+import sys
+
+from Pyro.protocol import DefaultConnValidator
+import Pyro.constants
+import Pyro.errors
+import hmac
+try:
+    import hashlib
+    md5 = hashlib.md5
+except ImportError:
+    import md5
+    md5 = md5.md5
+
+from cylc.network import NO_PASSPHRASE, PRIVILEGE_LEVELS
+from cylc.config import SuiteConfig
+from cylc.suite_host import is_remote_host
+from cylc.owner import user, host
+
+
+# Access for users without the suite passphrase: encrypting the "no passphrase"
+# passphrase is unnecessary, but doing so allows common passphrase handling.
+NO_PASSPHRASE_MD5 = md5(NO_PASSPHRASE).hexdigest()
+
+CONNECT_DENIED_TMPL = "[client-connect] DENIED %s@%s:%s %s"
+CONNECT_ALLOWED_TMPL = "[client-connect] %s@%s:%s privilege='%s' %s"
+
+
+class ConnValidator(DefaultConnValidator):
+    """Custom Pyro connection validator for user authentication."""
+
+    def set_pphrase(self, pphrase):
+        """Store encrypted suite passphrase (called by the server)."""
+        self.pphrase = md5(pphrase).hexdigest()
+
+    def acceptIdentification(self, daemon, connection, token, challenge):
+        """Authorize client login."""
+
+        logger = logging.getLogger('main')
+        is_old_client = False
+        # Processes the token returned by createAuthToken.
+        try:
+            user, host, uuid, prog_name, proc_passwd = token.split(':', 4)
+        except ValueError as exc:
+            # Back compat for old suite client (passphrase only)
+            # (Allows old scan to see new suites.)
+            proc_passwd = token
+            is_old_client = True
+            user = "(user)"
+            host = "(host)"
+            uuid = "(uuid)"
+            prog_name = "(OLD_CLIENT)"
+
+        # Check username and password, and set privilege level accordingly.
+        # The auth token has a binary hash that needs conversion to ASCII.
+        if hmac.new(challenge,
+                    self.pphrase.decode("hex")).digest() == proc_passwd:
+            # The client has the suite passphrase.
+            # Access granted at highest privilege level.
+            priv_level = PRIVILEGE_LEVELS[-1]
+        elif (hmac.new(
+                challenge,
+                NO_PASSPHRASE_MD5.decode("hex")).digest() == proc_passwd):
+            # The client does not have the suite passphrase.
+            # Public access granted at level determined by global/suite config.
+            config = SuiteConfig.get_inst()
+            priv_level = config.cfg['cylc']['authentication']['public']
+        else:
+            # Access denied.
+            if not is_old_client:
+                # Avoid logging large numbers of denials from old scan clients
+                # that try all passphrases available to them.
+                logger.warn(CONNECT_DENIED_TMPL % (
+                    user, host, prog_name, uuid))
+            return (0, Pyro.constants.DENIED_SECURITY)
+
+        # Store client details for use in the connection thread.
+        connection.user = user
+        connection.host = host
+        connection.prog_name = prog_name
+        connection.uuid = uuid
+        connection.privilege_level = priv_level
+        logger.debug(CONNECT_ALLOWED_TMPL % (
+                     user, host, prog_name, priv_level, uuid))
+        return (1, 0)
+
+    def createAuthToken(self, authid, challenge, peeraddr, URI, daemon):
+        """Return a secure auth token based on the server challenge string.
+
+        Argument authid is what's returned by mungeIdent().
+
+        """
+        return ":".join(
+            list(authid[:4]) + [hmac.new(challenge, authid[4]).digest()])
+
+    def mungeIdent(self, ident):
+        """Receive (uuid, passphrase) from client. Encrypt the passphrase.
+
+        Also pass client identification info to server for logging:
+        (user, host, prog name).
+
+        """
+        uuid, passphrase = ident
+        prog_name = os.path.basename(sys.argv[0])
+        if passphrase is None:
+            passphrase = NO_PASSPHRASE
+        return (user, host, str(uuid), prog_name, md5(passphrase).digest())
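
For reference, a minimal sketch (Python 2, as in the module above) of the
challenge-response check that ConnValidator performs, with no Pyro involved;
the challenge string is illustrative:

    import hmac
    from hashlib import md5

    passphrase = "the quick brown fox"   # NO_PASSPHRASE from cylc.network
    challenge = "0123456789abcdef"       # normally issued by the Pyro server

    # Client side (mungeIdent + createAuthToken): MD5 the passphrase, then
    # HMAC the binary digest with the server challenge.
    client_token = hmac.new(challenge, md5(passphrase).digest()).digest()

    # Server side (set_pphrase + acceptIdentification): the stored hex digest
    # is converted back to binary and the same HMAC is recomputed.
    stored_hex = md5(passphrase).hexdigest()
    server_check = hmac.new(challenge, stored_hex.decode("hex")).digest()

    assert client_token == server_check
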
diff --git a/lib/cylc/network/ext_trigger.py b/lib/cylc/network/ext_trigger.py
new file mode 100644
index 0000000..4f7bb75
--- /dev/null
+++ b/lib/cylc/network/ext_trigger.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+from time import sleep
+from Queue import Queue, Empty
+import Pyro.errors
+import cylc.flags
+from cylc.network import PYRO_EXT_TRIG_OBJ_NAME
+from cylc.network.pyro_base import PyroClient, PyroServer
+from cylc.network.suite_broadcast import BroadcastServer
+from cylc.network import check_access_priv
+from cylc.task_id import TaskID
+
+
+class ExtTriggerServer(PyroServer):
+    """Server-side external trigger interface."""
+
+    _INSTANCE = None
+
+    @classmethod
+    def get_inst(cls):
+        """Return a singleton instance."""
+        if cls._INSTANCE is None:
+            cls._INSTANCE = cls()
+        return cls._INSTANCE
+
+    def __init__(self):
+        super(ExtTriggerServer, self).__init__()
+        self.queue = Queue()
+
+    def put(self, event_message, event_id):
+        """Server-side external event trigger interface."""
+
+        check_access_priv(self, 'full-control')
+        self.report("ext_trigger")
+        self.queue.put((event_message, event_id))
+        return (True, 'event queued')
+
+    def retrieve(self, itask):
+        """Match external triggers for a waiting task proxy."""
+
+        # Note this has to allow multiple same-message triggers to be queued
+        # and used only one at a time.
+
+        if self.queue.empty():
+            return
+        if len(itask.external_triggers) == 0:
+            return
+        bcast = BroadcastServer.get_inst()
+        queued = []
+        while True:
+            try:
+                queued.append(self.queue.get_nowait())
+            except Empty:
+                break
+        used = []
+        for trig, satisfied in itask.external_triggers.items():
+            if satisfied:
+                continue
+            for qmsg, qid in queued:
+                if trig == qmsg:
+                    # Matched.
+                    name, point_string = TaskID.split(itask.identity)
+                    # Set trigger satisfied.
+                    itask.external_triggers[trig] = True
+                    cylc.flags.pflag = True
+                    # Broadcast the event ID to the cycle point.
+                    if qid is not None:
+                        bcast.put(
+                            [point_string],
+                            ["root"],
+                            [{
+                                'environment': {
+                                    'CYLC_EXT_TRIGGER_ID': qid
+                                }
+                            }]
+                        )
+                    used.append((qmsg, qid))
+                    break
+        for q in queued:
+            if q not in used:
+                self.queue.put(q)
+
+
+class ExtTriggerClient(PyroClient):
+    """Client-side external trigger interface."""
+
+    target_server_object = PYRO_EXT_TRIG_OBJ_NAME
+
+    MAX_N_TRIES = 5
+    RETRY_INTVL_SECS = 10.0
+
+    MSG_SEND_FAILED = "Send message: try %s of %s failed"
+    MSG_SEND_RETRY = "Retrying in %s seconds, timeout is %s"
+    MSG_SEND_SUCCEEDED = "Send message: try %s of %s succeeded"
+
+    def put(self, *args):
+        return self.call_server_func("put", *args)
+
+    def send_retry(self, event_message, event_id,
+                   max_n_tries, retry_intvl_secs):
+        """CLI external trigger interface."""
+
+        max_n_tries = int(max_n_tries or self.__class__.MAX_N_TRIES)
+        retry_intvl_secs = float(
+            retry_intvl_secs or self.__class__.RETRY_INTVL_SECS)
+
+        sent = False
+        i_try = 0
+        while not sent and i_try < max_n_tries:
+            i_try += 1
+            try:
+                self.put(event_message, event_id)
+            except Pyro.errors.NamingError as exc:
+                print >> sys.stderr, exc
+                print self.__class__.MSG_SEND_FAILED % (
+                    i_try,
+                    max_n_tries,
+                )
+                break
+            except Exception as exc:
+                print >> sys.stderr, exc
+                print self.__class__.MSG_SEND_FAILED % (
+                    i_try,
+                    max_n_tries,
+                )
+                if i_try >= max_n_tries:
+                    break
+                print self.__class__.MSG_SEND_RETRY % (
+                    retry_intvl_secs,
+                    self.pyro_timeout
+                )
+                sleep(retry_intvl_secs)
+            else:
+                if i_try > 1:
+                    print self.__class__.MSG_SEND_SUCCEEDED % (
+                        i_try,
+                        max_n_tries
+                    )
+                sent = True
+                break
+        if not sent:
+            sys.exit('ERROR: send failed')
+        return sent
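
For reference, a minimal sketch of how a command-line client might drive
ExtTriggerClient above; the suite name, event message and event ID are
illustrative, and a running suite with an accessible passphrase is assumed:

    from cylc.network.ext_trigger import ExtTriggerClient

    client = ExtTriggerClient("my.suite", pyro_timeout=10)
    # Retries put() up to 3 times, 5 seconds apart, then exits non-zero.
    client.send_retry("data ready for 20150101T00", "20150101T00", 3, 5.0)
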
diff --git a/lib/cylc/network/port_file.py b/lib/cylc/network/port_file.py
new file mode 100644
index 0000000..95f5223
--- /dev/null
+++ b/lib/cylc/network/port_file.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""At start-up the suite port number is written to ~/.cylc/ports/SUITE.
+
+Task messaging commands get the suite port number from $CYLC_SUITE_PORT.
+Other commands get the port number of the target suite from the port file.
+"""
+
+import os
+import sys
+from cylc.suite_host import is_remote_host
+from cylc.owner import user, is_remote_user
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+import cylc.flags
+
+
+class PortFileError(Exception):
+
+    def __init__(self, msg):
+        self.msg = msg
+
+    def __str__(self):
+        return repr(self.msg)
+
+
+class PortFileExistsError(PortFileError):
+    pass
+
+
+class PortFile(object):
+    """Write, remember, and unlink a suite port file on localhost.
+
+    """
+    def __init__(self, suite, port):
+        self.suite = suite
+        # The ports directory is assumed to exist.
+        pdir = GLOBAL_CFG.get(['pyro', 'ports directory'])
+        self.local_path = os.path.join(pdir, suite)
+        try:
+            self.port = str(int(port))
+        except ValueError, x:
+            print >> sys.stderr, x
+            raise PortFileError("ERROR, illegal port number: %s" % port)
+        self.write()
+
+    def write(self):
+        if os.path.exists(self.local_path):
+            raise PortFileExistsError(
+                "ERROR, port file exists: %s" % self.local_path)
+        try:
+            f = open(self.local_path, 'w')
+        except (IOError, OSError):
+            raise PortFileError(
+                "ERROR, failed to open port file: %s" % self.local_path)
+        f.write(self.port)
+        f.close()
+
+    def unlink(self):
+        try:
+            os.unlink(self.local_path)
+        except OSError as exc:
+            print >> sys.stderr, str(exc)
+            raise PortFileError(
+                "ERROR, failed to remove port file: %s" % self.local_path)
+
+
+class PortRetriever(object):
+    """Retrieve a suite port number from a port file (local or remote).
+
+    """
+    def __init__(self, suite, host, owner):
+        self.suite = suite
+        self.host = host
+        self.owner = owner
+        self.locn = None
+        self.local_path = os.path.join(
+            GLOBAL_CFG.get(['pyro', 'ports directory']), suite)
+
+    def get_local(self):
+        self.locn = self.local_path
+        if not os.path.exists(self.local_path):
+            raise PortFileError("Port file not found - suite not running?.")
+        f = open(self.local_path, 'r')
+        str_port = f.readline().rstrip('\n')
+        f.close()
+        return str_port
+
+    def get_remote(self):
+        import subprocess
+        target = self.owner + '@' + self.host
+        remote_path = self.local_path.replace(os.environ['HOME'], '$HOME')
+        self.locn = target + ':' + remote_path
+        ssh = subprocess.Popen(
+            ['ssh', '-oBatchMode=yes', target, 'cat', remote_path],
+            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        str_port = ssh.stdout.readline().rstrip('\n')
+        err = ssh.stderr.readline()
+        res = ssh.wait()
+        if err:
+            print >> sys.stderr, err.rstrip('\n')
+        if res != 0:
+            raise PortFileError("Port file not found - suite not running?.")
+        return str_port
+
+    def get(self):
+        if is_remote_host(self.host) or is_remote_user(self.owner):
+            str_port = self.get_remote()
+        else:
+            str_port = self.get_local()
+        try:
+            port = int(str_port)
+        except ValueError, x:
+            # This also catches an empty port file (touch).
+            print >> sys.stderr, x
+            print >> sys.stderr, "ERROR: bad port file", self.locn
+            raise PortFileError(
+                "ERROR, illegal port file content: %s" % str_port)
+        if cylc.flags.verbose:
+            print "Suite port is", port
+        return port
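
For reference, a minimal sketch of the port-file round trip that PortFile and
PortRetriever implement above, using a temporary directory in place of the
configured ports directory; the suite name and port are illustrative:

    import os
    import tempfile

    pdir = tempfile.mkdtemp()
    path = os.path.join(pdir, "my.suite")

    # Server side (PortFile.write): one line containing the port number.
    with open(path, "w") as handle:
        handle.write("7766")

    # Client side (PortRetriever.get_local): read it back and validate.
    with open(path) as handle:
        port = int(handle.readline().rstrip("\n"))
    assert port == 7766
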
diff --git a/lib/cylc/network/port_scan.py b/lib/cylc/network/port_scan.py
new file mode 100644
index 0000000..ae6ff89
--- /dev/null
+++ b/lib/cylc/network/port_scan.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+import Pyro.errors
+import Pyro.core
+
+import cylc.flags
+from cylc.owner import user
+from cylc.suite_host import get_hostname
+from cylc.registration import localdb
+from cylc.passphrase import passphrase, get_passphrase, PassphraseError
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.network import PYRO_SUITEID_OBJ_NAME, NO_PASSPHRASE
+from cylc.network.connection_validator import ConnValidator
+
+passphrases = []
+
+
+def load_passphrases(db):
+    """Load all of the user's passphrases (back-compat for <= 6.4.1)."""
+    global passphrases
+    if passphrases:
+        return passphrases
+
+    # Find passphrases in all registered suite directories.
+    reg = localdb(db)
+    reg_suites = reg.get_list()
+    for item in reg_suites:
+        rg = item[0]
+        di = item[1]
+        try:
+            p = passphrase(rg, user, get_hostname()).get(suitedir=di)
+        except Exception, x:
+            # Suite has no passphrase.
+            if cylc.flags.debug:
+                print >> sys.stderr, x
+        else:
+            passphrases.append(p)
+
+    # Find all passphrases installed under $HOME/.cylc/
+    for root, dirs, files in os.walk(
+            os.path.join(os.environ['HOME'], '.cylc')):
+        if 'passphrase' in files:
+            pfile = os.path.join(root, 'passphrase')
+            try:
+                with open(pfile, 'r') as pf:
+                    pphrase = pf.readline()
+                passphrases.append(pphrase.strip())
+            except IOError:
+                pass
+    return passphrases
+
+
+def get_proxy(host, port, pyro_timeout):
+    proxy = Pyro.core.getProxyForURI(
+        'PYROLOC://%s:%s/%s' % (
+            host, port, PYRO_SUITEID_OBJ_NAME))
+    proxy._setTimeout(pyro_timeout)
+    return proxy
+
+
+def scan(host=get_hostname(), db=None, pyro_timeout=None, owner=user):
+    """Scan ports, return a list of suites found: [(port, suite.identify())].
+
+    Note that we could easily scan for a given suite+owner and return its
+    port instead of reading port files, but this may not always be fast enough.
+    """
+    base_port = GLOBAL_CFG.get(['pyro', 'base port'])
+    last_port = base_port + GLOBAL_CFG.get(['pyro', 'maximum number of ports'])
+    if pyro_timeout:
+        pyro_timeout = float(pyro_timeout)
+    else:
+        pyro_timeout = None
+
+    results = []
+    for port in range(base_port, last_port):
+        try:
+            proxy = get_proxy(host, port, pyro_timeout)
+            proxy._setNewConnectionValidator(ConnValidator())
+            proxy._setIdentification((user, NO_PASSPHRASE))
+            result = (port, proxy.identify())
+        except Pyro.errors.ConnectionDeniedError as exc:
+            if cylc.flags.debug:
+                print '%s:%s (connection denied)' % (host, port)
+            # Back-compat <= 6.4.1
+            msg = '  Old daemon at %s:%s?' % (host, port)
+            connected = False
+            for pphrase in load_passphrases(db):
+                try:
+                    proxy = get_proxy(host, port, pyro_timeout)
+                    proxy._setIdentification(pphrase)
+                    info = proxy.id()
+                    result = (port, {'name': info[0], 'owner': info[1]})
+                except Pyro.errors.ConnectionDeniedError:
+                    connected = False
+                else:
+                    connected = True
+                    break
+            if not connected:
+                if cylc.flags.verbose:
+                    print >> sys.stderr, msg, "- connection denied (%s)" % exc
+                continue
+            else:
+                if cylc.flags.verbose:
+                    print >> sys.stderr, msg, "- connected with passphrase"
+        except (Pyro.errors.ProtocolError, Pyro.errors.NamingError) as exc:
+            # No suite at this port.
+            if cylc.flags.debug:
+                print str(exc)
+                print '%s:%s (no suite)' % (host, port)
+            continue
+        except Pyro.errors.TimeoutError as exc:
+            # E.g. Ctrl-Z suspended suite - holds up port scanning!
+            if cylc.flags.debug:
+                print '%s:%s (connection timed out)' % (host, port)
+            print >> sys.stderr, (
+                'suite? owner?@%s:%s - connection timed out (%s)' % (
+                    host, port, exc))
+            # No result obtained from this port.
+            continue
+        except Exception as exc:
+            if cylc.flags.debug:
+                print str(exc)
+                break
+            else:
+                print >> sys.stderr, str(exc)
+                continue
+        else:
+            name = result[1].get('name')
+            owner = result[1].get('owner')
+            states = result[1].get('states', None)
+            if cylc.flags.debug:
+                print '   suite:', name, owner
+            if states is None:
+                # This suite keeps its state info private.
+                # Try again with the passphrase if I have it.
+                try:
+                    pphrase = get_passphrase(name, owner, host, localdb(db))
+                except PassphraseError:
+                    if cylc.flags.debug:
+                        print '    (no passphrase)'
+                else:
+                    try:
+                        proxy = get_proxy(host, port, pyro_timeout)
+                        proxy._setNewConnectionValidator(ConnValidator())
+                        proxy._setIdentification((user, pphrase))
+                        result = (port, proxy.identify())
+                    except Exception:
+                        # Nope (private suite, wrong passphrase).
+                        if cylc.flags.debug:
+                            print '    (wrong passphrase)'
+                    else:
+                        if cylc.flags.debug:
+                            print '    (got states with passphrase)'
+        results.append(result)
+    return results
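
For reference, a minimal sketch of the port range that scan() above walks; the
literals stand in for the GLOBAL_CFG lookups and are illustrative:

    base_port = 7766    # GLOBAL_CFG.get(['pyro', 'base port'])
    max_n_ports = 100   # GLOBAL_CFG.get(['pyro', 'maximum number of ports'])
    last_port = base_port + max_n_ports

    # range() excludes last_port, so exactly max_n_ports ports are tried.
    assert len(range(base_port, last_port)) == max_n_ports
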
diff --git a/lib/cylc/network/pyro_base.py b/lib/cylc/network/pyro_base.py
new file mode 100644
index 0000000..e3aea92
--- /dev/null
+++ b/lib/cylc/network/pyro_base.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import sys
+from uuid import uuid4
+
+try:
+    import Pyro.core
+    import Pyro.errors
+except ImportError, x:
+    raise SystemExit("ERROR: Pyro is not installed")
+
+import cylc.flags
+from cylc.owner import user, host, user_at_host
+from cylc.passphrase import get_passphrase, PassphraseError
+from cylc.registration import localdb
+from cylc.network.port_file import PortRetriever
+from cylc.network.connection_validator import ConnValidator
+from cylc.network.client_reporter import PyroClientReporter
+
+
+class PyroServer(Pyro.core.ObjBase):
+    """Base class for server-side suite object interfaces."""
+
+    def __init__(self):
+        Pyro.core.ObjBase.__init__(self)
+        self.client_reporter = PyroClientReporter.get_inst()
+
+    def signout(self):
+        self.client_reporter.signout(self)
+
+    def report(self, command):
+        self.client_reporter.report(command, self)
+
+
+class PyroClient(object):
+    """Base class for client-side suite object interfaces."""
+
+    target_server_object = None
+
+    def __init__(self, suite, owner=user, host=host, pyro_timeout=None,
+                 port=None, db=None, my_uuid=None, print_uuid=False):
+        self.suite = suite
+        self.host = host
+        self.owner = owner
+        if pyro_timeout is not None:
+            pyro_timeout = float(pyro_timeout)
+        self.pyro_timeout = pyro_timeout
+        self.hard_port = port
+        self.pyro_proxy = None
+        self.my_uuid = my_uuid or uuid4()
+        if print_uuid:
+            print >> sys.stderr, '%s' % self.my_uuid
+        try:
+            self.pphrase = get_passphrase(suite, owner, host, localdb(db))
+        except PassphraseError:
+            # No passphrase: public access client.
+            self.pphrase = None
+
+    def call_server_func(self, fname, *fargs):
+        """Call server_object.fname(*fargs)
+
+        Get a Pyro proxy for the server object if we don't already have it,
+        and handle back compat retry for older daemons.
+
+        """
+        self._get_proxy()
+        func = getattr(self.pyro_proxy, fname)
+        try:
+            return func(*fargs)
+        except Pyro.errors.ConnectionDeniedError:
+            # Back compat for daemons <= 6.4.1: passphrase-only auth.
+            if cylc.flags.debug:
+                print >> sys.stderr, "Old daemon? - trying passphrases."
+            self.pyro_proxy = None
+            self._get_proxy_old()
+            func = getattr(self.pyro_proxy, fname)
+            return func(*fargs)
+
+    def _set_uri(self):
+        # Find the suite port number (fails if port file not found)
+        port = (self.hard_port or
+                PortRetriever(self.suite, self.host, self.owner).get())
+        # Qualify the obj name with user and suite name (unnecessary but
+        # can't change it until we break back-compat with older daemons).
+        name = "%s.%s.%s" % (self.owner, self.suite,
+                             self.__class__.target_server_object)
+        self.uri = "PYROLOC://%s:%s/%s" % (self.host, str(port), name)
+
+    def _get_proxy_common(self):
+        if self.pyro_proxy is None:
+            self._set_uri()
+            # Fails only for unknown hosts (no connection till RPC call).
+            self.pyro_proxy = Pyro.core.getProxyForURI(self.uri)
+            self.pyro_proxy._setTimeout(self.pyro_timeout)
+
+    def _get_proxy(self):
+        self._get_proxy_common()
+        self.pyro_proxy._setNewConnectionValidator(ConnValidator())
+        self.pyro_proxy._setIdentification((self.my_uuid, self.pphrase))
+
+    def _get_proxy_old(self):
+        """Back compat: passphrase-only daemons (<= 6.4.1)."""
+        self._get_proxy_common()
+        self.pyro_proxy._setIdentification(self.pphrase)
+
+    def reset(self):
+        self.pyro_proxy = None
+
+    def signout(self):
+        """Multi-connect clients should call this on exit."""
+        try:
+            self._get_proxy()
+            try:
+                self.pyro_proxy.signout()
+            except AttributeError:
+                # Back-compat for pre client reporting daemons <= 6.4.1.
+                pass
+        except Exception:
+            # Suite may have stopped before the client exits.
+            pass
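
For reference, a minimal sketch of the PYROLOC URI that PyroClient._set_uri()
above builds; the owner, suite, host and port values are illustrative:

    owner = "alice"
    suite = "my.suite"
    host = "suite-host.example"
    port = 7766
    target = "command-interface"   # e.g. PYRO_CMD_OBJ_NAME

    name = "%s.%s.%s" % (owner, suite, target)
    uri = "PYROLOC://%s:%s/%s" % (host, str(port), name)
    assert uri == (
        "PYROLOC://suite-host.example:7766/alice.my.suite.command-interface")
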
diff --git a/lib/cylc/cylc_pyro_server.py b/lib/cylc/network/pyro_daemon.py
similarity index 50%
rename from lib/cylc/cylc_pyro_server.py
rename to lib/cylc/network/pyro_daemon.py
index 567609b..f83f45c 100644
--- a/lib/cylc/cylc_pyro_server.py
+++ b/lib/cylc/network/pyro_daemon.py
@@ -20,35 +20,40 @@ import sys
 import socket
 try:
     import Pyro
-except ImportError, x:
-    print >> sys.stderr, x
-    sys.exit( "ERROR: Pyro is not installed" )
-from passphrase import passphrase
-from suite_host import get_hostname
-from owner import user
+except ImportError:
+    sys.exit("ERROR: Pyro is not installed")
 
-class pyro_server( object ):
-    def __init__( self, suite, suitedir, base_port, max_n_ports, user=user ):
+from cylc.owner import user
+from cylc.network.connection_validator import ConnValidator
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
 
-        self.suite = suite
-        self.owner = user
 
-        # SINGLE THREADED PYRO
+class PyroDaemon(object):
+    def __init__(self, suite):
+
         Pyro.config.PYRO_MULTITHREADED = 1
-        # USE DNS NAMES INSTEAD OF FIXED IP ADDRESSES FROM /etc/hosts
+        # Use dns names instead of fixed ip addresses from /etc/hosts
         # (see the Userguide "Networking Issues" section).
         Pyro.config.PYRO_DNS_URI = True
 
-        # base (lowest allowed) Pyro socket number
-        Pyro.config.PYRO_PORT = base_port
-        # max number of sockets starting at base
-        Pyro.config.PYRO_PORT_RANGE = max_n_ports
+        # Base Pyro socket number.
+        Pyro.config.PYRO_PORT = GLOBAL_CFG.get(['pyro', 'base port'])
+        # Max number of sockets starting at base.
+        Pyro.config.PYRO_PORT_RANGE = GLOBAL_CFG.get(
+            ['pyro', 'maximum number of ports'])
 
         Pyro.core.initServer()
+        self.daemon = None
+        # Suite only needed for back-compat with old clients (see below):
+        self.suite = suite
+
+    def set_auth(self, passphrase):
         self.daemon = Pyro.core.Daemon()
-        self.daemon.setAllowedIdentifications( [passphrase(suite,user,get_hostname()).get(suitedir=suitedir)] )
+        cval = ConnValidator()
+        cval.set_pphrase(passphrase)
+        self.daemon.setNewConnectionValidator(cval)
 
-    def shutdown( self ):
+    def shutdown(self):
         self.daemon.shutdown(True)
         # If a suite shuts down via 'stop --now' or Ctrl-C, etc.,
         # any existing client end connections will hang for a long time
@@ -56,22 +61,22 @@ class pyro_server( object ):
         # presumably) which daemon.shutdown() does not do (why not?):
 
         try:
-            self.daemon.sock.shutdown( socket.SHUT_RDWR )
+            self.daemon.sock.shutdown(socket.SHUT_RDWR)
         except socket.error, x:
             print >> sys.stderr, x
 
-    def connect( self, obj, name, qualified=True ):
-        if qualified:
-            qname = self.owner + '.' + self.suite + '.' + name
-        else:
-            qname = name
-        uri = self.daemon.connect( obj, qname )
+    def connect(self, obj, name):
+        if not obj.__class__.__name__ == 'SuiteIdServer':
+            # Qualify the obj name with user and suite name (unnecessary but
+            # can't change it until we break back-compat with older daemons).
+            name = "%s.%s.%s" % (user, self.suite, name)
+        uri = self.daemon.connect(obj, name)
 
-    def disconnect( self, obj ):
-        self.daemon.disconnect( obj )
+    def disconnect(self, obj):
+        self.daemon.disconnect(obj)
 
-    def handleRequests( self, timeout=None ):
-        self.daemon.handleRequests( timeout )
+    def handleRequests(self, timeout=None):
+        self.daemon.handleRequests(timeout)
 
-    def get_port( self ):
+    def get_port(self):
         return self.daemon.port
diff --git a/lib/cylc/broadcast.py b/lib/cylc/network/suite_broadcast.py
similarity index 51%
rename from lib/cylc/broadcast.py
rename to lib/cylc/network/suite_broadcast.py
index cb7de27..f0c552d 100644
--- a/lib/cylc/broadcast.py
+++ b/lib/cylc/network/suite_broadcast.py
@@ -15,36 +15,61 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""Handle broadcast from clients."""
 
-import Pyro.core
+import sys
 import logging
 import cPickle as pickle
+import threading
+
 from cylc.broadcast_report import (
-    get_broadcast_change_report, get_broadcast_bad_options_report)
-from cylc.task_id import TaskID
+    get_broadcast_change_iter,
+    get_broadcast_change_report,
+    get_broadcast_bad_options_report)
 from cylc.cycling.loader import get_point, standardise_point_string
-from cylc.rundb import RecordBroadcastObject
 from cylc.wallclock import get_current_time_string
+from cylc.network import PYRO_BCAST_OBJ_NAME
+from cylc.network.pyro_base import PyroClient, PyroServer
+from cylc.network import check_access_priv
+from cylc.task_id import TaskID
+from cylc.rundb import CylcSuiteDAO
 
 
-class Broadcast(Pyro.core.ObjBase):
-    """Receive broadcast variables from cylc clients.
+class BroadcastServer(PyroServer):
+    """Server-side suite broadcast interface.
 
     Examples:
     self.settings['*']['root'] = {'environment': {'FOO': 'bar'}}
     self.settings['20100808T06Z']['root'] = {'command scripting': 'stuff'}
     """
 
+    _INSTANCE = None
     ALL_CYCLE_POINTS_STRS = ["*", "all-cycle-points", "all-cycles"]
+    TABLE_BROADCAST_EVENTS = CylcSuiteDAO.TABLE_BROADCAST_EVENTS
+    TABLE_BROADCAST_STATES = CylcSuiteDAO.TABLE_BROADCAST_STATES
+
+    @classmethod
+    def get_inst(cls, linearized_ancestors=None):
+        """Return a singleton instance.
+
+        On 1st call, instantiate the singleton.
+        Argument linearized_ancestors is only relevant on 1st call.
+
+        """
+        if cls._INSTANCE is None:
+            cls._INSTANCE = cls(linearized_ancestors)
+        return cls._INSTANCE
 
     def __init__(self, linearized_ancestors):
+        super(BroadcastServer, self).__init__()
         self.log = logging.getLogger('main')
         self.settings = {}
-        self.prev_dump = self._get_dump()
-        self.settings_queue = []
+        self.db_inserts_map = {
+            self.TABLE_BROADCAST_EVENTS: [],
+            self.TABLE_BROADCAST_STATES: []}
+        self.db_deletes_map = {
+            self.TABLE_BROADCAST_STATES: []}
         self.linearized_ancestors = linearized_ancestors
-        Pyro.core.ObjBase.__init__(self)
+        self.lock = threading.RLock()
 
     def _prune(self):
         """Remove empty leaves left by unsetting broadcast values.
@@ -56,21 +81,22 @@ class Broadcast(Pyro.core.ObjBase):
             ["20020202", "bar", "environment", "BAR"],
         ]
         """
-        prunes = []
-        stuff_stack = [([], self.settings, True)]
-        while stuff_stack:
-            keys, stuff, is_new = stuff_stack.pop()
-            if is_new:
-                stuff_stack.append((keys, stuff, False))
-                for key, value in stuff.items():
-                    if isinstance(value, dict):
-                        stuff_stack.append((keys + [key], value, True))
-            else:
-                for key, value in stuff.items():
-                    if value in [None, {}]:
-                        del stuff[key]
-                        prunes.append(keys + [key])
-        return prunes
+        with self.lock:
+            prunes = []
+            stuff_stack = [([], self.settings, True)]
+            while stuff_stack:
+                keys, stuff, is_new = stuff_stack.pop()
+                if is_new:
+                    stuff_stack.append((keys, stuff, False))
+                    for key, value in stuff.items():
+                        if isinstance(value, dict):
+                            stuff_stack.append((keys + [key], value, True))
+                else:
+                    for key, value in stuff.items():
+                        if value in [None, {}]:
+                            del stuff[key]
+                            prunes.append(keys + [key])
+            return prunes
 
     def _addict(self, target, source):
         """Recursively add source dict to target dict."""
@@ -83,42 +109,46 @@ class Broadcast(Pyro.core.ObjBase):
                 target[key] = source[key]
 
     def put(self, point_strings, namespaces, settings):
-        """Add new broadcast settings.
+        """Add new broadcast settings (server side interface).
 
         Return a tuple (modified_settings, bad_options) where:
           modified_settings is list of modified settings in the form:
             [("20200202", "foo", {"command scripting": "true"}, ...]
           bad_options is as described in the docstring for self.clear().
         """
+        check_access_priv(self, 'full-control')
+        self.report('broadcast_put')
         modified_settings = []
         bad_point_strings = []
         bad_namespaces = []
-        
-        for setting in settings:
-            for point_string in point_strings:
-                # Standardise the point and check its validity.
-                bad_point = False
-                try:
-                    point_string = standardise_point_string(point_string)
-                except Exception as exc:
-                    if point_string != '*':
-                        bad_point_strings.append(point_string)
-                        bad_point = True
-                if not bad_point and point_string not in self.settings:
-                    self.settings[point_string] = {}
-                for namespace in namespaces:
-                    if namespace not in self.linearized_ancestors:
-                        bad_namespaces.append(namespace)
-                    elif not bad_point:
-                        if namespace not in self.settings[point_string]:
-                            self.settings[point_string][namespace] = {}
-                        self._addict(
-                            self.settings[point_string][namespace], setting)
-                        modified_settings.append(
-                            (point_string, namespace, setting))
+
+        with self.lock:
+            for setting in settings:
+                for point_string in point_strings:
+                    # Standardise the point and check its validity.
+                    bad_point = False
+                    try:
+                        point_string = standardise_point_string(point_string)
+                    except Exception as exc:
+                        if point_string != '*':
+                            bad_point_strings.append(point_string)
+                            bad_point = True
+                    if not bad_point and point_string not in self.settings:
+                        self.settings[point_string] = {}
+                    for namespace in namespaces:
+                        if namespace not in self.linearized_ancestors:
+                            bad_namespaces.append(namespace)
+                        elif not bad_point:
+                            if namespace not in self.settings[point_string]:
+                                self.settings[point_string][namespace] = {}
+                            self._addict(
+                                self.settings[point_string][namespace],
+                                setting)
+                            modified_settings.append(
+                                (point_string, namespace, setting))
 
         # Log the broadcast
-        self._update_db_queue()
+        self._append_db_queue(modified_settings)
         self.log.info(get_broadcast_change_report(modified_settings))
 
         bad_options = {}
@@ -130,6 +160,8 @@ class Broadcast(Pyro.core.ObjBase):
 
     def get(self, task_id=None):
         """Retrieve all broadcast variables that target a given task ID."""
+        check_access_priv(self, 'full-read')
+        self.report('broadcast_get')
         if not task_id:
             # all broadcast settings requested
             return self.settings
@@ -153,11 +185,12 @@ class Broadcast(Pyro.core.ObjBase):
         cutoff_point = None
         if cutoff is not None:
             cutoff_point = get_point(str(cutoff))
-        for point_string in self.settings:
-            if cutoff_point is None or (
-                    point_string not in self.ALL_CYCLE_POINTS_STRS and
-                    get_point(point_string) < cutoff_point):
-                point_strings.append(point_string)
+        with self.lock:
+            for point_string in self.settings:
+                if cutoff_point is None or (
+                        point_string not in self.ALL_CYCLE_POINTS_STRS and
+                        get_point(point_string) < cutoff_point):
+                    point_strings.append(point_string)
         if not point_strings:
             return (None, {"expire": [cutoff]})
         return self.clear(point_strings=point_strings)
@@ -182,33 +215,35 @@ class Broadcast(Pyro.core.ObjBase):
 
         # Clear settings
         modified_settings = []
-        for point_string, point_string_settings in self.settings.items():
-            if point_strings and point_string not in point_strings:
-                continue
-            for namespace, namespace_settings in point_string_settings.items():
-                if namespaces and namespace not in namespaces:
+        with self.lock:
+            for point_string, point_string_settings in self.settings.items():
+                if point_strings and point_string not in point_strings:
                     continue
-                stuff_stack = [([], namespace_settings)]
-                while stuff_stack:
-                    keys, stuff = stuff_stack.pop()
-                    for key, value in stuff.items():
-                        if isinstance(value, dict):
-                            stuff_stack.append((keys + [key], value))
-                        elif (not cancel_keys_list or
-                                keys + [key] in cancel_keys_list):
-                            stuff[key] = None
-                            setting = {key: value}
-                            for rkey in reversed(keys):
-                                setting = {rkey: setting}
-                            modified_settings.append(
-                                (point_string, namespace, setting))
+                for namespace, namespace_settings in (
+                        point_string_settings.items()):
+                    if namespaces and namespace not in namespaces:
+                        continue
+                    stuff_stack = [([], namespace_settings)]
+                    while stuff_stack:
+                        keys, stuff = stuff_stack.pop()
+                        for key, value in stuff.items():
+                            if isinstance(value, dict):
+                                stuff_stack.append((keys + [key], value))
+                            elif (not cancel_keys_list or
+                                    keys + [key] in cancel_keys_list):
+                                stuff[key] = None
+                                setting = {key: value}
+                                for rkey in reversed(keys):
+                                    setting = {rkey: setting}
+                                modified_settings.append(
+                                    (point_string, namespace, setting))
 
         # Prune any empty branches
         bad_options = self._get_bad_options(
             self._prune(), point_strings, namespaces, cancel_keys_list)
 
         # Log the broadcast
-        self._update_db_queue()
+        self._append_db_queue(modified_settings, is_cancel=True)
         self.log.info(
             get_broadcast_change_report(modified_settings, is_cancel=True))
         if bad_options:
@@ -242,22 +277,43 @@ class Broadcast(Pyro.core.ObjBase):
 
     def dump(self, file_):
         """Write broadcast variables to the state dump file."""
-        pickle.dump(self.settings, file_)
-        file_.write("\n")
-
-    def get_db_ops(self):
-        """Return the next DB operations from DB queue."""
-        ops = self.settings_queue
-        self.settings_queue = []
-        return ops
+        with self.lock:
+            pickle.dump(self.settings, file_)
+            file_.write("\n")
 
     def load(self, pickled_settings):
         """Load broadcast variables from the state dump file."""
-        self.settings = pickle.loads(pickled_settings)
+        with self.lock:
+            self.settings = pickle.loads(pickled_settings)
+
+            # Ensure database table is in sync
+            modified_settings = []
+            for point_string, point_string_settings in self.settings.items():
+                for namespace, namespace_settings in (
+                        point_string_settings.items()):
+                    stuff_stack = [([], namespace_settings)]
+                    while stuff_stack:
+                        keys, stuff = stuff_stack.pop()
+                        for key, value in stuff.items():
+                            if isinstance(value, dict):
+                                stuff_stack.append((keys + [key], value))
+                            else:
+                                setting = {key: value}
+                                for rkey in reversed(keys):
+                                    setting = {rkey: setting}
+                                modified_settings.append(
+                                    (point_string, namespace, setting))
+        for broadcast_change in get_broadcast_change_iter(modified_settings):
+            self.db_inserts_map[self.TABLE_BROADCAST_STATES].append({
+                "point": broadcast_change["point"],
+                "namespace": broadcast_change["namespace"],
+                "key": broadcast_change["key"],
+                "value": broadcast_change["value"]})
 
     def _get_dump(self):
         """Return broadcast variables as written to the state dump file."""
-        return pickle.dumps(self.settings) + "\n"
+        with self.lock:
+            return pickle.dumps(self.settings) + "\n"
 
     @classmethod
     def _get_bad_options(
@@ -304,10 +360,43 @@ class Broadcast(Pyro.core.ObjBase):
         return (list(cancel_keys) in
                 [prune[2:] for prune in prunes if prune[2:]])
 
-    def _update_db_queue(self):
+    def _append_db_queue(self, modified_settings, is_cancel=False):
         """Update the queue to the runtime DB."""
-        this_dump = self._get_dump()
-        if this_dump != self.prev_dump:
-            now = get_current_time_string(display_sub_seconds=True)
-            self.settings_queue.append(RecordBroadcastObject(now, this_dump))
-            self.prev_dump = this_dump
+        now = get_current_time_string(display_sub_seconds=True)
+        for broadcast_change in (
+                get_broadcast_change_iter(modified_settings, is_cancel)):
+            broadcast_change["time"] = now
+            self.db_inserts_map[self.TABLE_BROADCAST_EVENTS].append(
+                broadcast_change)
+            if is_cancel:
+                self.db_deletes_map[self.TABLE_BROADCAST_STATES].append({
+                    "point": broadcast_change["point"],
+                    "namespace": broadcast_change["namespace"],
+                    "key": broadcast_change["key"]})
+                # Delete statements are currently executed before insert
+                # statements, so we should clear out any insert statements that
+                # are deleted here.
+                # (Not the most efficient logic here, but unless we have a
+                # large number of inserts, then this should not be a big
+                # concern.)
+                inserts = []
+                for insert in self.db_inserts_map[self.TABLE_BROADCAST_STATES]:
+                    if any([insert[key] != broadcast_change[key] for key in
+                            ["point", "namespace", "key"]]):
+                        inserts.append(insert)
+                self.db_inserts_map[self.TABLE_BROADCAST_STATES] = inserts
+            else:
+                self.db_inserts_map[self.TABLE_BROADCAST_STATES].append({
+                    "point": broadcast_change["point"],
+                    "namespace": broadcast_change["namespace"],
+                    "key": broadcast_change["key"],
+                    "value": broadcast_change["value"]})
+
+
+class BroadcastClient(PyroClient):
+    """Client-side suite broadcast interface."""
+
+    target_server_object = PYRO_BCAST_OBJ_NAME
+
+    def broadcast(self, cmd, *args):
+        return self.call_server_func(cmd, *args)
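
The nested-to-flat conversion used in the broadcast hunk above (the stuff_stack loop) can be shown standalone; a minimal sketch with an invented sample settings dict, not taken from the patch:

    # Flatten {point: {namespace: nested-dict}} into (point, namespace,
    # setting) tuples, mirroring the stuff_stack loop above.
    # The sample data here is invented for illustration only.
    settings = {
        '20150101T00Z': {
            'root': {'environment': {'FOO': 'bar'}, 'title': 'demo'},
        },
    }
    modified_settings = []
    for point_string, point_string_settings in settings.items():
        for namespace, namespace_settings in point_string_settings.items():
            stuff_stack = [([], namespace_settings)]
            while stuff_stack:
                keys, stuff = stuff_stack.pop()
                for key, value in stuff.items():
                    if isinstance(value, dict):
                        stuff_stack.append((keys + [key], value))
                    else:
                        setting = {key: value}
                        for rkey in reversed(keys):
                            setting = {rkey: setting}
                        modified_settings.append(
                            (point_string, namespace, setting))
    print modified_settings
    # e.g. [('20150101T00Z', 'root', {'environment': {'FOO': 'bar'}}),
    #       ('20150101T00Z', 'root', {'title': 'demo'})]
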
diff --git a/lib/cylc/network/suite_command.py b/lib/cylc/network/suite_command.py
new file mode 100644
index 0000000..00296d6
--- /dev/null
+++ b/lib/cylc/network/suite_command.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+import os
+from Queue import Queue
+
+import cylc.flags
+from cylc.network import PYRO_CMD_OBJ_NAME
+from cylc.network.pyro_base import PyroClient, PyroServer
+from cylc.network import check_access_priv
+
+# Back-compat for older suite daemons <= 6.4.1.
+back_compat = {
+    'set_stop_cleanly': 'stop cleanly',
+    'stop_now': 'stop now',
+    'set_stop_after_point': 'stop after point',
+    'set_stop_after_clock_time': 'stop after clock time',
+    'set_stop_after_task': 'stop after task',
+    'release_suite': 'release suite',
+    'release_task': 'release task',
+    'remove_cycle': 'remove cycle',
+    'remove_task': 'remove task',
+    'hold_suite': 'hold suite now',
+    'hold_after_point_string': 'hold suite after',
+    'hold_task': 'hold task now',
+    'set_runahead': 'set runahead',
+    'set_verbosity': 'set verbosity',
+    'purge_tree': 'purge tree',
+    'reset_task_state': 'reset task state',
+    'trigger_task': 'trigger task',
+    'dry_run_task': 'dry run task',
+    'nudge': 'nudge suite',
+    'insert_task': 'insert task',
+    'reload_suite': 'reload suite',
+    'add_prerequisite': 'add prerequisite',
+    'poll_tasks': 'poll tasks',
+    'kill_tasks': 'kill tasks'
+}
+
+
+class SuiteCommandServer(PyroServer):
+    """Server-side suite command interface."""
+
+    def __init__(self):
+        super(SuiteCommandServer, self).__init__()
+        self.queue = Queue()
+
+    def put(self, command, *command_args):
+        if 'stop' in command:
+            check_access_priv(self, 'shutdown')
+        else:
+            check_access_priv(self, 'full-control')
+        self.report(command)
+        self.queue.put((command, command_args))
+        return (True, 'Command queued')
+
+    def get_queue(self):
+        return self.queue
+
+
+class SuiteCommandClient(PyroClient):
+    """Client-side suite command interface."""
+
+    target_server_object = PYRO_CMD_OBJ_NAME
+
+    def put_command(self, *args):
+        success, msg = self.call_server_func("put", *args)
+        if msg.startswith('ERROR: Illegal command:'):
+            # Back-compat for older suite daemons <= 6.4.1.
+            command = back_compat[args[0]]
+            args = tuple([command]) + args[1:]
+            success, msg = self.call_server_func("put", *args)
+        return (success, msg)
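
The put_command fallback above retries with the pre-6.5 command name when the daemon rejects the new one. A standalone sketch of that retry pattern, using a stand-in server call and a one-entry excerpt of the mapping (both invented for illustration):

    # Old daemons only understand the legacy space-separated command names;
    # old_daemon_put is a stand-in for call_server_func("put", ...) against
    # such a daemon.
    legacy_names = ['hold suite now', 'release suite', 'stop cleanly']

    def old_daemon_put(command, *args):
        if command not in legacy_names:
            return (False, 'ERROR: Illegal command: %s' % command)
        return (True, 'Command queued')

    back_compat = {'hold_suite': 'hold suite now'}  # excerpt of the map above

    def put_with_back_compat(command, *args):
        success, msg = old_daemon_put(command, *args)
        if msg.startswith('ERROR: Illegal command:'):
            # Retry with the legacy name, as in SuiteCommandClient.put_command.
            success, msg = old_daemon_put(back_compat[command], *args)
        return (success, msg)

    print put_with_back_compat('hold_suite')  # (True, 'Command queued')
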
diff --git a/lib/cylc/network/suite_identifier.py b/lib/cylc/network/suite_identifier.py
new file mode 100644
index 0000000..85923c2
--- /dev/null
+++ b/lib/cylc/network/suite_identifier.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# A minimal Pyro-connected object to allow client programs to identify
+# what suite is running at a given cylc port - by suite name and owner.
+
+# All *other* suite objects should be connected to Pyro via qualified
+# names: owner.suite.object, to prevent accidental access to the wrong
+# suite. This object, however, should be connected unqualified so that
+# the same ID method can be called on any active cylc port.
+
+import cylc.flags
+from cylc.network.pyro_base import PyroServer
+from cylc.network.suite_state import StateSummaryServer
+from cylc.network import access_priv_ok
+from cylc.config import SuiteConfig
+
+
+class SuiteIdServer(PyroServer):
+    """Server-side external trigger interface."""
+
+    _INSTANCE = None
+
+    @classmethod
+    def get_inst(cls, name=None, owner=None):
+        """Return a singleton instance."""
+        if cls._INSTANCE is None:
+            cls._INSTANCE = cls(name, owner)
+        return cls._INSTANCE
+
+    def __init__(self, name, owner):
+        self.owner = owner
+        self.name = name
+        super(SuiteIdServer, self).__init__()
+
+    def identify(self):
+        self.report("identify")
+        result = {}
+        if access_priv_ok(self, "identity"):
+            result['name'] = self.name
+            result['owner'] = self.owner
+        if access_priv_ok(self, "description"):
+            config = SuiteConfig.get_inst()
+            result['title'] = config.cfg['title']
+            result['description'] = config.cfg['description']
+        if access_priv_ok(self, "state-totals"):
+            result['states'] = StateSummaryServer.get_inst().get_state_totals()
+            result['update-time'] = (
+                StateSummaryServer.get_inst().get_summary_update_time())
+        return result
+
+    def id(self):
+        # Back-compat for older clients <=6.4.1.
+        # (Allows old scan to see new suites.)
+        return (self.name, self.owner)
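
Because identify() only includes the keys the caller is privileged to see, consumers have to treat every field as optional. A minimal sketch against an invented result dict (values are for illustration only; the 'states' tuple follows get_state_totals above):

    # A result as identify() might return it when only 'identity' and
    # 'state-totals' access is granted.
    result = {
        'name': 'my.suite',
        'owner': 'alice',
        'states': ({'running': 2, 'waiting': 5},
                   {'20150101T00Z': {'running': 2, 'waiting': 5}}),
    }
    name = result.get('name', '(identity hidden)')
    owner = result.get('owner', '(identity hidden)')
    title = result.get('title', '(no description access)')
    line = '%s@%s: %s' % (owner, name, title)
    totals = result.get('states')
    if totals is not None:
        state_count_totals, state_count_cycles = totals
        line += ' ' + ' '.join(
            '%s:%d' % (state, num)
            for state, num in sorted(state_count_totals.items()))
    print line
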
diff --git a/lib/cylc/network/suite_info.py b/lib/cylc/network/suite_info.py
new file mode 100644
index 0000000..9ec50aa
--- /dev/null
+++ b/lib/cylc/network/suite_info.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+
+import cylc.flags
+from cylc.network import PYRO_INFO_OBJ_NAME
+from cylc.network.pyro_base import PyroClient, PyroServer
+from cylc.network import check_access_priv
+
+
+# Back-compat for older suite daemons <= 6.4.1.
+back_compat = {
+    'ping_suite': 'ping suite',
+    'ping_task': 'ping task',
+    'get_suite_info': 'suite info',
+    'get_task_info': 'task info',
+    'get_all_families': 'all families',
+    'get_triggering_families': 'triggering families',
+    'get_first_parent_ancestors': 'first-parent ancestors',
+    'get_first_parent_descendants': 'first-parent descendants',
+    'get_graph_raw': 'graph raw',
+    'get_task_requisites': 'task requisites',
+    'get_cylc_version': 'get cylc version',
+    'get_task_jobfile_path': 'task job file path'
+}
+
+
+class SuiteInfoServer(PyroServer):
+    """Server-side suite information interface."""
+
+    def __init__(self, info_commands):
+        super(SuiteInfoServer, self).__init__()
+        self.commands = info_commands
+
+    def get(self, command, *command_args):
+        if ('ping' in command or 'version' in command):
+            # Free info.
+            pass
+        elif 'suite' in command and 'info' in command:
+            # Suite title and description only.
+            check_access_priv(self, 'description')
+        else:
+            check_access_priv(self, 'full-read')
+        self.report(command)
+        return self.commands[command](*command_args)
+
+
+class SuiteInfoClient(PyroClient):
+    """Client-side suite information interface."""
+
+    target_server_object = PYRO_INFO_OBJ_NAME
+
+    def get_info(self, *args):
+        try:
+            return self.call_server_func("get", *args)
+        except KeyError:
+            # Back-compat for older suite daemons <= 6.4.1.
+            command = back_compat[args[0]]
+            args = tuple([command]) + args[1:]
+            return self.call_server_func("get", *args)
diff --git a/lib/cylc/suite_log_interface.py b/lib/cylc/network/suite_log.py
similarity index 68%
rename from lib/cylc/suite_log_interface.py
rename to lib/cylc/network/suite_log.py
index a30bd4e..bbad803 100644
--- a/lib/cylc/suite_log_interface.py
+++ b/lib/cylc/network/suite_log.py
@@ -16,25 +16,27 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import Pyro.core
 import os
+from cylc.network import PYRO_LOG_OBJ_NAME
+from cylc.network.pyro_base import PyroClient, PyroServer
+from cylc.network import check_access_priv
 
 
-class log_interface(Pyro.core.ObjBase):
-
-    """Implement an interface to the log files."""
+class SuiteLogServer(PyroServer):
+    """Server-side suite log interface."""
 
     def __init__(self, log):
-        Pyro.core.ObjBase.__init__(self)
+        super(SuiteLogServer, self).__init__()
         self.log = log
         self.err_file = log.get_err_path()
 
-    def get_err_has_changed(self, prev_err_size):
+    def _get_err_has_changed(self, prev_err_size):
         """Return True if the file has changed size compared to prev_size."""
-        return self.get_err_size() != prev_err_size
+        return self._get_err_size() != prev_err_size
 
-    def get_err_size(self):
+    def _get_err_size(self):
         """Return the os.path.getsize result for the error file."""
+
         try:
             size = os.path.getsize(self.err_file)
         except (IOError, OSError) as e:
@@ -44,16 +46,28 @@ class log_interface(Pyro.core.ObjBase):
 
     def get_err_content(self, prev_size=0, max_lines=100):
         """Return the content and new size of the error file."""
-        if not self.get_err_has_changed(prev_size):
+
+        check_access_priv(self, 'full-read')
+        self.report("get_err_content")
+        if not self._get_err_has_changed(prev_size):
             return [], prev_size
         try:
             f = open(self.err_file, "r")
             f.seek(prev_size)
             new_content = f.read()
             f.close()
-            size = self.get_err_size()
+            size = self._get_err_size()
         except (IOError, OSError) as e:
             self.log.warning("Could not read suite err log file: %s" % e)
             return "", prev_size
         new_content_lines = new_content.splitlines()[-max_lines:]
         return "\n".join(new_content_lines), size
+
+
+class SuiteLogClient(PyroClient):
+    """Client-side suite log interface."""
+
+    target_server_object = PYRO_LOG_OBJ_NAME
+
+    def get_err_content(self, *args):
+        return self.call_server_func("get_err_content", *args)
diff --git a/lib/cylc/state_summary.py b/lib/cylc/network/suite_state.py
similarity index 51%
rename from lib/cylc/state_summary.py
rename to lib/cylc/network/suite_state.py
index 35133eb..d4b313e 100644
--- a/lib/cylc/state_summary.py
+++ b/lib/cylc/network/suite_state.py
@@ -16,37 +16,55 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import Pyro.core
-import logging
-from cylc.task_id import TaskID
 import time
-from datetime import datetime
-import flags
-from wallclock import TIME_ZONE_LOCAL_INFO, TIME_ZONE_UTC_INFO
+import datetime
+
+import cylc.flags
+from cylc.task_id import TaskID
+from cylc.wallclock import TIME_ZONE_LOCAL_INFO, TIME_ZONE_UTC_INFO
+from cylc.config import SuiteConfig
+from cylc.network import PYRO_STATE_OBJ_NAME
+from cylc.network.pyro_base import PyroClient, PyroServer
+from cylc.network import check_access_priv
+
+
+class SuiteStillInitialisingError(Exception):
+    """Exception raised if a summary is requested before the first update.
+
+    This can happen if a client connects during start-up for large suites.
 
+    """
+    def __str__(self):
+        return "Suite initializing..."
 
 
-class state_summary( Pyro.core.ObjBase ):
-    """supply suite state summary information to remote cylc clients."""
+class StateSummaryServer(PyroServer):
+    """Server-side suite state summary interface."""
 
-    def __init__( self, config, run_mode, start_time ):
-        Pyro.core.ObjBase.__init__(self)
+    _INSTANCE = None
+
+    @classmethod
+    def get_inst(cls, run_mode=None):
+        """Return a singleton instance."""
+        if cls._INSTANCE is None:
+            cls._INSTANCE = cls(run_mode)
+        return cls._INSTANCE
+
+    def __init__(self, run_mode):
+        super(StateSummaryServer, self).__init__()
         self.task_summary = {}
         self.global_summary = {}
-        self.task_name_list = []
         self.family_summary = {}
-        # external monitors should access config via methods in this
-        # class, in case config items are ever updated dynamically by
-        # remote control
-        self.config = config
         self.run_mode = run_mode
-        self.start_time = start_time
+        self.first_update_completed = False
         self._summary_update_time = None
 
-    def update( self, tasks, tasks_rh, min_point, max_point, max_point_rh, paused,
-            will_pause_at, stopping, will_stop_at, ns_defn_order ):
+        self.state_count_totals = {}
+        self.state_count_cycles = {}
 
-        task_name_list = []
+    def update(self, tasks, tasks_rh, min_point, max_point, max_point_rh,
+               paused, will_pause_at, stopping, will_stop_at, ns_defn_order,
+               reloading):
         task_summary = {}
         global_summary = {}
         family_summary = {}
@@ -64,11 +82,8 @@ class state_summary( Pyro.core.ObjBase ):
                 task_states.setdefault(point_string, {})
                 task_states[point_string][name] = (
                     task_summary[task.identity]['state'])
-                task_name_list.append(name)
             fs = 'runahead'
 
-        task_name_list = list(set(task_name_list))
-
         fam_states = {}
         all_states = []
         for point_string, c_task_states in task_states.items():
@@ -76,12 +91,14 @@ class state_summary( Pyro.core.ObjBase ):
             # based on the first-parent single-inheritance tree
 
             c_fam_task_states = {}
+            config = SuiteConfig.get_inst()
 
-            for key, parent_list in self.config.get_first_parent_ancestors().items():
+            for key, parent_list in (
+                    config.get_first_parent_ancestors().items()):
                 state = c_task_states.get(key)
                 if state is None:
                     continue
-                all_states.append( state )
+                all_states.append(state)
                 for parent in parent_list:
                     if parent == key:
                         continue
@@ -94,7 +111,7 @@ class state_summary( Pyro.core.ObjBase ):
                 if state is None:
                     continue
                 try:
-                    famcfg = self.config.cfg['runtime'][fam]
+                    famcfg = config.cfg['runtime'][fam]
                 except KeyError:
                     famcfg = {}
                 description = famcfg.get('description')
@@ -107,75 +124,116 @@ class state_summary( Pyro.core.ObjBase ):
 
         all_states.sort()
 
-        global_summary[ 'start time' ] = self.str_or_None(self.start_time)
-        global_summary[ 'oldest cycle point string' ] = (
+        # Compute state_counts (total, and per cycle).
+        state_count_totals = {}
+        state_count_cycles = {}
+        for point_string, name_states in task_states.items():
+            count = {}
+            for name, state in name_states.items():
+                try:
+                    count[state] += 1
+                except KeyError:
+                    count[state] = 1
+                try:
+                    state_count_totals[state] += 1
+                except KeyError:
+                    state_count_totals[state] = 1
+            state_count_cycles[point_string] = count
+
+        global_summary['oldest cycle point string'] = (
             self.str_or_None(min_point))
-        global_summary[ 'newest cycle point string' ] = (
+        global_summary['newest cycle point string'] = (
             self.str_or_None(max_point))
-        global_summary[ 'newest runahead cycle point string' ] = (
+        global_summary['newest runahead cycle point string'] = (
             self.str_or_None(max_point_rh))
-        if flags.utc:
-            global_summary[ 'daemon time zone info' ] = TIME_ZONE_UTC_INFO
+        if cylc.flags.utc:
+            global_summary['daemon time zone info'] = TIME_ZONE_UTC_INFO
         else:
-            global_summary[ 'daemon time zone info' ] = TIME_ZONE_LOCAL_INFO
-        global_summary[ 'last_updated' ] = time.time()
-        global_summary[ 'run_mode' ] = self.run_mode
-        global_summary[ 'paused' ] = paused
-        global_summary[ 'stopping' ] = stopping
-        global_summary[ 'will_pause_at' ] = self.str_or_None(will_pause_at)
-        global_summary[ 'will_stop_at' ] = self.str_or_None(will_stop_at)
-        global_summary[ 'states' ] = all_states
-        global_summary[ 'namespace definition order' ] = ns_defn_order
+            global_summary['daemon time zone info'] = TIME_ZONE_LOCAL_INFO
+        global_summary['last_updated'] = time.time()
+        global_summary['run_mode'] = self.run_mode
+        global_summary['paused'] = paused
+        global_summary['stopping'] = stopping
+        global_summary['will_pause_at'] = self.str_or_None(will_pause_at)
+        global_summary['will_stop_at'] = self.str_or_None(will_stop_at)
+        global_summary['states'] = all_states
+        global_summary['namespace definition order'] = ns_defn_order
+        global_summary['reloading'] = reloading
+        global_summary['state totals'] = state_count_totals
 
         self._summary_update_time = time.time()
-        # replace the originals
-        self.task_name_list = task_name_list
+
+        # Replace the originals (atomic update, for access from other threads).
         self.task_summary = task_summary
         self.global_summary = global_summary
         self.family_summary = family_summary
         task_states = {}
+        self.first_update_completed = True
+        self.state_count_totals = state_count_totals
+        self.state_count_cycles = state_count_cycles
 
-    def str_or_None( self, s ):
+    def str_or_None(self, s):
         if s:
             return str(s)
         else:
             return None
 
-    def get_task_name_list( self ):
-        """Return the list of active task ids."""
-        self.task_name_list.sort()
-        return self.task_name_list
+    def get_state_totals(self):
+        # (Access to this is controlled via the suite_identity server.)
+        return (self.state_count_totals, self.state_count_cycles)
 
-    def get_state_summary( self ):
+    def get_state_summary(self):
         """Return the global, task, and family summary data structures."""
-        return [ self.global_summary, self.task_summary, self.family_summary ]
+        check_access_priv(self, 'full-read')
+        self.report('get_state_summary')
+        if not self.first_update_completed:
+            raise SuiteStillInitialisingError()
+        return (self.global_summary, self.task_summary, self.family_summary)
 
-    def get_summary_update_time( self ):
+    def get_summary_update_time(self):
         """Return the last time the summaries were changed (Unix time)."""
+        check_access_priv(self, 'state-totals')
+        self.report('get_state_summary_update_time')
+        if not self.first_update_completed:
+            raise SuiteStillInitialisingError()
         return self._summary_update_time
 
 
-def extract_group_state( child_states, is_stopped=False ):
+class StateSummaryClient(PyroClient):
+    """Client-side suite state summary interface."""
+
+    target_server_object = PYRO_STATE_OBJ_NAME
+
+    def get_suite_state_summary(self):
+        return self.call_server_func("get_state_summary")
+
+    def get_suite_state_summary_update_time(self):
+        return self.call_server_func("get_summary_update_time")
+
+
+def extract_group_state(child_states, is_stopped=False):
     """Summarise child states as a group."""
-    ordered_states = ['submit-failed', 'failed', 'submit-retrying', 'retrying', 'running',
-            'submitted', 'ready', 'queued', 'waiting', 'held', 'succeeded', 'runahead']
+
+    ordered_states = ['submit-failed', 'failed', 'expired', 'submit-retrying',
+                      'retrying', 'running', 'submitted', 'ready', 'queued',
+                      'waiting', 'held', 'succeeded', 'runahead']
     if is_stopped:
         ordered_states = ['submit-failed', 'failed', 'running', 'submitted',
-            'ready', 'submit-retrying', 'retrying', 'succeeded', 'queued', 'waiting',
-            'held', 'runahead']
+                          'expired', 'ready', 'submit-retrying', 'retrying',
+                          'succeeded', 'queued', 'waiting', 'held', 'runahead']
     for state in ordered_states:
         if state in child_states:
             return state
     return None
 
 
-def get_id_summary( id_, task_state_summary, fam_state_summary, id_family_map ):
+def get_id_summary(id_, task_state_summary, fam_state_summary, id_family_map):
     """Return some state information about a task or family id."""
     prefix_text = ""
     meta_text = ""
     sub_text = ""
     sub_states = {}
-    stack = [( id_, 0 )]
+    stack = [(id_, 0)]
     done_ids = []
     for summary in [task_state_summary, fam_state_summary]:
         if id_ in summary:
@@ -186,10 +244,10 @@ def get_id_summary( id_, task_state_summary, fam_state_summary, id_family_map ):
             if description:
                 meta_text += "\n" + description.strip()
     while stack:
-        this_id, depth = stack.pop( 0 )
+        this_id, depth = stack.pop(0)
         if this_id in done_ids:  # family dive down will give duplicates
             continue
-        done_ids.append( this_id )
+        done_ids.append(this_id)
         prefix = "\n" + " " * 4 * depth + this_id
         if this_id in task_state_summary:
             submit_num = task_state_summary[this_id].get('submit_num')
@@ -197,24 +255,24 @@ def get_id_summary( id_, task_state_summary, fam_state_summary, id_family_map ):
                 prefix += "(%02d)" % submit_num
             state = task_state_summary[this_id]['state']
             sub_text += prefix + " " + state
-            sub_states.setdefault( state, 0 )
+            sub_states.setdefault(state, 0)
             sub_states[state] += 1
         elif this_id in fam_state_summary:
             name, point_string = TaskID.split(this_id)
             sub_text += prefix + " " + fam_state_summary[this_id]['state']
-            for child in reversed( sorted( id_family_map[name] ) ):
+            for child in reversed(sorted(id_family_map[name])):
                 child_id = TaskID.get(child, point_string)
-                stack.insert( 0, ( child_id, depth + 1 ) )
+                stack.insert(0, (child_id, depth + 1))
         if not prefix_text:
             prefix_text = sub_text.strip()
             sub_text = ""
-    if len( sub_text.splitlines() ) > 10:
+    if len(sub_text.splitlines()) > 10:
         state_items = sub_states.items()
         state_items.sort()
-        state_items.sort( lambda x, y: cmp( y[1], x[1] ) )
+        state_items.sort(lambda x, y: cmp(y[1], x[1]))
         sub_text = ""
         for state, number in state_items:
-            sub_text += "\n    {0} tasks {1}".format( number, state )
+            sub_text += "\n    {0} tasks {1}".format(number, state)
     if sub_text and meta_text:
         sub_text = "\n" + sub_text
     text = prefix_text + meta_text + sub_text
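
extract_group_state above simply returns the highest-priority state present among the children, so a family shows 'failed' as soon as any member fails. For example (assuming cylc is on PYTHONPATH):

    from cylc.network.suite_state import extract_group_state

    # The first state in the priority ordering that any child is in wins.
    print extract_group_state(['succeeded', 'running', 'waiting'])  # running
    print extract_group_state(['succeeded', 'failed', 'waiting'])   # failed
    print extract_group_state(['succeeded'], is_stopped=True)       # succeeded
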
diff --git a/lib/cylc/network/task_msgqueue.py b/lib/cylc/network/task_msgqueue.py
new file mode 100644
index 0000000..52690cf
--- /dev/null
+++ b/lib/cylc/network/task_msgqueue.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from Queue import Queue
+from cylc.owner import user
+from cylc.suite_host import get_hostname
+from cylc.network.pyro_base import PyroClient, PyroServer
+from cylc.network import check_access_priv
+
+
+class TaskMessageServer(PyroServer):
+    """Server-side task messaging interface"""
+
+    def __init__(self):
+        super(TaskMessageServer, self).__init__()
+        self.queue = Queue()
+
+    def put(self, priority, message):
+        check_access_priv(self, 'full-control')
+        self.report('task_message')
+        self.queue.put((priority, message))
+        return (True, 'Message queued')
+
+    def get_queue(self):
+        return self.queue
+
+
+class TaskMessageClient(PyroClient):
+    """Client-side task messaging interface"""
+
+    def __init__(self, suite, task_id, pphrase, owner=user,
+                 host=get_hostname(), pyro_timeout=None, port=None):
+        self.__class__.target_server_object = task_id
+        super(TaskMessageClient, self).__init__(
+            suite, pphrase, owner, host, pyro_timeout, port)
+
+    def put(self, *args):
+        self.call_server_func('put', *args)
diff --git a/lib/cylc/output.py b/lib/cylc/output.py
index 081daa1..f633427 100644
--- a/lib/cylc/output.py
+++ b/lib/cylc/output.py
@@ -20,10 +20,11 @@ import re
 from cycling.loader import get_interval, get_interval_cls
 from trigger import BACK_COMPAT_MSG_RE, MSG_RE
 
+
 class output(object):
     """
     Task outputs, used to generate message output strings.
-    
+
     Unlike prerequisites generated by task triggers, which can have message
     and graph offsets, outputs only have message offsets; they are always
     evaluated at the task's own cycle point.
@@ -39,7 +40,8 @@ class output(object):
             # Old-style offset
             prefix, signed_offset, sign, offset, suffix = m.groups()
             if signed_offset is not None:
-                self.msg_offset = base_interval.get_inferred_child(signed_offset)
+                self.msg_offset = base_interval.get_inferred_child(
+                    signed_offset)
         else:
             n = re.match(MSG_RE, msg)
             if n:
diff --git a/lib/cylc/outputs.py b/lib/cylc/outputs.py
index 25ec491..e89892a 100644
--- a/lib/cylc/outputs.py
+++ b/lib/cylc/outputs.py
@@ -16,13 +16,14 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import re, sys
+import sys
 
 # OUTPUTS:
 # A collection of messages representing the outputs of ONE TASK.
 
-class outputs( object ):
-    def __init__( self, owner_id ):
+
+class outputs(object):
+    def __init__(self, owner_id):
 
         self.owner_id = owner_id
         # Store completed and not-completed outputs in separate
@@ -35,77 +36,78 @@ class outputs( object ):
         self.completed = {}
         self.not_completed = {}
 
-    def count( self ):
-        return len( self.completed ) + len( self.not_completed )
+    def count(self):
+        return len(self.completed) + len(self.not_completed)
 
-    def count_completed( self ):
-        return len( self.completed )
+    def count_completed(self):
+        return len(self.completed)
 
-    def dump( self ):
+    def dump(self):
         # return a list of strings representing each message and its state
         res = []
         for key in self.not_completed:
-            res.append( [ key, False ]  )
+            res.append([key, False])
         for key in self.completed:
-            res.append( [ key, True ]  )
+            res.append([key, True])
         return res
 
-    def all_completed( self ):
-        if len( self.not_completed ) == 0:
+    def all_completed(self):
+        if len(self.not_completed) == 0:
             return True
         else:
             return False
 
-    def is_completed( self, message ):
+    def is_completed(self, message):
         if message in self.completed:
             return True
         else:
             return False
 
-    def set_completed( self, message ):
+    def set_completed(self, message):
         try:
             del self.not_completed[message]
         except:
             pass
-        self.completed[ message ] = self.owner_id
+        self.completed[message] = self.owner_id
 
-    def exists( self, message ):
+    def exists(self, message):
         if message in self.completed or message in self.not_completed:
             return True
         else:
             return False
 
-    def set_all_incomplete( self ):
+    def set_all_incomplete(self):
         for message in self.completed.keys():
             del self.completed[message]
-            self.not_completed[ message ] = self.owner_id
+            self.not_completed[message] = self.owner_id
 
-    def set_all_completed( self ):
+    def set_all_completed(self):
         for message in self.not_completed.keys():
             del self.not_completed[message]
-            self.completed[ message ] = self.owner_id
+            self.completed[message] = self.owner_id
 
-    def add( self, message, completed=False ):
+    def add(self, message, completed=False):
         # Add a new output message
         if message in self.completed or message in self.not_completed:
             # duplicate output messages are an error.
-            print >> sys.stderr, 'WARNING: output already registered: ' + message
+            print >> sys.stderr, (
+                'WARNING: output already registered: ' + message)
         if not completed:
             self.not_completed[message] = self.owner_id
         else:
             self.completed[message] = self.owner_id
 
-    def remove( self, message, fail_silently=False ):
+    def remove(self, message, fail_silently=False):
         if message in self.completed:
-            del self.completed[ message ]
+            del self.completed[message]
         elif message in self.not_completed:
-            del self.not_completed[ message ]
+            del self.not_completed[message]
         elif not fail_silently:
             print >> sys.stderr, 'WARNING: no such output to delete:'
             print >> sys.stderr, ' => ', message
 
-    def register( self ):
+    def register(self):
         # automatically define special outputs common to all tasks
-        self.add( self.owner_id + ' submitted' )
-        self.add( self.owner_id + ' started' )
-        self.add( self.owner_id + ' succeeded' )
+        self.add(self.owner_id + ' submitted')
+        self.add(self.owner_id + ' started')
+        self.add(self.owner_id + ' succeeded')
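
The outputs class above just tracks which of a task's message outputs have been completed. A brief usage sketch (the task id is invented):

    from cylc.outputs import outputs

    outs = outputs('foo.20150101T00Z')
    outs.register()  # adds the 'submitted', 'started', 'succeeded' outputs
    outs.set_completed('foo.20150101T00Z submitted')
    outs.set_completed('foo.20150101T00Z started')
    print outs.count_completed(), 'of', outs.count()  # 2 of 3
    print outs.all_completed()                        # False
    outs.set_all_completed()
    print outs.all_completed()                        # True
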
diff --git a/lib/cylc/owner.py b/lib/cylc/owner.py
index dda7737..d07f19c 100644
--- a/lib/cylc/owner.py
+++ b/lib/cylc/owner.py
@@ -19,9 +19,14 @@
 """In analogy with cylc.hostname.is_remote_host(), determine if a
 username is "remote"."""
 
-import os, pwd
+import os
+import pwd
+from cylc.suite_host import get_hostname
+
+user = os.environ.get('USER', pwd.getpwuid(os.getuid()).pw_name)
+host = get_hostname()
+user_at_host = "%s@%s" % (user, host)
 
-user = os.environ.get( 'USER', pwd.getpwuid(os.getuid()).pw_name )
 
 def is_remote_user(name):
     """Return True if name is different than the current username.
diff --git a/lib/cylc/passphrase.py b/lib/cylc/passphrase.py
index 5e2d1c8..91f6380 100644
--- a/lib/cylc/passphrase.py
+++ b/lib/cylc/passphrase.py
@@ -16,55 +16,42 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, re
-from stat import *
+import os
+import re
+import sys
 import random
 import string
-from mkdir_p import mkdir_p
-from suite_host import get_hostname, is_remote_host
-from owner import user, is_remote_user
-import flags
+from stat import *
+
+import cylc.flags
+from cylc.mkdir_p import mkdir_p
+from cylc.suite_host import get_hostname, is_remote_host
+from cylc.owner import user, is_remote_user
+
+# TODO - Pyro passphrase handling could do with a complete overhaul, but
+# it will soon be made obsolete by the upcoming communications refactor.
 
-class SecurityError( Exception ):
+
+class PassphraseError(Exception):
     """
     Attributes:
         message - what the problem is.
     """
-    def __init__( self, msg ):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
-        return repr(self.msg)
-
-class PassphraseNotFoundError( SecurityError ):
-    pass
-
-class PassphraseNotReadableError( SecurityError ):
-    pass
 
-class InsecurePassphraseError( SecurityError ):
-    pass
+    def __str__(self):
+        return repr(self.msg)
 
-class InvalidPassphraseError( SecurityError ):
-    pass
 
 class passphrase(object):
-    def __init__( self, suite, owner=user, host=get_hostname() ):
+    def __init__(self, suite, owner=user, host=get_hostname()):
         self.suite = suite
         self.owner = owner
         self.host = host
         self.location = None
 
-        ### ?? this doesn't matter, we now set permissions explicitly:
-        ### ?? TODO - handle existing file that owner can't read? etc.?
-        ##mode = os.stat( ppfile )[ST_MODE]
-        ##if not S_IRUSR & mode:
-        ##    raise PassphraseNotReadableError, 'Owner cannot read passphrase file: ' + ppfile
-        ##if S_IROTH & mode or S_IWOTH & mode or S_IXOTH & mode:
-        ##    raise InsecurePassphraseError, 'OTHERS have access to passphrase file: ' + ppfile
-        ##if S_IRGRP & mode or S_IWGRP & mode or S_IXGRP & mode:
-        ##    raise InsecurePassphraseError, 'GROUP has access to passphrase file: ' + ppfile
-
-    def get_passphrase_file( self, pfile=None, suitedir=None ):
+    def get_passphrase_file(self, pfile=None, suitedir=None):
         """
 Passphrase location, order of preference:
 
@@ -93,29 +80,29 @@ environment.
 These are more sensible locations for remote suite control from accounts
 that do not actually need the suite definition directory to be installed.
 """
-        # 1/ explicit location given on the command line
+        # 1/ Explicit suite definition directory given on the command line.
         if pfile:
-            if os.path.isdir( pfile ):
-                # if a directory is given assume the filename
-                pfile = os.path.join( pfile, 'passphrase' )
-            if os.path.isfile( pfile ):
-                self.set_location( pfile )
-
+            if os.path.isdir(pfile):
+                pfile = os.path.join(pfile, 'passphrase')
+            if os.path.isfile(pfile):
+                self.set_location(pfile)
             else:
-                # if an explicit location is given, the file must exist
-                raise SecurityError, 'ERROR, file not found on ' + user + '@' + get_hostname() + ': ' + pfile
+                # If an explicit location is given, the file must exist.
+                raise PassphraseError(
+                    'ERROR, file not found on %s@%s: %s' % (
+                        user, get_hostname(), pfile))
 
-        # 2/ cylc commands with suite definition directory from local registration
+        # 2/ Cylc commands with suite definition directory from local reg.
         if not self.location and suitedir:
-            pfile = os.path.join( suitedir, 'passphrase' )
-            if os.path.isfile( pfile ):
-                self.set_location( pfile )
+            pfile = os.path.join(suitedir, 'passphrase')
+            if os.path.isfile(pfile):
+                self.set_location(pfile)
 
         # (2 before 3 else sub-suites load their parent suite's
         # passphrase on start-up because the "cylc run" command runs in
         # a parent suite task execution environment).
 
-        # 3/ running tasks: suite definition directory (from the task execution environment)
+        # 3/ Running tasks: suite def dir from the task execution environment.
         if not self.location:
             try:
                 # Test for presence of task execution environment
@@ -126,22 +113,22 @@ that do not actually need the suite definition directory to be installed.
                 pass
             else:
                 # called by a task
-                if is_remote_host( suite_host ) or is_remote_user( suite_owner ):
-                    # 2(i)/ cylc messaging calls on a remote account.
+                if is_remote_host(suite_host) or is_remote_user(suite_owner):
+                    # 2(i)/ Task messaging call on a remote account.
 
                     # First look in the remote suite definition
                     # directory ($CYLC_SUITE_DEF_PATH is modified for
                     # remote tasks):
                     try:
-                        pfile = os.path.join( os.environ['CYLC_SUITE_DEF_PATH'], 'passphrase' )
+                        pfile = os.path.join(
+                            os.environ['CYLC_SUITE_DEF_PATH'], 'passphrase')
                     except KeyError:
                         pass
                     else:
-                        if os.path.isfile( pfile ):
-                            self.set_location( pfile )
-
+                        if os.path.isfile(pfile):
+                            self.set_location(pfile)
                 else:
-                    # 2(ii)/ cylc messaging calls on the suite host and account.
+                    # 2(ii)/ Task messaging call on the suite host account.
 
                     # Could be a local task or a remote task with 'ssh
                     # messaging = True'. In either case use
@@ -149,71 +136,91 @@ that do not actually need the suite definition directory to be installed.
                     # changes, not $CYLC_SUITE_DEF_PATH which gets
                     # modified for remote tasks as described above.
                     try:
-                        pfile = os.path.join( os.environ['CYLC_SUITE_DEF_PATH_ON_SUITE_HOST'], 'passphrase' )
+                        pfile = os.path.join(
+                            os.environ['CYLC_SUITE_DEF_PATH_ON_SUITE_HOST'],
+                            'passphrase')
                     except KeyError:
                         pass
                     else:
-                        if os.path.isfile( pfile ):
-                            self.set_location( pfile )
+                        if os.path.isfile(pfile):
+                            self.set_location(pfile)
 
-        # 4/ other allowed locations, as documented above
+        # 4/ Other allowed locations, as documented above.
         if not self.location:
             locations = []
             # For remote control commands, self.host here will be fully
             # qualified or not depending on what's given on the command line.
-            short_host = re.sub( '\..*', '', self.host )
-
-            locations.append( os.path.join( os.environ['HOME'], '.cylc', self.host, self.owner, self.suite, 'passphrase' ))
+            short_host = re.sub('\..*', '', self.host)
+            prefix = os.path.join(os.environ['HOME'], '.cylc')
+            locations.append(
+                os.path.join(
+                    prefix, self.host, self.owner, self.suite, 'passphrase'))
             if short_host != self.host:
-                locations.append( os.path.join( os.environ['HOME'], '.cylc', short_host, self.owner, self.suite, 'passphrase' ))
-            locations.append( os.path.join( os.environ['HOME'], '.cylc', self.host, self.suite, 'passphrase' ))
+                locations.append(os.path.join(
+                    prefix, short_host, self.owner, self.suite, 'passphrase'))
+            locations.append(
+                os.path.join(prefix, self.host, self.suite, 'passphrase'))
             if short_host != self.host:
-                locations.append( os.path.join( os.environ['HOME'], '.cylc', short_host, self.suite, 'passphrase' ))
-            locations.append( os.path.join( os.environ['HOME'], '.cylc', self.suite, 'passphrase' ))
+                locations.append(os.path.join(
+                    prefix, short_host, self.suite, 'passphrase'))
+            locations.append(os.path.join(prefix, self.suite, 'passphrase'))
             for pfile in locations:
-                if os.path.isfile( pfile ):
-                    self.set_location( pfile )
+                if os.path.isfile(pfile):
+                    self.set_location(pfile)
                     break
 
         if not self.location:
-            raise SecurityError, 'ERROR: passphrase for suite ' + self.suite + ' not found on ' + user + '@' + get_hostname()
-
+            raise PassphraseError(
+                'ERROR: passphrase for suite %s not found on %s@%s' % (
+                    self.suite, user, get_hostname()))
         return self.location
 
-    def set_location( self, pfile ):
-        if flags.verbose:
-            print 'Passphrase detected at', pfile, 'on', user + '@' + get_hostname()
+    def set_location(self, pfile):
+        if cylc.flags.debug:
+            print '%s (%s@%s)' % (pfile, user, get_hostname())
         self.location = pfile
 
-    def generate( self, dir ):
+    def generate(self, dir):
         pfile = os.path.join(dir, 'passphrase')
-        if os.path.isfile( pfile ):
+        if os.path.isfile(pfile):
             try:
-                self.get( pfile )
+                self.get(pfile)
                 return
-            except SecurityError:
+            except PassphraseError:
                 pass
         # Note: Perhaps a UUID might be better here?
-        char_set = string.ascii_uppercase + string.ascii_lowercase + string.digits
+        char_set = (
+            string.ascii_uppercase + string.ascii_lowercase + string.digits)
         self.passphrase = ''.join(random.sample(char_set, 20))
         mkdir_p(dir)
         f = open(pfile, 'w')
         f.write(self.passphrase)
         f.close()
         # set passphrase file permissions to owner-only
-        os.chmod( pfile, 0600 )
-        if flags.verbose:
-            print 'Generated suite passphrase file on', user + '@' + get_hostname() + ':', pfile
-
-    def get( self, pfile=None, suitedir=None ):
-        ppfile = self.get_passphrase_file( pfile, suitedir )
-        psf = open( ppfile, 'r' )
+        os.chmod(pfile, 0600)
+        if cylc.flags.verbose:
+            print 'Generated suite passphrase: %s@%s:%s' % (
+                user, get_hostname(), pfile)
+
+    def get(self, pfile=None, suitedir=None):
+        ppfile = self.get_passphrase_file(pfile, suitedir)
+        psf = open(ppfile, 'r')
         lines = psf.readlines()
         psf.close()
-        if len(lines) == 0:
-            raise InvalidPassphraseError, 'ERROR, passphrase file is empty, on ' + user + '@' + get_hostname() + ': ' + ppfile
-        if len(lines) > 1:
-            raise InvalidPassphraseError, 'ERROR, passphrase file contains multiple lines, on ' + user + '@' + get_hostname() + ': ' + ppfile
+        if len(lines) != 1:
+            raise PassphraseError(
+                'ERROR, invalid passphrase file: %s@%s:%s' % (
+                    user, get_hostname(), ppfile))
         # chomp trailing whitespace and newline
         self.passphrase = lines[0].strip()
         return self.passphrase
+
+
+def get_passphrase(suite, owner, host, db):
+    """Find a suite passphrase."""
+    if not is_remote_host(host) and not is_remote_user(owner):
+        # Local suite, retrieve suite definition directory location.
+        suitedir = os.path.dirname(db.get_suiterc(suite))
+    else:
+        suitedir = None
+    return passphrase(suite, owner, host).get(None, suitedir)
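
Step 4 of the passphrase search above checks a fixed set of locations under $HOME/.cylc, from most to least specific, and the first existing file wins. The candidate list in isolation (host, owner and suite values invented for illustration):

    import os
    import re

    host, owner, suite = 'niwa-01.example.com', 'alice', 'my.suite'
    short_host = re.sub('\..*', '', host)
    prefix = os.path.join(os.environ['HOME'], '.cylc')

    locations = [os.path.join(prefix, host, owner, suite, 'passphrase')]
    if short_host != host:
        locations.append(
            os.path.join(prefix, short_host, owner, suite, 'passphrase'))
    locations.append(os.path.join(prefix, host, suite, 'passphrase'))
    if short_host != host:
        locations.append(
            os.path.join(prefix, short_host, suite, 'passphrase'))
    locations.append(os.path.join(prefix, suite, 'passphrase'))

    # First existing file wins, as in passphrase.get_passphrase_file above.
    found = [p for p in locations if os.path.isfile(p)]
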
diff --git a/lib/cylc/port_file.py b/lib/cylc/port_file.py
deleted file mode 100644
index 5c87458..0000000
--- a/lib/cylc/port_file.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import os, sys
-from suite_host import is_remote_host
-from owner import user, is_remote_user
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-import flags
-
-"""Processes connecting to a running suite must know which port the
-suite server is listening on: at start-up cylc writes the port to
-$HOME/.cylc/ports/SUITE.
-
-Task messaging commands know the port number of the target suite from
-the task execution environment supplied by the suite: $CYLC_SUITE_PORT,
-so they do not need to read the port file (they do not use this class).
-
-Other cylc commands: on the suite host read the port file; on remote
-hosts use passwordless ssh to read the port file on the suite host. If
-passwordless ssh to the suite host is not configured this will fail and
-the user will have to give the port number on the command line."""
-
-class PortFileError( Exception ):
-    """
-    Attributes:
-        message - what the problem is.
-    """
-    def __init__( self, msg ):
-        self.msg = msg
-    def __str__( self ):
-        return repr(self.msg)
-
-class PortFileExistsError( PortFileError ):
-    pass
-
-class port_file( object ):
-    def __init__(self, suite, port ):
-        self.suite = suite
-
-        # the ports directory is assumed to exist
-
-        pdir = GLOBAL_CFG.get( ['pyro','ports directory'] )
- 
-        self.local_path = os.path.join( pdir, suite )
-
-        try:
-            self.port = str(int(port))
-        except ValueError, x:
-            print >> sys.stderr, x
-            raise PortFileError( "ERROR, illegal port number: " + str(port) )
-
-        self.write()
-
-    def write( self ):
-        if os.path.exists( self.local_path ):
-            raise PortFileExistsError( "ERROR, port file exists: " + self.local_path )
-        if flags.verbose:
-            print "Writing port file:", self.local_path
-        try:
-            f = open( self.local_path, 'w' )
-        except OSError,x:
-            raise PortFileError( "ERROR, failed to open port file: " + self.port )
-        f.write( self.port )
-        f.close()
-
-    def unlink( self ):
-        if flags.verbose:
-            print "Removing port file:", self.local_path
-        try:
-            os.unlink( self.local_path )
-        except OSError,x:
-            print >> sys.stderr, x
-            raise PortFileError( "ERROR, cannot remove port file: " + self.local_path )
-
-class port_retriever( object ):
-    def __init__(self, suite, host, owner ):
-        self.suite = suite
-        self.host = host
-        self.owner = owner
-        self.locn = None
-
-        self.local_path = os.path.join( GLOBAL_CFG.get( ['pyro','ports directory'] ), suite )
-
-    def get_local( self ):
-        self.locn = self.local_path
-        if not os.path.exists( self.local_path ):
-            raise PortFileError( "ERROR, port file not found: " + self.local_path )
-        f = open( self.local_path, 'r' )
-        str_port = f.readline().rstrip('\n')
-        f.close()
-        return str_port
-
-    def get_remote( self ):
-        import subprocess
-        target = self.owner + '@' + self.host
-        remote_path = self.local_path.replace( os.environ['HOME'], '$HOME' )
-        self.locn = target + ':' + remote_path
-        ssh = subprocess.Popen( ['ssh', '-oBatchMode=yes', target, 'cat', remote_path],
-                stdout=subprocess.PIPE, stderr=subprocess.PIPE )
-        str_port = ssh.stdout.readline().rstrip('\n')
-        err = ssh.stderr.readline()
-        res = ssh.wait()
-        if err:
-            print >> sys.stderr, err.rstrip('\n')
-        if res != 0:
-            raise PortFileError( "ERROR, remote port file not found" )
-        return str_port
-
-    def get( self ):
-        if flags.verbose:
-            print "Retrieving suite port number..."
-
-        if is_remote_host( self.host ) or is_remote_user( self.owner ):
-            str_port = self.get_remote()
-        else:
-            str_port = self.get_local()
-
-        try:
-            # convert to integer
-            port = int( str_port )
-        except ValueError, x:
-            # this also catches an empty port file (touch)
-            print >> sys.stderr, x
-            print >> sys.stderr, "ERROR: bad port file", self.locn
-            raise PortFileError( "ERROR, illegal port file content: " + str_port )
-
-        if flags.verbose:
-            print '...', port
-
-        return port
diff --git a/lib/cylc/port_scan.py b/lib/cylc/port_scan.py
deleted file mode 100644
index 53bf955..0000000
--- a/lib/cylc/port_scan.py
+++ /dev/null
@@ -1,269 +0,0 @@
-#!/usr/bin/pyro
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import os, sys
-from suite_host import get_hostname
-from owner import user
-from passphrase import passphrase
-from registration import localdb
-import datetime
-import Pyro.errors, Pyro.core
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-import flags
-
-class SuiteIdentificationError( Exception ):
-    """
-    Attributes: None
-    """
-    def __init__( self, msg ):
-        self.msg = msg
-    def __str__( self ):
-        return repr(self.msg)
-
-class ConnectionDeniedError( SuiteIdentificationError ):
-    pass
-
-class ConnectionTimedOutError( SuiteIdentificationError ):
-    pass
-
-class NoSuiteFoundError( SuiteIdentificationError ):
-    pass
-
-class SuiteNotFoundError( SuiteIdentificationError ):
-    pass
-
-class OtherSuiteFoundError( SuiteIdentificationError ):
-    pass
-
-class OtherServerFoundError( SuiteIdentificationError ):
-    pass
-
-class port_interrogator(object):
-    # find which suite is running on a given port
-    def __init__( self, host, port, my_passphrases=None, pyro_timeout=None ):
-        self.host = host
-        self.port = port
-        if pyro_timeout: # convert from string
-            self.pyro_timeout = float(pyro_timeout)
-        else:
-            self.pyro_timeout = None
-        self.my_passphrases = my_passphrases
-
-    def interrogate( self ):
-        # get a proxy to the cylcid object
-        # this raises ProtocolError if connection fails
-        uri = 'PYROLOC://' + self.host + ':' + str(self.port) + '/cylcid'
-        self.proxy = Pyro.core.getProxyForURI(uri)
-        self.proxy._setTimeout(self.pyro_timeout)
-
-        # first try access with no passphrase
-        name = owner = None
-        try:
-            # (caller handles TimeoutError)
-            name, owner = self.proxy.id()
-        except Pyro.errors.ConnectionDeniedError, x:
-            # must be a secure suite, try my passphrases
-            if not self.my_passphrases:
-                raise
-            for reg in self.my_passphrases:
-                self.proxy._setIdentification( self.my_passphrases[reg] )
-                try:
-                    name, owner = self.proxy.id()
-                except:
-                    # denied, try next passphrase
-                    continue
-                else:
-                    # got access
-                    if name == reg and owner == user:
-                        return name, owner, 'secure'
-                    else:
-                        # this indicates that one of my suites has an
-                        # identical passphrase to this other suite.
-                        continue
-
-            # loop end without returning ID => all of my passphrases denied
-            raise Pyro.errors.ConnectionDeniedError, x
-        except:
-            raise
-        else:
-            # got access with no passphrase => not a secure suite
-            # TODO - THIS IS NO LONGER LEGAL from cylc-4.5.0
-            return name, owner, 'insecure'
-
-def warn_timeout( host, port, timeout ):
-    print >> sys.stderr, "WARNING: connection timed out (" + str(timeout) + "s) at", portid( host, port )
-    #print >> sys.stderr, '  This could mean a Ctrl-Z stopped suite or similar is holding up the port,'
-    #print >> sys.stderr, '  or your pyro connection timeout needs to be longer than', str(timeout), 'seconds.'
-
-def portid( host, port ):
-    return host + ":" + str(port)
-
-# old complex output format for scan command etc.: '[suite] owner at host:port'
-# new simple output format is: 'suite owner host port' - better for parsing.
-##def suiteid( name, owner, host, port=None ):
-##    if port != None:
-##        res = "[" + name + "] " + owner + "@" + portid( host,port)
-##    else:
-##        res = "[" + name + "] " + owner + "@" + host
-##    return res
-
-def cylcid_uri( host, port ):
-    return 'PYROLOC://' + host + ':' + str(port) + '/cylcid'
-
-def get_port( suite, owner=user, host=get_hostname(), pphrase=None, pyro_timeout=None ):
-    # Scan ports until a particular suite is found.
-
-    pyro_base_port = GLOBAL_CFG.get( ['pyro','base port'] )
-    pyro_port_range = GLOBAL_CFG.get( ['pyro','maximum number of ports'] )
-
-    for port in range( pyro_base_port, pyro_base_port + pyro_port_range ):
-        uri = cylcid_uri( host, port )
-        try:
-            proxy = Pyro.core.getProxyForURI(uri)
-        except Pyro.errors.URIError, x:
-            # No such host?
-            raise SuiteNotFoundError, x
-
-        if pyro_timeout: # convert from string
-            pyro_timeout = float( pyro_timeout )
-
-        proxy._setTimeout(pyro_timeout)
-        proxy._setIdentification( pphrase )
-
-        before = datetime.datetime.now()
-        try:
-            name, xowner = proxy.id()
-        except Pyro.errors.TimeoutError:
-            warn_timeout( host, port, pyro_timeout )
-            pass
-        except Pyro.errors.ConnectionDeniedError:
-            #print >> sys.stderr, "Wrong suite or wrong passphrase at " + portid( host, port )
-            pass
-        except Pyro.errors.ProtocolError:
-            #print >> sys.stderr, "No Suite Found at " + portid( host, port )
-            pass
-        except Pyro.errors.NamingError:
-            #print >> sys.stderr, "Non-cylc pyro server found at " + portid( host, port )
-            pass
-        else:
-            if flags.verbose:
-                after = datetime.datetime.now()
-                print "Pyro connection on port " +str(port) + " took: " + str( after - before )
-            if name == suite and xowner == owner:
-                if flags.verbose:
-                    print suite, owner, host, port
-                # RESULT
-                return port
-            else:
-                # ID'd some other suite.
-                #print 'OTHER SUITE:', name, xowner, host, port
-                pass
-    raise SuiteNotFoundError, "Suite not running: " + suite + ' ' + owner + ' ' + host
-
-def check_port( suite, pphrase, port, owner=user, host=get_hostname(), pyro_timeout=None ):
-    # is a particular suite running at host:port?
-
-    uri = cylcid_uri( host, port )
-    proxy = Pyro.core.getProxyForURI(uri)
-    if pyro_timeout: # convert from string
-        pyro_timeout = float(pyro_timeout)
-    proxy._setTimeout(pyro_timeout)
-
-    proxy._setIdentification( pphrase )
-
-    before = datetime.datetime.now()
-    try:
-        name, xowner = proxy.id()
-    except Pyro.errors.TimeoutError:
-        warn_timeout( host, port, pyro_timeout )
-        raise ConnectionTimedOutError, "ERROR, Connection Timed Out " + portid( host, port )
-    except Pyro.errors.ConnectionDeniedError:
-        raise ConnectionDeniedError, "ERROR: Connection Denied  at " + portid( host, port )
-    except Pyro.errors.ProtocolError:
-        raise NoSuiteFoundError, "ERROR: " + suite + " not found at " + portid( host, port )
-    except Pyro.errors.NamingError:
-        raise OtherServerFoundError, "ERROR: non-cylc pyro server found at " + portid( host, port )
-    else:
-        if flags.verbose:
-            after = datetime.datetime.now()
-            print "Pyro connection on port " +str(port) + " took: " + str( after - before )
-        if name == suite and xowner == owner:
-            # RESULT
-            if flags.verbose:
-                print suite, owner, host, port
-            return True
-        else:
-            # ID'd some other suite.
-            print >> sys.stderr, 'Found ' + name + ' ' + xowner + ' ' + host + ' ' + port
-            print >> sys.stderr, ' NOT ' + suite + ' ' + owner + ' ' + host + ' ' + port
-            raise OtherSuiteFoundError, "ERROR: Found another suite"
-
-def scan(host=get_hostname(), db=None, pyro_timeout=None):
-    #print 'SCANNING PORTS'
-    # scan all cylc Pyro ports for cylc suites
-
-    pyro_base_port = GLOBAL_CFG.get( ['pyro','base port'] )
-    pyro_port_range = GLOBAL_CFG.get( ['pyro','maximum number of ports'] )
-
-    # In non-verbose mode print nothing (scan is used by cylc db viewer).
-
-    # load my suite passphrases
-    reg = localdb(db)
-    reg_suites = reg.get_list()
-    my_passphrases = {}
-    for item in reg_suites:
-        rg = item[0]
-        di = item[1]
-        try:
-            pp = passphrase( rg, user, host ).get( suitedir=di )
-        except Exception, x:
-            #print >> sys.stderr, x
-            # no passphrase defined for this suite
-            pass
-        else:
-            my_passphrases[ rg ] = pp
-
-    results = []
-    for port in range( pyro_base_port, pyro_base_port + pyro_port_range ):
-        before = datetime.datetime.now()
-        try:
-            name, owner, security = port_interrogator( host, port, my_passphrases, pyro_timeout ).interrogate()
-        except Pyro.errors.TimeoutError:
-            warn_timeout( host, port, pyro_timeout )
-            pass
-        except Pyro.errors.ConnectionDeniedError:
-            # secure suite
-            if flags.verbose:
-                print >> sys.stderr, "Connection Denied at " + portid( host, port )
-        except Pyro.errors.ProtocolError:
-            # no suite
-            #if flags.verbose:
-            #    print >> sys.stderr, "No Suite Found at " + portid( host, port )
-            pass
-        except Pyro.errors.NamingError:
-            # other Pyro server
-            if flags.verbose:
-                print >> sys.stderr, "Non-cylc Pyro server found at " + portid( host, port )
-        except:
-            raise
-        else:
-            if flags.verbose:
-                after = datetime.datetime.now()
-                print "Pyro connection on port " +str(port) + " took: " + str( after - before )
-            results.append((name, owner, host, port))
-    return results
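
For reference only, since the code above is removed by this commit: a minimal
sketch of how the old Pyro port-scan helpers were driven.  The module path
cylc.port_scan and the suite name are assumptions, not taken from this patch.

    # Sketch only; assumes the removed module was importable as cylc.port_scan
    # and that a suite 'my.suite' is running under the current user.
    from cylc.port_scan import scan, get_port

    # List every cylc suite answering on this host's Pyro port range.
    for name, owner, host, port in scan():
        print name, owner, host, port

    # Locate the port of one particular suite (raises SuiteNotFoundError if
    # the suite is not running).
    port = get_port('my.suite')
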
diff --git a/lib/cylc/prerequisite.py b/lib/cylc/prerequisite.py
new file mode 100755
index 0000000..ed5de7d
--- /dev/null
+++ b/lib/cylc/prerequisite.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import re
+import sys
+from cylc.conditional_simplifier import ConditionalSimplifier
+from cylc.cycling.loader import get_point
+
+
+"""A task prerequisite.
+
+The concrete result of an abstract logical trigger expression.
+
+"""
+
+
+class TriggerExpressionError(Exception):
+    def __init__(self, msg):
+        self.msg = msg
+
+    def __str__(self):
+        return repr(self.msg)
+
+
+class Prerequisite(object):
+
+    # Extracts T from "foo.T succeeded" etc.
+    CYCLE_POINT_RE = re.compile('^\w+\.(\S+) .*$')
+
+    def __init__(self, owner_id, start_point=None):
+        self.owner_id = owner_id
+        self.labels = {}   # labels[ message ] = label
+        self.messages = {}   # messages[ label ] = message
+        self.satisfied = {}    # satisfied[ label ] = True/False
+        self.satisfied_by = {}   # self.satisfied_by[ label ] = task_id
+        self.target_point_strings = []   # list of target cycle points
+        self.start_point = start_point
+        self.pre_initial_messages = []
+        self.conditional_expression = None
+        self.raw_conditional_expression = None
+
+    def add(self, message, label, pre_initial=False):
+        # Add a new prerequisite message in an UNSATISFIED state.
+        self.messages[label] = message
+        self.labels[message] = label
+        self.satisfied[label] = False
+        m = re.match(self.__class__.CYCLE_POINT_RE, message)
+        if m:
+            self.target_point_strings.append(m.groups()[0])
+        if pre_initial:
+            self.pre_initial_messages.append(label)
+
+    def get_not_satisfied_list(self):
+        not_satisfied = []
+        for label in self.satisfied:
+            if not self.satisfied[label]:
+                not_satisfied.append(label)
+        return not_satisfied
+
+    def set_condition(self, expr):
+        # 'foo | bar & baz'
+        # 'foo:fail | foo'
+        # 'foo[T-6]:out1 | baz'
+
+        drop_these = []
+
+        if self.pre_initial_messages:
+            for k in self.pre_initial_messages:
+                drop_these.append(k)
+
+        # Needed to drop pre warm-start dependence:
+        for k in self.messages:
+            if k in drop_these:
+                continue
+            if self.start_point:
+                task = re.search(r'(.*).(.*) ', self.messages[k])
+                if task.group:
+                    try:
+                        foo = task.group().split(".")[1].rstrip()
+                        if get_point(foo) < self.start_point:
+                            drop_these.append(k)
+                    except IndexError:
+                        pass
+
+        for label in drop_these:
+            if self.messages.get(label):
+                msg = self.messages[label]
+                self.messages.pop(label)
+                self.satisfied.pop(label)
+                self.labels.pop(msg)
+
+        if '|' in expr:
+            if drop_these:
+                simpler = ConditionalSimplifier(expr, drop_these)
+                expr = simpler.get_cleaned()
+            # Make a Python expression so we can eval() the logic.
+            self.raw_conditional_expression = expr
+            for label in self.messages:
+                expr = re.sub(
+                    r'\b' + label + r'\b', 'self.satisfied[\'' + label + '\']',
+                    expr)
+            self.conditional_expression = expr
+
+    def is_satisfied(self):
+        if not self.satisfied:
+            # No prerequisites left after pre-initial simplification.
+            return True
+        elif not self.conditional_expression:
+            # Single trigger or several with '&' only; don't need eval.
+            return all(self.satisfied.values())
+        else:
+            # Trigger expression with at least one '|': use eval.
+            try:
+                res = eval(self.conditional_expression)
+            except Exception, x:
+                print >> sys.stderr, 'ERROR:', x
+                if str(x).find("unexpected EOF") != -1:
+                    print >> sys.stderr, (
+                        "(?could be unmatched parentheses in the graph" +
+                        " string?)")
+                raise TriggerExpressionError(
+                    '"' + self.raw_conditional_expression + '"')
+            return res
+
+    def satisfy_me(self, outputs):
+        # Can any completed outputs satisfy any of my prerequisites?
+        for label in self.satisfied:
+            for msg in outputs:
+                if self.messages[label] == msg:
+                    self.satisfied[label] = True
+                    self.satisfied_by[label] = outputs[msg]  # owner_id
+
+    def dump(self):
+        # TODO - CHECK THIS WORKS NOW
+        # return an array of strings representing each message and its state
+        res = []
+        if self.raw_conditional_expression:
+            for label, val in self.satisfied.items():
+                res.append(['    LABEL: %s = %s' %
+                            (label, self.messages[label]), val])
+            res.append(['CONDITION: %s' %
+                        self.raw_conditional_expression, self.is_satisfied()])
+        elif self.satisfied:
+            for label, val in self.satisfied.items():
+                res.append([self.messages[label], val])
+        # (Else trigger wiped out by pre-initial simplification.)
+        return res
+
+    def set_satisfied(self):
+        for label in self.messages:
+            self.satisfied[label] = True
+
+    def set_not_satisfied(self):
+        for label in self.messages:
+            self.satisfied[label] = False
+
+    def get_target_points(self):
+        """Return a list of cycle points target by each prerequisite,
+        including each component of conditionals."""
+        return [get_point(p) for p in self.target_point_strings]
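
For orientation, not part of the patch: a minimal sketch of how the new
consolidated Prerequisite class is driven, assuming the cylc library is on
PYTHONPATH.  The task names, cycle point and labels are invented.

    # Sketch only; task names, cycle point and labels are invented.
    from cylc.prerequisite import Prerequisite

    prereq = Prerequisite('bar.20150101T0000Z')
    prereq.add('foo.20150101T0000Z succeeded', 'A')
    prereq.add('baz.20150101T0000Z succeeded', 'B')
    prereq.set_condition('A | B')   # stored for later eval()

    print prereq.is_satisfied()     # False: nothing satisfied yet
    prereq.satisfy_me({'foo.20150101T0000Z succeeded': 'foo.20150101T0000Z'})
    print prereq.is_satisfied()     # True: 'A | B' now evaluates true
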
diff --git a/lib/cylc/prerequisites/__init__.py b/lib/cylc/prerequisites/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/lib/cylc/prerequisites/conditionals.py b/lib/cylc/prerequisites/conditionals.py
deleted file mode 100755
index 8b86147..0000000
--- a/lib/cylc/prerequisites/conditionals.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import re, sys
-from simplify import conditional_simplifier
-from cylc.cycling.loader import get_point
-
-# label1 => "foo ready for <CYCLE_POINT>
-# label2 => "bar.<CYCLE_POINT> succeeded"
-# expr   => "( [label1] or [label2] )"
-
-class TriggerExpressionError( Exception ):
-    def __init__( self, msg ):
-        self.msg = msg
-    def __str__( self ):
-        return repr(self.msg)
-
-class conditional_prerequisites(object):
-
-    # Extracts T from "foo.T succeeded" etc.
-    CYCLE_POINT_RE = re.compile('^\w+\.(\S+) .*$')
-
-    def __init__( self, owner_id, start_point=None ):
-        self.owner_id = owner_id
-        self.labels = {}   # labels[ message ] = label
-        self.messages = {}   # messages[ label ] = message
-        self.satisfied = {}    # satisfied[ label ] = True/False
-        self.satisfied_by = {}   # self.satisfied_by[ label ] = task_id
-        self.target_point_strings = []   # list of target cycle points
-        self.auto_label = 0
-        self.excess_labels = []
-        self.start_point = start_point
-        self.pre_initial_messages = []
-
-    def add( self, message, label = None, pre_initial = False ):
-        # Add a new prerequisite message in an UNSATISFIED state.
-        if label:
-            # TODO - autolabelling NOT USED? (and is broken because the
-            # supplied condition is necessarily expressed in terms of
-            # user labels?).
-            pass
-        else:
-            self.auto_label += 1
-            label = str( self.auto_label )
-
-        if message in self.labels:
-            # DUPLICATE PREREQUISITE - IMPOSSIBLE IN CURRENT USE OF THIS CLASS?
-            # (TODO - if impossible, remove related code from this file)
-            #raise SystemExit( "Duplicate prerequisite: " + message )
-            print >> sys.stderr, "WARNING, " + self.owner_id + ": duplicate prerequisite: " + message
-            self.excess_labels.append(label)
-            return
-
-        self.messages[ label ] = message
-        self.labels[ message ] = label
-        self.satisfied[label]  = False
-        m = re.match( self.__class__.CYCLE_POINT_RE, message )
-        if m:
-            self.target_point_strings.append( m.groups()[0] )
-        if pre_initial:
-            self.pre_initial_messages.append(label)
-
-    def get_not_satisfied_list( self ):
-        not_satisfied = []
-        for label in self.satisfied:
-            if not self.satisfied[ label ]:
-                not_satisfied.append( label )
-        return not_satisfied
-
-    def set_condition( self, expr ):
-        # 'foo | bar & baz'
-        # 'foo:fail | foo'
-        # 'foo[T-6]:out1 | baz'
-
-        drop_these = []
-        for k in self.messages:
-            if self.start_point:
-                task = re.search( r'(.*).(.*) ', self.messages[k])
-                if task.group:
-                    try:
-                        foo = task.group().split(".")[1].rstrip()
-                        if ( get_point( foo ) <  self.start_point and
-                                 foo != '1' ):
-                            # TODO - ASYNC TASKS '1' ONLY NEEDS UPDATING FOR
-                            # INTEGER CYCLING (AND MORE?)
-                            drop_these.append(k)
-                    except IndexError:
-                        pass
-
-        if self.pre_initial_messages:
-            for k in self.pre_initial_messages:
-                drop_these.append(k)
-
-        if drop_these:
-            simpler = conditional_simplifier(expr, drop_these)
-            expr = simpler.get_cleaned()
-
-        # make into a python expression
-        self.raw_conditional_expression = expr
-        for label in self.messages:
-            # match label start and end on on word boundary
-            expr = re.sub( r'\b' + label + r'\b', 'self.satisfied[\'' + label + '\']', expr )
-
-        for label in self.excess_labels:
-            # treat duplicate triggers as always satisfied
-            expr = re.sub( r'\b' + label + r'\b', 'True', expr )
-            self.raw_conditional_expression = re.sub( r'\b' + label + r'\b', 'True', self.raw_conditional_expression )
-
-        for label in drop_these:
-            if self.messages.get(label):
-                msg = self.messages[label]
-                self.messages.pop(label)
-                self.satisfied.pop(label)
-                self.labels.pop(msg)
-
-        self.conditional_expression = expr
-
-    def all_satisfied( self ):
-        if self.conditional_expression == "()":
-            return True
-        else:
-            try:
-                res = eval( self.conditional_expression )
-            except Exception, x:
-                print >> sys.stderr, 'ERROR:', x
-                if str(x).find("unexpected EOF") != -1:
-                    print >> sys.stderr, "(?could be unmatched parentheses in the graph string?)"
-                raise TriggerExpressionError, '"' + self.raw_conditional_expression + '"'
-            return res
-
-    def satisfy_me( self, outputs ):
-        # Can any completed outputs satisfy any of my prequisites?
-        for label in self.satisfied:
-            for msg in outputs:
-                if self.messages[label] == msg:
-                    self.satisfied[ label ] = True
-                    self.satisfied_by[ label ] = outputs[msg] # owner_id
-
-    def count( self ):
-        # how many messages are stored
-        return len( self.satisfied.keys() )
-
-    def dump( self ):
-        # return an array of strings representing each message and its state
-        res = []
-        for label in self.satisfied:
-            msg = self.messages[label]
-            res.append( [ '    LABEL: ' + label + ' = ' + self.messages[label], self.satisfied[ label ] ]  )
-        res.append( [     'CONDITION: ' + self.raw_conditional_expression, self.all_satisfied() ] )
-        return res
-
-    def set_all_satisfied( self ):
-        for label in self.messages:
-            self.satisfied[ label ] = True
-
-    def set_all_unsatisfied( self ):
-        for label in self.messages:
-            self.satisfied[ label ] = False
-
-    def get_target_points( self ):
-        """Return a list of cycle points target by each prerequisite,
-        including each component of conditionals."""
-        return [get_point(p) for p in self.target_point_strings]
diff --git a/lib/cylc/prerequisites/plain_prerequisites.py b/lib/cylc/prerequisites/plain_prerequisites.py
deleted file mode 100644
index 55850c6..0000000
--- a/lib/cylc/prerequisites/plain_prerequisites.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import re, sys
-from cylc.cycling.loader import get_point
-
-# PREREQUISITES: A collection of messages representing the prerequisite
-# conditions for a task, each of which can be "satisfied" or not.  An
-# unsatisfied prerequisite becomes satisfied if it matches a satisfied
-# output message from another task (via the cylc requisite broker).
-
-class plain_prerequisites(object):
-
-    # Extracts T from "foo.T succeeded" etc.
-    CYCLE_POINT_RE = re.compile('^\w+\.(\S+) .*$')
-
-    def __init__( self, owner_id, start_point=None ):
-        self.labels = {}   # labels[ message ] = label
-        self.messages = {}   # messages[ label ] = message
-        self.satisfied = {}    # satisfied[ label ] = True/False
-        self.satisfied_by = {}   # self.satisfied_by[ label ] = task_id
-        self.target_point_strings = []   # list of target cycle points (tags)
-        self.auto_label = 0
-        self.owner_id = owner_id
-        self.start_point = start_point
-
-    def add( self, message, label = None ):
-        # Add a new prerequisite message in an UNSATISFIED state.
-        if self.start_point:
-            task = re.search( r'(.*).(.*) ', message)
-            if task.group:
-                try:
-                    foo = task.group().split(".")[1].rstrip()
-                    if ( get_point( foo ) <  self.start_point ):
-                        return
-                except IndexError:
-                    pass
-        if label:
-            pass
-        else:
-            self.auto_label += 1
-            label = str( self.auto_label )
-
-        if message in self.labels:
-            # IGNORE A DUPLICATE PREREQUISITE (the same trigger must
-            # occur in multiple non-conditional graph string sections).
-            # Warnings disabled pending a global check across all
-            # prerequisites held by a task.
-            ##print >> sys.stderr, "WARNING, " + self.owner_id + ": duplicate prerequisite: " + message
-            return
-
-        self.messages[ label ] = message
-        self.labels[ message ] = label
-        self.satisfied[label] = False
-        self.satisfied_by[label] = None
-        m = re.match( self.__class__.CYCLE_POINT_RE, message )
-        if m:
-            self.target_point_strings.append( m.groups()[0] )
-
-    def remove( self, message ):
-        lbl = self.labels[message]
-        del self.labels[message]
-        del self.messages[lbl]
-        del self.satisfied[lbl]
-        del self.satisfied_by[lbl]
-        m = re.match( self.__class__.CYCLE_POINT_RE, message )
-        if m and m.groups()[0] in self.target_point_strings:
-            self.target_point_strings.remove( m.groups()[0] )
-
-    def all_satisfied( self ):
-        return not ( False in self.satisfied.values() )
-
-    def satisfy_me( self, outputs ):
-        # Can any completed outputs satisfy any of my prerequisites?
-        for label, message in self.messages.items():
-            if label in self.satisfied and message in outputs:
-                self.satisfied[ label ] = True
-                self.satisfied_by[ label ] = outputs[message] # owner_id
-
-    def get_satisfied_by( self ):
-        return self.satisfied_by
-
-    def count( self ):
-        # how many messages are stored
-        return len( self.satisfied.keys() )
-
-    def dump( self ):
-        # return an array of strings representing each message and its state
-        res = []
-        for key in self.satisfied:
-            res.append( [ self.messages[key], self.satisfied[ key ] ]  )
-        return res
-
-    def set_all_satisfied( self ):
-        for label in self.messages:
-            self.satisfied[ label ] = True
-
-    def set_all_unsatisfied( self ):
-        for label in self.messages:
-            self.satisfied[ label ] = False
-
-    def get_target_points( self ):
-        """Return a list of cycle points target by each prerequisite,
-        including each component of conditionals."""
-        return [ get_point(p) for p in self.target_point_strings ]
diff --git a/lib/cylc/prerequisites/prerequisites.py b/lib/cylc/prerequisites/prerequisites.py
deleted file mode 100644
index a132dbf..0000000
--- a/lib/cylc/prerequisites/prerequisites.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import re
-
-class prerequisites(object):
-    """A container for other prerequisite types."""
-
-    def __init__( self, start_point=None ):
-        self.container = []
-        self.start_point = start_point
-
-    def add_requisites( self, reqs ):
-        self.container.append( reqs )
-
-    def get_satisfied_list( self ):
-        satisfied = []
-        for reqs in self.container:
-            satisfied.append( reqs.get_satisfied() )
-        return satisfied
-
-    def eval_all( self ):
-        # used to test validity of conditional prerequisite expression.
-        # (all_satisfied() is not sufficient as it breaks out early).
-        for reqs in self.container:
-            reqs.all_satisfied()
-
-    def all_satisfied( self ):
-        result = True
-        for reqs in self.container:
-            if not reqs.all_satisfied():
-                result = False
-                break
-        return result
-
-    def satisfy_me( self, outputs ):
-        # Can any completed outputs satisfy any of my prerequisites?
-        for reqs in self.container:
-        ##    for label in reqs.satisfied:
-        ##        for msg in outputs:
-        ##            if reqs.messages[label] == msg:
-        ##                reqs.satisfied[ label ] = True
-        ##                reqs.satisfied_by[ label ] = outputs[msg]  # (owner_id)
-            reqs.satisfy_me( outputs )
-
-    def get_satisfied_by( self ):
-        satisfied_by = {}
-        for reqs in self.container:
-            for label in reqs.satisfied_by.keys():
-                satisfied_by[ label ] = reqs.satisfied_by[label]
-        return satisfied_by
-
-    def count( self ):
-        # how many messages are stored
-        count = 0
-        for reqs in self.container:
-            count += len( reqs.satisfied.keys() )
-        return count
-
-    def dump( self ):
-        # return an array of strings representing each message and its state
-        res = []
-        for reqs in self.container:
-            res += reqs.dump()
-        return res
-
-    def set_all_satisfied( self ):
-        for reqs in self.container:
-            for label in reqs.messages:
-                reqs.satisfied[ label ] = True
-
-    def set_all_unsatisfied( self ):
-        for reqs in self.container:
-            for label in reqs.messages:
-                reqs.satisfied[ label ] = False
-
-    def get_target_points( self ):
-        """Return a list of cycle points target by each prerequisite,
-        including each component of conditionals."""
-        points = []
-        for reqs in self.container:
-            points += reqs.get_target_points()
-        return points
diff --git a/lib/cylc/print_tree.py b/lib/cylc/print_tree.py
index c365e88..cc12a7c 100644
--- a/lib/cylc/print_tree.py
+++ b/lib/cylc/print_tree.py
@@ -33,12 +33,14 @@ u_vbar = u'\u2502'
 u_tee = u'\u251C' + u_hbar
 u_trm = u'\u2514' + u_hbar
 
-def print_tree( tree, padding, use_unicode=False, prefix='', labels=None, eq=False ):
+
+def print_tree(tree, padding, use_unicode=False, prefix='', labels=None,
+               eq=False):
     if use_unicode:
         vbar = u_vbar
         trm = u_trm
         tee = u_tee
-        tee_re= tee
+        tee_re = tee
     else:
         vbar = a_vbar
         trm = a_trm
@@ -55,15 +57,15 @@ def print_tree( tree, padding, use_unicode=False, prefix='', labels=None, eq=Fal
             pprefix = prefix + ' ' + tee
 
         pp = pprefix
-        pp = re.sub( '^ (' + trm + '|' + tee_re + ')', '', pp )
-        pp = re.sub( trm + ' ', '  ', pp )
-        pp = re.sub( tee_re + ' ', vbar + ' ', pp )
+        pp = re.sub('^ (' + trm + '|' + tee_re + ')', '', pp)
+        pp = re.sub(trm + ' ', '  ', pp)
+        pp = re.sub(tee_re + ' ', vbar + ' ', pp)
 
         result = pp + item
-        line = result + ' ' + padding[ len(result): ]
-        if isinstance( tree[item], dict ):
+        line = result + ' ' + padding[len(result):]
+        if isinstance(tree[item], dict):
             print line
-            print_tree( tree[item], padding, use_unicode, pprefix, labels, eq )
+            print_tree(tree[item], padding, use_unicode, pprefix, labels, eq)
         else:
             if labels:
                 if item in labels:
@@ -76,4 +78,4 @@ def print_tree( tree, padding, use_unicode=False, prefix='', labels=None, eq=Fal
                     joiner = '= '
                 else:
                     joiner = ''
-                print line + joiner + str( tree[item] )
+                print line + joiner + str(tree[item])
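
A hedged usage sketch of the reformatted print_tree helper, not part of the
patch; the tree contents and padding width are invented.

    # Sketch only; tree contents and padding width are invented.
    from cylc.print_tree import print_tree

    tree = {'my.suite': {'title': 'test suite', 'path': '/home/me/my-suite'}}
    print_tree(tree, padding=' ' * 30)
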
diff --git a/lib/cylc/prompt.py b/lib/cylc/prompt.py
index 9b31e5f..e843498 100644
--- a/lib/cylc/prompt.py
+++ b/lib/cylc/prompt.py
@@ -20,11 +20,33 @@ import sys
 
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 
-def prompt( reason, force=False ):
-    if force or GLOBAL_CFG.get( ['disable interactive command prompts'] ):
-        return
-    response = raw_input( reason + ' (y/n)? ' )
-    if response == 'y':
-        return
+
+def prompt(question, force=False, gui=False, no_force=False, no_abort=False):
+    """Interactive Yes/No prompt for cylc CLI scripts.
+
+    On No we exit, unless no_abort is set (in which case we return False).
+    If force is True (and no_force is not), don't prompt; just return True.
+
+    """
+    if (force or GLOBAL_CFG.get(['disable interactive command prompts'])) and (
+            not no_force):
+        return True
+    if gui:
+        import gtk
+        dialog = gtk.MessageDialog(
+            None, gtk.DIALOG_DESTROY_WITH_PARENT,
+            gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO,
+            question
+        )
+        gui_response = dialog.run()
+        response_no = (gui_response != gtk.RESPONSE_YES)
+    else:
+        cli_response = raw_input('%s (y/n)? ' % question)
+        response_no = (cli_response not in ['y', 'Y'])
+    if response_no:
+        if no_abort:
+            return False
+        else:
+            sys.exit(0)
     else:
-        sys.exit(0)
+        return True
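
For orientation, not part of the patch: a hedged sketch of the extended
prompt() call, assuming the cylc global config is loadable; the question
text is invented.

    # Sketch only; the question text is invented.
    from cylc.prompt import prompt

    # Default behaviour: 'y' returns True, anything else exits the script.
    prompt('Stop suite my.suite')

    # With no_abort=True a negative answer returns False instead of exiting.
    if prompt('Remove registration', no_abort=True):
        print 'removing'
    else:
        print 'keeping'
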
diff --git a/lib/cylc/registration.py b/lib/cylc/registration.py
index 252c643..63f8f86 100644
--- a/lib/cylc/registration.py
+++ b/lib/cylc/registration.py
@@ -27,100 +27,102 @@ from cylc.owner import user
 
 """Simple suite name registration database."""
 
-regdb_path = os.path.join( os.environ['HOME'], '.cylc', 'REGDB' )
+regdb_path = os.path.join(os.environ['HOME'], '.cylc', 'REGDB')
 
-class RegistrationError( Exception ):
-    def __init__( self, msg ):
+
+class RegistrationError(Exception):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
 
+
 class localdb(object):
-    def __init__( self, file=None ):
-        dbpath = file # (back compat)
+    def __init__(self, file=None):
+        dbpath = file  # (back compat)
         global regdb_path
         self.dbpath = dbpath or regdb_path
         # create initial database directory if necessary
-        if not os.path.exists( self.dbpath ):
+        if not os.path.exists(self.dbpath):
             try:
-                os.makedirs( self.dbpath )
-            except Exception,x:
-                sys.exit( str(x) )
+                os.makedirs(self.dbpath)
+            except Exception, x:
+                sys.exit(str(x))
 
-    def list_all_suites( self ):
+    def list_all_suites(self):
         try:
-            suites = os.listdir( self.dbpath )
+            suites = os.listdir(self.dbpath)
         except Exception, x:
             sys.exit(str(x))
         return suites
 
-    def register( self, name, dir ):
+    def register(self, name, dir):
         name = RegPath(name).get()
         for suite in self.list_all_suites():
             if name == suite:
-                raise RegistrationError, "ERROR: " + name + " is already registered."
-            elif suite.startswith( name + RegPath.delimiter ):
-                raise RegistrationError, "ERROR: " + name + " is a registered group."
-            elif name.startswith( suite + RegPath.delimiter ):
+                raise RegistrationError(
+                    "ERROR: " + name + " is already registered.")
+            elif suite.startswith(name + RegPath.delimiter):
+                raise RegistrationError(
+                    "ERROR: " + name + " is a registered group.")
+            elif name.startswith(suite + RegPath.delimiter):
                 # suite starts with, to some level, an existing suite name
-                raise RegistrationError, "ERROR: " + suite + " is a registered suite."
-        dir = dir.rstrip( '/' )  # strip trailing '/'
-        dir = re.sub( '^\./', '', dir ) # strip leading './'
-        if not dir.startswith( '/' ):
+                raise RegistrationError(
+                    "ERROR: " + suite + " is a registered suite.")
+        dir = dir.rstrip('/')  # strip trailing '/'
+        dir = re.sub('^\./', '', dir)  # strip leading './'
+        if not dir.startswith('/'):
             # On AIX on GPFS os.path.abspath(dir) returns the path with
             # full 'fileset' prefix. Manual use of $PWD to absolutize a
             # relative path gives a cleaner result.
-            dir = os.path.join( os.environ['PWD'], dir )
-        try:
-            title = self.get_suite_title( name, path=dir )
-        except Exception, x:
-            print >> sys.stderr, 'WARNING: an error occurred parsing the suite definition:\n  ', x
-            print >> sys.stderr, "Registering the suite with temporary title 'SUITE PARSE ERROR'."
-            print >> sys.stderr, "You can update the title later with 'cylc db refresh'.\n"
-            title = "SUITE PARSE ERROR"
-
-        title = title.split('\n')[0] # use the first of multiple lines
+            dir = os.path.join(os.environ['PWD'], dir)
+        title = self.get_suite_title(name, path=dir)
+        title = title.split('\n')[0]  # use the first of multiple lines
         print 'REGISTER', name + ':', dir
-        with open( os.path.join( self.dbpath, name ), 'w' ) as file:
-            file.write( 'path=' + dir + '\n' )
-            file.write( 'title=' + title + '\n' )
+        with open(os.path.join(self.dbpath, name), 'w') as file:
+            file.write('path=' + dir + '\n')
+            file.write('title=' + title + '\n')
 
         # create a new passphrase for the suite if necessary
-        passphrase(name,user,get_hostname()).generate(dir)
+        passphrase(name, user, get_hostname()).generate(dir)
 
-    def get_suite_data( self, suite ):
+    def get_suite_data(self, suite):
         suite = RegPath(suite).get()
-        fpath = os.path.join( self.dbpath, suite )
-        if not os.path.isfile( fpath ):
-            raise RegistrationError, "ERROR: Suite not found " + suite
+        fpath = os.path.join(self.dbpath, suite)
+        if not os.path.isfile(fpath):
+            raise RegistrationError("ERROR: Suite not found " + suite)
         data = {}
-        with open( fpath, 'r' ) as file:
+        with open(fpath, 'r') as file:
             lines = file.readlines()
         count = 0
         for line in lines:
             count += 1
             line = line.rstrip()
             try:
-                key,val = line.split('=')
+                key, val = line.split('=')
             except ValueError:
-                print >> sys.stderr, 'ERROR: failed to parse line ' + str(count) + ' from ' + fpath + ':'
+                print >> sys.stderr, (
+                    'ERROR: failed to parse line ' + str(count) + ' from ' +
+                    fpath + ':')
                 print >> sys.stderr, '  ', line
                 continue
             data[key] = val
         if 'title' not in data or 'path' not in data:
-            raise RegistrationError, 'ERROR, ' + suite + ' suite registration corrupted?: ' + fpath
+            raise RegistrationError(
+                'ERROR, ' + suite + ' suite registration corrupted?: ' + fpath)
         return data
 
-    def get_suitedir( self, reg ):
-        data = self.get_suite_data( reg )
+    def get_suitedir(self, reg):
+        data = self.get_suite_data(reg)
         return data['path']
 
-    def get_suiterc( self, reg ):
-        data = self.get_suite_data( reg )
-        return os.path.join( data['path'], 'suite.rc' )
+    def get_suiterc(self, reg):
+        data = self.get_suite_data(reg)
+        return os.path.join(data['path'], 'suite.rc')
 
-    def get_list( self, regfilter=None ):
-        # Return a filtered list of registered suites
+    def get_list(self, regfilter=None):
+        # Return a filtered list of valid suite registrations.
         res = []
         for suite in self.list_all_suites():
             if regfilter:
@@ -128,88 +130,101 @@ class localdb(object):
                     if not re.search(regfilter, suite):
                         continue
                 except:
-                    raise RegistrationError, "ERROR, Invalid filter expression: " + regfilter
-            data = self.get_suite_data( suite )
-            dir, title = data['path'], data['title']
-            res.append( [suite, dir, title] )
+                    raise RegistrationError(
+                        "ERROR, Invalid filter expression: " + regfilter)
+            try:
+                data = self.get_suite_data(suite)
+            except RegistrationError as exc:
+                print >> sys.stderr, str(exc)
+            else:
+                dir, title = data['path'], data['title']
+                res.append([suite, dir, title])
         return res
 
-    def unregister( self, exp ):
+    def unregister(self, exp):
         suitedirs = []
         for key in self.list_all_suites():
-            if re.search( exp + '$', key ):
-                data = self.get_suite_data(key)
-                dir = data['path']
-                print 'UNREGISTER', key + ':', dir
-                os.unlink( os.path.join( self.dbpath, key ) )
-                for f in ['passphrase', 'suite.rc.processed']:
-                    try:
-                        os.unlink( os.path.join( dir, f ) )
-                    except OSError:
-                        pass
-                if dir not in suitedirs:
-                    # (could be multiple registrations of the same suite).
-                    suitedirs.append(dir)
+            if re.search(exp + '$', key):
+                try:
+                    data = self.get_suite_data(key)
+                except RegistrationError:
+                    pass
+                else:
+                    dir = data['path']
+                    for f in ['passphrase', 'suite.rc.processed']:
+                        try:
+                            os.unlink(os.path.join(dir, f))
+                        except OSError:
+                            pass
+                    if dir not in suitedirs:
+                        # (could be multiple registrations of the same suite).
+                        suitedirs.append(dir)
+                print 'UNREGISTER', key
+                os.unlink(os.path.join(self.dbpath, key))
         return suitedirs
 
-    def reregister( self, srce, targ ):
+    def reregister(self, srce, targ):
         targ = RegPath(targ).get()
         found = False
         for suite in self.list_all_suites():
             if suite == srce:
                 # single suite
                 newsuite = targ
-                data = self.get_suite_data( suite )
+                data = self.get_suite_data(suite)
                 dir, title = data['path'], data['title']
-                self.unregister( suite )
-                self.register( targ, data['path'] )
+                self.unregister(suite)
+                self.register(targ, data['path'])
                 found = True
-            elif suite.startswith( srce + RegPath.delimiter ):
+            elif suite.startswith(srce + RegPath.delimiter):
                 # group of suites
-                data = self.get_suite_data( suite )
+                data = self.get_suite_data(suite)
                 dir, title = data['path'], data['title']
-                newsuite = re.sub( '^' + srce, targ, suite )
-                self.unregister( suite )
-                self.register( newsuite, data['path'] )
+                newsuite = re.sub('^' + srce, targ, suite)
+                self.unregister(suite)
+                self.register(newsuite, data['path'])
                 found = True
         if not found:
-            raise RegistrationError, "ERROR, suite or group not found: " + srce
+            raise RegistrationError("ERROR, suite or group not found: " + srce)
 
-    def get_invalid( self ):
+    def get_invalid(self):
         invalid = []
         for reg in self.list_all_suites():
-            data = self.get_suite_data(reg)
-            dir = data['path']
-            rcfile = os.path.join( dir, 'suite.rc' )
-            if not os.path.isfile( rcfile ):
-                invalid.append( reg )
+            try:
+                data = self.get_suite_data(reg)
+            except RegistrationError:
+                invalid.append(reg)
+            else:
+                dir = data['path']
+                rcfile = os.path.join(dir, 'suite.rc')
+                if not os.path.isfile(rcfile):
+                    invalid.append(reg)
         return invalid
 
-    def get_suite_title( self, suite, path=None ):
+    def get_suite_title(self, suite, path=None):
         """Determine the (first line of) the suite title without a full
         file parse. Assumes the title is not in an include-file."""
 
         if not path:
-            data = self.get_suite_data( suite )
+            data = self.get_suite_data(suite)
             path = data['path']
-        suiterc = os.path.join( path, 'suite.rc' )
+        suiterc = os.path.join(path, 'suite.rc')
 
         title = "No title provided"
-        for line in open( suiterc, 'rb' ):
-            if re.search( '^\s*\[', line ):
+        for line in open(suiterc, 'rb'):
+            if re.search('^\s*\[', line):
                 # abort: title comes before first [section]
                 break
-            m = re.match( '^\s*title\s*=\s*(.*)\s*$', line )
+            m = re.match('^\s*title\s*=\s*(.*)\s*$', line)
             if m:
                 line = m.groups()[0]
                 title = line.strip('"\'')
 
         return title
 
-    def refresh_suite_title( self, suite ):
+    def refresh_suite_title(self, suite):
         data = self.get_suite_data(suite)
         dir, title = data['path'], data['title']
-        new_title = self.get_suite_title( suite )
+        new_title = self.get_suite_title(suite)
         if title == new_title:
             if flags.verbose:
                 print 'unchanged:', suite
@@ -219,20 +234,20 @@ class localdb(object):
             print '   old title:', title
             print '   new title:', new_title
             changed = True
-            self.unregister( suite )
-            self.register( suite, dir )
+            self.unregister(suite)
+            self.register(suite, dir)
         return changed
 
-    def get_rcfiles ( self, suite ):
+    def get_rcfiles(self, suite):
         # return a list of all include-files used by this suite
         # TODO - this needs to be made recursive
         rcfiles = []
         data = self.get_suite_data(suite)
         dir = data['path']
-        suiterc = os.path.join( dir, 'suite.rc' )
-        rcfiles.append( suiterc )
-        for line in open( suiterc, 'rb' ):
-            m = re.match( '^\s*%include\s+([\/\w\-\.]+)', line )
+        suiterc = os.path.join(dir, 'suite.rc')
+        rcfiles.append(suiterc)
+        for line in open(suiterc, 'rb'):
+            m = re.match('^\s*%include\s+([\/\w\-\.]+)', line)
             if m:
-                rcfiles.append(os.path.join( dir, m.groups()[0]))
+                rcfiles.append(os.path.join(dir, m.groups()[0]))
         return rcfiles
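
A minimal usage sketch of the registration database class after these changes,
not part of the patch.  The suite name and source path are invented, and note
that register() also generates a suite passphrase as a side effect.

    # Sketch only; suite name and source path are invented.
    import sys
    from cylc.registration import localdb, RegistrationError

    db = localdb()                    # defaults to ~/.cylc/REGDB
    try:
        db.register('my.suite', '/home/me/suites/my-suite')
    except RegistrationError as exc:
        print >> sys.stderr, str(exc)
    for name, path, title in db.get_list():
        print name, path, title
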
diff --git a/lib/cylc/regpath.py b/lib/cylc/regpath.py
index 7932f6f..f262c74 100644
--- a/lib/cylc/regpath.py
+++ b/lib/cylc/regpath.py
@@ -18,14 +18,17 @@
 
 import re
 
-class IllegalRegPathError( Exception ):
-    def __init__( self, suite, owner=None ):
+
+class IllegalRegPathError(Exception):
+    def __init__(self, suite, owner=None):
         self.msg = "ERROR, illegal suite name: " + suite
         if owner:
             self.msg += ' (' + owner + ')'
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
 
+
 class RegPath(object):
     # This class contains common code for checking suite registration
     # name correctness, and manipulating said names. It is currently
@@ -34,38 +37,39 @@ class RegPath(object):
     delimiter = '.'
     delimiter_re = '\.'
 
-    def __init__( self, rpath ):
+    def __init__(self, rpath):
         # Suite registration paths may contain [a-zA-Z0-9_.-]. They may
         # not contain colons, which would interfere with PATH variables.
-        if re.search( '[^\w.-]', rpath ):
-            raise IllegalRegPathError( rpath )
+        if re.search('[^\w.-]', rpath):
+            raise IllegalRegPathError(rpath)
         # If the path ends in delimiter it must be a group, otherwise it
         # may refer to a suite or a group. NOTE: this information is not
         # currently used.
-        if re.match( '.*' + self.__class__.delimiter_re + '$', rpath ):
+        if re.match('.*' + self.__class__.delimiter_re + '$', rpath):
             self.is_definitely_a_group = True
             rpath = rpath.strip(self.__class__.delimiter_re)
         else:
             self.is_definitely_a_group = False
         self.rpath = rpath
 
-    def get( self ):
+    def get(self):
         return self.rpath
 
-    def get_list( self ):
+    def get_list(self):
         return self.rpath.split(self.__class__.delimiter)
 
-    def get_fpath( self ):
-        return re.sub( self.__class__.delimiter_re, '/', self.rpath )
+    def get_fpath(self):
+        return re.sub(self.__class__.delimiter_re, '/', self.rpath)
 
-    def basename( self ):
+    def basename(self):
         # return baz from foo.bar.baz
         return self.rpath.split(self.__class__.delimiter)[-1]
 
-    def groupname( self ):
+    def groupname(self):
         # return foo.bar from foo.bar.baz
-        return self.__class__.delimiter.join( self.rpath.split(self.__class__.delimiter)[0:-1])
+        return self.__class__.delimiter.join(
+            self.rpath.split(self.__class__.delimiter)[0:-1])
 
-    def append( self, rpath2 ):
+    def append(self, rpath2):
         # join on another rpath
-        return RegPath( self.rpath + self.__class__.delimiter + rpath2.rpath )
+        return RegPath(self.rpath + self.__class__.delimiter + rpath2.rpath)
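
Illustrative only, not part of the patch: how the RegPath helpers split and
join registration names.  The names used here are invented.

    # Sketch only; registration names are invented.
    from cylc.regpath import RegPath

    p = RegPath('nwp.ops.atmos')
    print p.basename()                       # atmos
    print p.groupname()                      # nwp.ops
    print p.get_fpath()                      # nwp/ops/atmos
    print p.append(RegPath('verify')).get()  # nwp.ops.atmos.verify
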
diff --git a/lib/cylc/regprompt.py b/lib/cylc/regprompt.py
index 5baa815..aa92e2f 100644
--- a/lib/cylc/regprompt.py
+++ b/lib/cylc/regprompt.py
@@ -16,7 +16,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-def prompt( question, default ):
+
+def prompt(question, default):
     def explain():
         print "Valid responses:"
         print "  [enter] - accept the default"
@@ -28,7 +29,8 @@ def prompt( question, default ):
     try_again = True
     while try_again:
         try_again = False
-        res = raw_input( question + " (default '" + default + "', else VALUE,q,s,?) " )
+        res = raw_input(
+            question + " (default '" + default + "', else VALUE,q,s,?) ")
         if res == '?':
             explain()
             try_again = True
diff --git a/lib/cylc/remote.py b/lib/cylc/remote.py
index 4472ac5..8bd29bc 100644
--- a/lib/cylc/remote.py
+++ b/lib/cylc/remote.py
@@ -25,10 +25,6 @@ import subprocess
 import sys
 from textwrap import TextWrapper
 
-from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.suite_host import is_remote_host
-from cylc.owner import is_remote_user
-from cylc.version import CYLC_VERSION
 import cylc.flags
 
 
@@ -68,10 +64,16 @@ class remrun(object):
             else:
                 self.args.append(arg)
 
-        self.is_remote = (
-            is_remote_user(self.owner) or is_remote_host(self.host))
+        if self.owner is None and self.host is None:
+            self.is_remote = False
+        else:
+            from cylc.suite_host import is_remote_host
+            from cylc.owner import is_remote_user
+            self.is_remote = (
+                is_remote_user(self.owner) or is_remote_host(self.host))
 
-    def execute(self, force_required=False, env=None, path=None, dry_run=False):
+    def execute(self, force_required=False, env=None, path=None,
+                dry_run=False):
         """Execute command on remote host.
 
         Returns False if remote re-invocation is not needed, True if it is
@@ -81,6 +83,9 @@ class remrun(object):
         if not self.is_remote:
             return False
 
+        from cylc.cfgspec.globalcfg import GLOBAL_CFG
+        from cylc.version import CYLC_VERSION
+
         name = os.path.basename(self.argv[0])[5:]  # /path/to/cylc-foo => foo
 
         user_at_host = ''
@@ -105,7 +110,7 @@ class remrun(object):
                 "use login shell", self.host, self.owner)
 
         # Pass cylc version through.
-        command += ["CYLC_VERSION=%s" % CYLC_VERSION]
+        command += ["env", "CYLC_VERSION=%s" % CYLC_VERSION]
 
         if ssh_login_shell:
             # A login shell will always source /etc/profile and the user's bash
diff --git a/lib/cylc/rolling_archive.py b/lib/cylc/rolling_archive.py
index ce0b0c7..75f01d2 100644
--- a/lib/cylc/rolling_archive.py
+++ b/lib/cylc/rolling_archive.py
@@ -21,39 +21,40 @@
 
 import os
 
+
 class rolling_archive(object):
 
-    def __init__( self, filename, archive_length=10, sep='-' ):
+    def __init__(self, filename, archive_length=10, sep='-'):
         self.sep = sep
         self.base_filename = filename
         self.archive_length = archive_length
 
-    def __filename( self, index ):
-        return self.base_filename + self.sep + str( index )
+    def __filename(self, index):
+        return self.base_filename + self.sep + str(index)
 
-    def roll( self ):
+    def roll(self):
         # roll the archive
 
-        if os.path.exists( self.__filename( self.archive_length )):
-            os.unlink( self.__filename( self.archive_length ))
+        if os.path.exists(self.__filename(self.archive_length)):
+            os.unlink(self.__filename(self.archive_length))
 
-        for i in reversed( range( 1, self.archive_length )):
-            if os.path.exists( self.__filename( i )):
+        for i in reversed(range(1, self.archive_length)):
+            if os.path.exists(self.__filename(i)):
                 try:
-                    os.rename( self.__filename(i), self.__filename(i+1) )
+                    os.rename(self.__filename(i), self.__filename(i + 1))
                 except OSError:
                     raise
 
-        if os.path.exists( self.base_filename):
-            os.rename( self.base_filename, self.__filename(1) )
+        if os.path.exists(self.base_filename):
+            os.rename(self.base_filename, self.__filename(1))
 
-        self.file_handle = open( self.base_filename, 'w' )
+        self.file_handle = open(self.base_filename, 'w')
         return self.file_handle
 
-if __name__ == '__main__':
 
-    munge = rolling_archive( 'munge', 5 )
-    for i in range(1,20):
+if __name__ == '__main__':
+    munge = rolling_archive('munge', 5)
+    for i in range(1, 20):
         FILE = munge.roll()
-        FILE.write( "This is munge " + str( i ) + "\n" )
+        FILE.write("This is munge " + str(i) + "\n")
         FILE.close()
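
A short, hedged sketch of the rolling_archive class above, not part of the
patch; the file name and archive length are invented.

    # Sketch only; file name and archive length are invented.
    from cylc.rolling_archive import rolling_archive

    arch = rolling_archive('/tmp/suite.state', archive_length=3)
    handle = arch.roll()   # suite.state -> suite.state-1 ... up to -3
    handle.write('new state dump\n')
    handle.close()
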
diff --git a/lib/cylc/run.py b/lib/cylc/run.py
index 688a62c..5d7f47a 100644
--- a/lib/cylc/run.py
+++ b/lib/cylc/run.py
@@ -18,6 +18,7 @@
 
 """Provide the main function for "cylc run" and "cylc restart"."""
 
+import re
 import sys
 from daemonize import daemonize
 from version import CYLC_VERSION
@@ -27,36 +28,43 @@ from exceptions import SchedulerStop, SchedulerError
 
 
 def print_blurb():
-    lines = []
-    lines.append(" The Cylc Suite Engine [" + CYLC_VERSION + "] ")
-    lines.append(" Copyright (C) 2008-2015 NIWA ")
-
-    lic = """
- This program comes with ABSOLUTELY NO WARRANTY.  It is free software;
- you are welcome to redistribute it under certain conditions. Details:
-  `cylc license conditions'; `cylc license warranty' """
-    lines += lic.split('\n')
-
-    mx = 0
-    for line in lines:
-        if len(line) > mx:
-            mx = len(line)
-
-    print '*' * (mx + 2)
-    for line in lines:
-        print '*' + line.center(mx) + '*'
-    print '*' * (mx + 2)
+    logo = (
+        "            ,_,       \n"
+        "            | |       \n"
+        ",_____,_, ,_| |_____, \n"
+        "| ,___| | | | | ,___| \n"
+        "| |___| |_| | | |___, \n"
+        "\_____\___, |_\_____| \n"
+        "      ,___| |         \n"
+        "      \_____|         \n"
+    )
+    license = """
+The Cylc Suite Engine [%s]
+Copyright (C) 2008-2015 NIWA
+_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+This program comes with ABSOLUTELY NO WARRANTY;
+see `cylc warranty`.  It is free software, you
+are welcome to redistribute it under certain
+conditions; see `cylc conditions`.
+
+  """ % CYLC_VERSION
+
+    logo_lines = logo.splitlines()
+    license_lines = license.splitlines()
+    lmax = max(len(line) for line in license_lines)
+    for i in range(len(logo_lines)):
+        print logo_lines[i], ('{0: ^%s}' % lmax).format(license_lines[i])
+    print
 
 
 def main(name, start):
-
     # Parse the command line:
     server = start()
 
     # Print copyright and license information
     print_blurb()
 
-    # Create run directory tree and get port file.
+    # Create run directory tree and get port.
     try:
         GLOBAL_CFG.create_cylc_run_tree(server.suite)
         server.configure_pyro()
@@ -75,7 +83,7 @@ def main(name, start):
         server.run()
         # For profiling (see Python docs for how to display the stats).
         # import cProfile
-        # cProfile.run('server.run()', 'fooprof')
+        # cProfile.runctx('server.run()', globals(), locals(), 'stats')
     except SchedulerStop, x:
         # deliberate stop
         print str(x)
diff --git a/lib/cylc/rundb.py b/lib/cylc/rundb.py
index c818172..11e0f16 100644
--- a/lib/cylc/rundb.py
+++ b/lib/cylc/rundb.py
@@ -15,404 +15,489 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""Provide data access object for the suite runtime database."""
 
-from datetime import datetime
-import errno
-from time import sleep
-import os
-import Queue
-import shutil
+from logging import getLogger, WARNING
 import sqlite3
-import stat
 import sys
-from threading import Thread
-from mkdir_p import mkdir_p
-from cylc.wallclock import get_current_time_string
-import cPickle as pickle
-
-
-class UpdateObject(object):
-    """UpdateObject for using in tasks"""
-    def __init__(self, table, name, cycle, **kwargs):
-        """Update a row in a table."""
-        kwargs["time_updated"] = get_current_time_string()
-        s_fmt = "UPDATE %(table)s SET %(cols)s WHERE name==? AND cycle==?"
-        cols = ""
-        args = []
-        not_first = False
-        for k, v in kwargs.items():
-            if not_first:
-                cols += ", "
-            not_first = True
-            cols += k + "=?"
-            args.append(v)
-        args.append(name)
-        args.append(cycle)
-        self.s_fmt = s_fmt % {"table": table, "cols": cols}
-        self.args = args
-
-
-class RecordBroadcastObject(object):
-    """RecordBroadcastObject for using in broadcast settings dumps"""
-    def __init__(self, time_string, dump_string):
-        """Records a dumped string in the broadcast table"""
-        self.s_fmt = "INSERT INTO broadcast_settings VALUES(?, ?)"
-        self.args = [time_string, dump_string]
-
-
-class RecordEventObject(object):
-    """RecordEventObject for using in tasks"""
-    def __init__(self, name, cycle, submit_num, event=None, message=None, misc=None):
-        """Records an event in the table"""
-        self.s_fmt = "INSERT INTO task_events VALUES(?, ?, ?, ?, ?, ?, ?)"
-        self.args = [name, cycle, get_current_time_string(),
-                     submit_num, event, message, misc]
-
-
-class RecordStateObject(object):
-    """RecordStateObject for using in tasks"""
-    def __init__(self, name, cycle, time_created_string=None,
-                 time_updated_string=None, submit_num=None,
-                 is_manual_submit=None, try_num=None, host=None,
-                 submit_method=None, submit_method_id=None, status=None):
-        """Insert a new row into the states table"""
-        if time_created_string is None:
-            time_created_string = get_current_time_string()
-        self.s_fmt = "INSERT INTO task_states VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
-        self.args = [name, cycle, time_created_string, time_updated_string,
-                     submit_num, is_manual_submit, try_num, host,
-                     submit_method, submit_method_id, status]
-
-
-class BulkDBOperObject(object):
-    """BulkDBOperObject for grouping together related operations"""
-    def __init__(self, base_object):
-        self.s_fmt = base_object.s_fmt
-        self.args = []
-        self.args.append(base_object.args)
-    def add_oper(self, db_object):
-        if db_object.s_fmt != self.s_fmt:
-            raise Exception( "ERROR: cannot combine different types of database operation" )
-        self.args.append(db_object.args)
-
-
-class ThreadedCursor(Thread):
-    def __init__(self, db, dump, restart=False):
-        super(ThreadedCursor, self).__init__()
-        self.max_commit_attempts = 5
-        self.db=db
-        self.db_dump_name = dump
-        self.reqs=Queue.Queue()
-        self.db_dump_msg = ("[INFO] Dumping database queue (%s items) to: %s")
-        self.db_dump_load = ("[INFO] Loading dumped database queue (%s items) from: %s")
-        self.integrity_msg = ("Database Integrity Error: %s:\n"+
-                              "\tConverting INSERT to INSERT OR REPLACE for:\n"+
-                              "\trequest: %s\n\targs: %s")
-        self.generic_err_msg = ("%s:%s occurred while trying to run:\n"+
-                              "\trequest: %s\n\targs: %s")
-        self.db_not_found_err = ("Database Not Found Error:\n"+
-                                 "\tNo database found at %s")
-        self.retry_warning = ("[WARNING] retrying database operation on %s - retry %s \n"+
-                              "\trequest: %s\n\targs: %s")
-        if restart:
-            self.load_queue()
-        self.start()
-        self.exception = None
-
-
-    def run(self):
-        cnx = sqlite3.connect(self.db, timeout=10.0)
-        cursor = cnx.cursor()
-        counter = 1
-        while True:
-            if (counter % 10) == 0 or self.reqs.qsize() == 0:
-                counter = 0
-                attempt = 0
-                while attempt < self.max_commit_attempts:
-                    try:
-                        cnx.commit()
-                        break
-                    except Exception as e:
-                        attempt += 1
-                        if attempt >= self.max_commit_attempts:
-                            self.exception = e
-                            raise e
-                        sleep(1)
-            attempt = 0
-            req, arg, res, bulk = self.reqs.get()
-            self.lastreq = req
-            self.lastarg = arg
-            self.lastbulk = bulk
-            if req=='--close--': break
-            while attempt < self.max_commit_attempts:
-                try:
-                    if bulk:
-                        cursor.executemany(req, arg)
-                    else:
-                        cursor.execute(req, arg)
-                    if res:
-                        for rec in cursor:
-                            res.put(rec)
-                        res.put('--no more--')
-                    cnx.commit()
-                    break
-                except sqlite3.IntegrityError as e:
-                    # Capture integrity errors, refactor request and report to stderr
-                    attempt += 1
-                    if req.startswith("INSERT INTO"):
-                        print >> sys.stderr, self.integrity_msg%(str(e),req,arg)
-                        req = req.replace("INSERT INTO", "INSERT OR REPLACE INTO", 1)
-                    if attempt >= self.max_commit_attempts:
-                        self.exception = e
-                        # dump database queue - should only be readable by suite owner
-                        self.dump_queue()
-                        raise Exception(self.generic_err_msg%(type(e),str(e),req,arg))
-                    print >> sys.stderr, self.retry_warning%(self.db, str(attempt), req, arg)
-                    sleep(1)
-                except Exception as e:
-                    # Capture all other integrity errors and raise more helpful message
-                    attempt += 1
-                    if attempt >= self.max_commit_attempts:
-                        self.exception = e
-                        # dump database queue - should only be readable by suite owner
-                        self.dump_queue()
-                        raise Exception(self.generic_err_msg%(type(e),str(e),req,arg))
-                    print >> sys.stderr, self.retry_warning%(self.db, str(attempt), req, arg)
-                    sleep(1)
-            counter += 1
-        cnx.commit()
-        cnx.close()
-
-    def execute(self, req, arg=None, res=None, bulk=False):
-        self.reqs.put((req, arg or tuple(), res, bulk))
-
-    def select(self, req, arg=None):
-        res=Queue.Queue()
-        self.execute(req, arg, res)
-        while True:
-            rec=res.get()
-            if rec=='--no more--': break
-            yield rec
+import traceback
+import cylc.flags
+
+
+class CylcSuiteDAOTableColumn(object):
+    """Represent a column in a table."""
+
+    def __init__(self, name, datatype, is_primary_key):
+        self.name = name
+        self.datatype = datatype
+        self.is_primary_key = is_primary_key
+
+
+class CylcSuiteDAOTable(object):
+    """Represent a table in the suite runtime database."""
+
+    FMT_CREATE = "CREATE TABLE %(name)s(%(columns_str)s%(primary_keys_str)s)"
+    FMT_DELETE = "DELETE FROM %(name)s%(where_str)s"
+    FMT_INSERT = "INSERT OR REPLACE INTO %(name)s VALUES(%(values_str)s)"
+    FMT_UPDATE = "UPDATE %(name)s SET %(set_str)s%(where_str)s"
+
+    def __init__(self, name, column_items):
+        self.name = name
+        self.columns = []
+        for column_item in column_items:
+            name = column_item[0]
+            attrs = {}
+            if len(column_item) > 1:
+                attrs = column_item[1]
+            self.columns.append(CylcSuiteDAOTableColumn(
+                name,
+                attrs.get("datatype", "TEXT"),
+                attrs.get("is_primary_key", False)))
+        self.delete_queues = {}
+        self.insert_queue = []
+        self.update_queues = {}
+
+    def get_create_stmt(self):
+        """Return an SQL statement to create this table."""
+        column_str_list = []
+        primary_keys = []
+        for column in self.columns:
+            column_str_list.append(column.name + " " + column.datatype)
+            if column.is_primary_key:
+                primary_keys.append(column.name)
+        primary_keys_str = ""
+        if primary_keys:
+            primary_keys_str = ", PRIMARY KEY(" + ", ".join(primary_keys) + ")"
+        return self.FMT_CREATE % {
+            "name": self.name,
+            "columns_str": ", ".join(column_str_list),
+            "primary_keys_str": primary_keys_str}
+
+    def get_insert_stmt(self):
+        """Return an SQL statement to insert a row to this table."""
+        return self.FMT_INSERT % {
+            "name": self.name,
+            "values_str": ", ".join("?" * len(self.columns))}
+
+    def add_delete_item(self, where_args):
+        """Queue a DELETE item.
+
+        where_args should be a dict; the delete will only apply to rows matching
+        all these items.
+
+        """
+        stmt_args = []
+        where_str = ""
+        if where_args:
+            where_strs = []
+            for column in self.columns:
+                if column.name in where_args:
+                    where_strs.append(column.name + "==?")
+                    stmt_args.append(where_args[column.name])
+            if where_strs:
+                where_str = " WHERE " + " AND ".join(where_strs)
+        stmt = self.FMT_DELETE % {"name": self.name, "where_str": where_str}
+        if stmt not in self.delete_queues:
+            self.delete_queues[stmt] = []
+        self.delete_queues[stmt].append(stmt_args)
+
+    def add_insert_item(self, args):
+        """Queue an INSERT args.
+
+        If args is a list, its length will be adjusted to be the same as the
+        number of columns. If args is a dict, it is converted to a list of
+        the same length as the number of columns, with elements determined
+        by matching the column names with the keys in the dict.
+
+        Empty elements are padded with None.
+
+        """
+        if isinstance(args, list):
+            if len(args) == len(self.columns):
+                stmt_args = list(args)
+            elif len(args) < len(self.columns):
+                stmt_args = args + [None] * (len(self.columns) - len(args))
+            else:  # len(args) > len(self.columns)
+                stmt_args = args[0:len(self.columns)]
+        else:
+            stmt_args = [
+                args.get(column.name, None) for column in self.columns]
+        self.insert_queue.append(stmt_args)
+
+    def add_update_item(self, set_args, where_args):
+        """Queue an UPDATE item.
+
+        set_args should be a dict, with column keys and values to be set.
+        where_args should be a dict; the update will only apply to rows matching
+        all these items.
+
+        """
+        set_strs = []
+        stmt_args = []
+        for column in self.columns:
+            if column.name in set_args:
+                set_strs.append(column.name + "=?")
+                stmt_args.append(set_args[column.name])
+        set_str = ", ".join(set_strs)
+        where_str = ""
+        if where_args:
+            where_strs = []
+            for column in self.columns:
+                if column.name in where_args:
+                    where_strs.append(column.name + "==?")
+                    stmt_args.append(where_args[column.name])
+            if where_strs:
+                where_str = " WHERE " + " AND ".join(where_strs)
+        stmt = self.FMT_UPDATE % {
+            "name": self.name,
+            "set_str": set_str,
+            "where_str": where_str}
+        if stmt not in self.update_queues:
+            self.update_queues[stmt] = []
+        self.update_queues[stmt].append(stmt_args)
+
+
+class CylcSuiteDAO(object):
+    """Data access object for the suite runtime database."""
+
+    CONN_TIMEOUT = 0.2
+    DB_FILE_BASE_NAME = "cylc-suite.db"
+    MAX_TRIES = 100
+    TABLE_BROADCAST_EVENTS = "broadcast_events"
+    TABLE_BROADCAST_STATES = "broadcast_states"
+    TABLE_TASK_JOBS = "task_jobs"
+    TABLE_TASK_JOB_LOGS = "task_job_logs"
+    TABLE_TASK_EVENTS = "task_events"
+    TABLE_TASK_STATES = "task_states"
+
+    TABLES_ATTRS = {
+        TABLE_BROADCAST_EVENTS: [
+            ["time"],
+            ["change"],
+            ["point"],
+            ["namespace"],
+            ["key"],
+            ["value"],
+        ],
+        TABLE_BROADCAST_STATES: [
+            ["point", {"is_primary_key": True}],
+            ["namespace", {"is_primary_key": True}],
+            ["key", {"is_primary_key": True}],
+            ["value"],
+        ],
+        TABLE_TASK_JOBS: [
+            ["cycle", {"is_primary_key": True}],
+            ["name", {"is_primary_key": True}],
+            ["submit_num", {"datatype": "INTEGER", "is_primary_key": True}],
+            ["is_manual_submit", {"datatype": "INTEGER"}],
+            ["try_num", {"datatype": "INTEGER"}],
+            ["time_submit"],
+            ["time_submit_exit"],
+            ["submit_status", {"datatype": "INTEGER"}],
+            ["time_run"],
+            ["time_run_exit"],
+            ["run_signal"],
+            ["run_status", {"datatype": "INTEGER"}],
+            ["user_at_host"],
+            ["batch_sys_name"],
+            ["batch_sys_job_id"],
+        ],
+        TABLE_TASK_JOB_LOGS: [
+            ["cycle", {"is_primary_key": True}],
+            ["name", {"is_primary_key": True}],
+            ["submit_num", {"datatype": "INTEGER", "is_primary_key": True}],
+            ["filename", {"is_primary_key": True}],
+            ["location"],
+            ["mtime"],
+            ["size", {"datatype": "INTEGER"}],
+        ],
+        TABLE_TASK_EVENTS: [
+            ["name"],
+            ["cycle"],
+            ["time"],
+            ["submit_num", {"datatype": "INTEGER"}],
+            ["event"],
+            ["message"],
+            ["misc"],
+        ],
+        TABLE_TASK_STATES: [
+            ["name", {"is_primary_key": True}],
+            ["cycle", {"is_primary_key": True}],
+            ["time_created"],
+            ["time_updated"],
+            ["submit_num", {"datatype": "INTEGER"}],
+            ["is_manual_submit", {"datatype": "INTEGER"}],
+            ["try_num", {"datatype": "INTEGER"}],
+            ["host"],
+            ["submit_method"],
+            ["submit_method_id"],
+            ["status"],
+        ],
+    }
+
+    def __init__(self, db_file_name=None, is_public=False):
+        """Initialise object.
+
+        db_file_name - Path to the database file
+        is_public - If True, allow retries, etc
+
+        """
+        self.db_file_name = db_file_name
+        self.is_public = is_public
+        self.conn = None
+        self.n_tries = 0
+
+        self.tables = {}
+        for name, attrs in sorted(self.TABLES_ATTRS.items()):
+            self.tables[name] = CylcSuiteDAOTable(name, attrs)
+
+        if not self.is_public:
+            self.create_tables()
+
+    def add_delete_item(self, table_name, where_args=None):
+        """Queue a DELETE item for a given table.
+
+        where_args should be a dict; the delete will only apply to rows matching
+        all these items.
+
+        """
+        self.tables[table_name].add_delete_item(where_args)
+
+    def add_insert_item(self, table_name, args):
+        """Queue an INSERT args for a given table.
+
+        If args is a list, its length will be adjusted to be the same as the
+        number of columns. If args is a dict, it is converted to a list of
+        the same length as the number of columns, with elements determined
+        by matching the column names with the keys in the dict.
+
+        Empty elements are padded with None.
+
+        """
+        self.tables[table_name].add_insert_item(args)
+
+    def add_update_item(self, table_name, set_args, where_args=None):
+        """Queue an UPDATE item for a given table.
+
+        set_args should be a dict, with column keys and values to be set.
+        where_args should be a dict; the update will only apply to rows matching
+        all these items.
+
+        """
+        self.tables[table_name].add_update_item(set_args, where_args)
 
     def close(self):
-        self.execute('--close--')
-
-    def dump_queue(self):
-        """Dump out queued database operations"""
-        queue_dump = {}
-        if not self.lastreq.startswith("SELECT"):
-            queue_dump[0] = {}
-            queue_dump[0]['req'] = self.lastreq
-            queue_dump[0]['args'] = self.lastarg
-            queue_dump[0]['is_bulk'] = self.lastbulk
-
-        i = 1
-        while True:
+        """Explicitly close the connection."""
+        if self.conn is not None:
             try:
-                req, arg, res, bulk = self.reqs.get_nowait()
-            except Queue.Empty:
-                break
-            # Ignore queries and database close messages
-            if not res and not req == "--close--":
-                queue_dump[i] = {}
-                queue_dump[i]['req'] = req
-                queue_dump[i]['args'] = arg
-                queue_dump[i]['is_bulk'] = bulk
-                i += 1
-
-        print >> sys.stderr, self.db_dump_msg%(len(queue_dump.keys()), str(self.db_dump_name))
-        pickle.dump(queue_dump, open(self.db_dump_name, "wb"))
-
-        # Protect the file
-        os.chmod(self.db_dump_name, stat.S_IRUSR | stat.S_IWUSR)
-        return
-
-    def load_queue(self):
-        """Reload queue from a dump"""
-        if os.path.exists(self.db_dump_name):
-            dumped_queue = pickle.load( open( self.db_dump_name, "rb" ) )
-            print >> sys.stdout, self.db_dump_load%(len(dumped_queue.keys()), str(self.db_dump_name))
-            for item in dumped_queue.keys():
-                self.execute(dumped_queue[item]['req'],
-                             dumped_queue[item]['args'],
-                             bulk=dumped_queue[item]['is_bulk'])
-            os.remove(self.db_dump_name)
-        return
-
-
-class CylcRuntimeDAO(object):
-    """Access object for a Cylc suite runtime database."""
+                self.conn.close()
+            except sqlite3.Error as exc:
+                pass
+            self.conn = None
 
-    DB_FILE_BASE_NAME = "cylc-suite.db"
-    DB_DUMP_BASE_NAME = "cylc_db_dump.p"
-    TASK_EVENTS = "task_events"
-    TASK_STATES = "task_states"
-    BROADCAST_SETTINGS = "broadcast_settings"
-    TABLES = {
-            TASK_EVENTS: [                      # each task event gets a row
-                    "name TEXT",
-                    "cycle TEXT",               # current cycle point of the task
-                    "time INTEGER",             # actual time
-                    "submit_num INTEGER",
-                    "event TEXT",
-                    "message TEXT",
-                    "misc TEXT"],               # e.g. record the user at host associated with this event
-            TASK_STATES: [                      # each task gets a status entry that is updated
-                    "name TEXT",
-                    "cycle TEXT",
-                    "time_created TEXT",        # actual serverside time
-                    "time_updated TEXT",        # actual serverside time
-                    "submit_num INTEGER",       # included in key to track status of different submissions for a task
-                    "is_manual_submit INTEGER", # boolean - user related or auto?
-                    "try_num INTEGER",          # auto-resubmit generates this
-                    "host TEXT",                # host for the task
-                    "submit_method TEXT",       # to be taken from loadleveller id/process - empty at the moment
-                    "submit_method_id TEXT",    # empty at the moment
-                    "status TEXT",
-                    # TODO: "rc TEXT",
-                    # TODO: "auth_key TEXT",
-                    ],
-            BROADCAST_SETTINGS: [
-                    "timestamp TEXT",
-                    "broadcast TEXT"
-                    ]}
-    PRIMARY_KEY_OF = {TASK_EVENTS: None,
-                      TASK_STATES: "name, cycle",
-                      BROADCAST_SETTINGS: None}
-
-
-    def __init__(self, suite_dir=None, new_mode=False, primary_db=True):
-        if suite_dir is None:
-            suite_dir = os.getcwd()
-        if primary_db:
-            prefix = os.path.join(suite_dir, 'state')
+    def connect(self):
+        """Connect to the database."""
+        if self.conn is None:
+            self.conn = sqlite3.connect(self.db_file_name, self.CONN_TIMEOUT)
+        return self.conn
+
+    def create_tables(self):
+        """Create tables."""
+        names = []
+        for row in self.connect().execute(
+                "SELECT name FROM sqlite_master WHERE type==? ORDER BY name",
+                ["table"]):
+            names.append(row[0])
+        for name, table in self.tables.items():
+            if name not in names:
+                self.conn.execute(table.get_create_stmt())
+                self.conn.commit()
+
+    def execute_queued_items(self):
+        """Execute queued items for each table."""
+        will_retry = False
+        for table in self.tables.values():
+            # DELETE statements may have a varying number of WHERE args,
+            # so we can only executemany for each identical template statement.
+            for stmt, stmt_args_list in table.delete_queues.items():
+                self.connect()
+                if self._execute_stmt(table, stmt, stmt_args_list):
+                    table.delete_queues.pop(stmt)
+                else:
+                    will_retry = True
+            # INSERT statements are uniform for each table, so all INSERT
+            # statements can be executed using a single "executemany" call.
+            if table.insert_queue:
+                self.connect()
+                if self._execute_stmt(
+                        table, table.get_insert_stmt(), table.insert_queue):
+                    table.insert_queue = []
+                else:
+                    will_retry = True
+            # UPDATE statements can have a varying number of SET and WHERE args,
+            # so we can only executemany for each identical template statement.
+            for stmt, stmt_args_list in table.update_queues.items():
+                self.connect()
+                if self._execute_stmt(table, stmt, stmt_args_list):
+                    table.update_queues.pop(stmt)
+                else:
+                    will_retry = True
+        if self.conn is not None:
+            try:
+                self.conn.commit()
+            except sqlite3.Error:
+                if not self.is_public:
+                    raise
+                self.conn.rollback()
+                if cylc.flags.debug:
+                    traceback.print_exc()
+                    sys.stderr.write(
+                        "WARNING: %s: db commit failed\n" % self.db_file_name)
+                will_retry = True
+
+        if will_retry:
+            self.n_tries += 1
+            logger = getLogger("main")
+            logger.log(
+                WARNING,
+                "%(file)s: write attempt (%(attempt)d) did not complete\n" % {
+                    "file": self.db_file_name, "attempt": self.n_tries})
         else:
-            prefix = suite_dir
-
-        self.db_file_name = os.path.join(prefix, self.DB_FILE_BASE_NAME)
-        self.db_dump_name = os.path.join(prefix, self.DB_DUMP_BASE_NAME)
-        # create the host directory if necessary
+            if self.n_tries:
+                logger = getLogger("main")
+                logger.log(
+                    WARNING,
+                    "%(file)s: recovered after (%(attempt)d) attempt(s)\n" % {
+                        "file": self.db_file_name, "attempt": self.n_tries})
+            self.n_tries = 0
+
+        # N.B. This is not strictly necessary. However, if the suite run
+        # directory is removed, a forced reconnection to the private database
+        # will ensure that the suite dies.
+        self.close()
+
+    def _execute_stmt(self, table, stmt, stmt_args_list):
+        """Helper for "self.execute_queued_items".
+
+        Execute a statement. If this is the public database, return True on
+        success and False on failure. If this is the private database, return
+        True on success, and raise on failure.
+        """
         try:
-            mkdir_p( suite_dir )
-        except Exception, x:
-            raise Exception( "ERROR: " + str(x) )
-
-        if new_mode:
-            if os.path.isdir(self.db_file_name):
-                shutil.rmtree(self.db_file_name)
-            else:
-                try:
-                    os.unlink(self.db_file_name)
-                except:
-                    pass
-        if not os.path.exists(self.db_file_name):
-            new_mode = True
-        if new_mode:
-            self.create()
-            # Restrict the primary database to user access only
-            if primary_db:
-                os.chmod(self.db_file_name, stat.S_IRUSR | stat.S_IWUSR)
-            # Clear out old db operations dump
-            if os.path.exists(self.db_dump_name):
-                os.remove(self.db_dump_name)
+            self.conn.executemany(stmt, stmt_args_list)
+        except sqlite3.Error:
+            if not self.is_public:
+                raise
+            if cylc.flags.debug:
+                traceback.print_exc()
+                sys.stderr.write(
+                    "WARNING: %(file)s: %(table)s: %(stmt)s\n" % {
+                        "file": self.db_file_name,
+                        "table": table.name,
+                        "stmt": stmt})
+                for stmt_args in stmt_args_list:
+                    sys.stderr.write("\t%(stmt_args)s\n" % {
+                        "stmt_args": stmt_args})
+            return False
         else:
-            self.lock_check()
-
-        self.c = ThreadedCursor(self.db_file_name, self.db_dump_name, not new_mode)
-
-    def close(self):
-        self.c.close()
-
-    def connect(self):
-        self.conn = sqlite3.connect(self.db_file_name)
-        return self.conn.cursor()
-
-    def create(self):
-        """Create the database tables."""
-        c = self.connect()
-        for table, cols in self.TABLES.items():
-            s = "CREATE TABLE " + table + "("
-            not_first = False
-            for col in cols:
-                if not_first:
-                    s += ", "
-                not_first = True
-                s += col
-            if self.PRIMARY_KEY_OF[table]:
-                s += ", PRIMARY KEY(" + self.PRIMARY_KEY_OF[table] + ")"
-            s += ")"
-            res = c.execute(s)
-        return
-
-    def lock_check(self):
-        """Try to create a dummy table"""
-        c = self.connect()
-        c.execute("CREATE TABLE lock_check (entry TEXT)")
-        c.execute("DROP TABLE lock_check")
-
-    def get_task_submit_num(self, name, cycle):
-        s_fmt = ("SELECT COUNT(*) FROM task_events" +
-                 " WHERE name==? AND cycle==? AND event==?")
-        args = [name, str(cycle), "incrementing submit number"]
-        count = 0
-        for row in self.c.select(s_fmt, args):
-            count = row[0]  # submission numbers should start at 0
-            break
-        return count + 1
-
-    def get_task_current_submit_num(self, name, cycle):
-        s_fmt = ("SELECT COUNT(*) FROM task_events" +
-                 " WHERE name==? AND cycle==? AND event==?")
-        args = [name, str(cycle), "incrementing submit number"]
-        for row in self.c.select(s_fmt, args):
-            return row[0]
-
-    def get_task_state_exists(self, name, cycle):
-        s_fmt = "SELECT COUNT(*) FROM task_states WHERE name==? AND cycle==?"
-        for row in self.c.select(s_fmt, [name, str(cycle)]):
-            return row[0] > 0
-        return False
-
-    def get_task_host(self, name, cycle):
-        """Return the host name for task "name" at a given cycle."""
-        s_fmt = r"SELECT host FROM task_states WHERE name==? AND cycle==?"
-        for row in self.c.select(s_fmt, [name, str(cycle)]):
-            return row[0]
-
-    def get_task_location(self, name, cycle):
-        s_fmt = """SELECT misc FROM task_events WHERE name==? AND cycle==?
-                   AND event=="submission succeeded" AND misc!=""
-                   ORDER BY submit_num DESC LIMIT 1"""
-        for row in self.c.select(s_fmt, [name, str(cycle)]):
-            return row
-
-    def get_task_submit_method_id_and_try(self, name, cycle):
-        s_fmt = """SELECT submit_method_id, try_num FROM task_states WHERE name==? AND cycle==?
-                   ORDER BY submit_num DESC LIMIT 1"""
-        for row in self.c.select(s_fmt, [name, str(cycle)]):
-            return row
-
-    def run_db_op(self, db_oper):
-        if not os.path.exists(self.db_file_name):
-            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), self.db_file_name)
-        if isinstance(db_oper, BulkDBOperObject):
-            self.c.execute(db_oper.s_fmt, db_oper.args, bulk=True)
+            return True
+
+    def select_task_job(self, keys, cycle, name, submit_num=None):
+        """Select items from task_jobs by (cycle, name, submit_num).
+
+        Return a dict mapping keys to the column values.
+
+        """
+        if keys is None:
+            keys = []
+            for column in self.tables[self.TABLE_TASK_JOBS].columns[3:]:
+                keys.append(column.name)
+        if submit_num in [None, "NN"]:
+            stmt = (r"SELECT %(keys_str)s FROM %(table)s" +
+                    r" WHERE cycle==? AND name==?" +
+                    r" ORDER BY submit_num DESC LIMIT 1") % {
+                "keys_str": ",".join(keys),
+                "table": self.TABLE_TASK_JOBS}
+            stmt_args = [cycle, name]
         else:
-            self.c.execute(db_oper.s_fmt, db_oper.args)
-
-    def get_restart_info(self, cycle):
-        """Get all the task names and submit count for a particular cycle"""
-        s_fmt = """SELECT name FROM task_states WHERE cycle ==?"""
-        args = [cycle]
-        res = {}
-        for row in self.c.select(s_fmt, args):
-            res[row[0]] = 0
-        
-        s_fmt = """SELECT name, count(*) FROM task_events WHERE cycle ==? AND
-                   event ==? GROUP BY name"""
-        args = [cycle, "incrementing submit number"]
-        
-        for name, count in self.c.select(s_fmt, args):
-            res[name] = count
-
-        return res
+            stmt = (r"SELECT %(keys_str)s FROM %(table)s" +
+                    r" WHERE cycle==? AND name==? AND submit_num==?") % {
+                "keys_str": ",".join(keys),
+                "table": self.TABLE_TASK_JOBS}
+            stmt_args = [cycle, name, submit_num]
+        try:
+            for row in self.connect().execute(stmt, stmt_args):
+                ret = {}
+                for key, value in zip(keys, row):
+                    ret[key] = value
+                return ret
+        except sqlite3.DatabaseError:
+            return None
+
+    def select_task_states_by_task_ids(self, keys, task_ids=None):
+        """Select items from task_states by task IDs.
+
+        Return a data structure like this:
+
+        {
+            (name1, point1): {key1: "value 1", ...},
+            ...,
+        }
+
+        task_ids should be specified as [[name, cycle], ...]
+
+        """
+        if keys is None:
+            keys = []
+            for column in self.tables[self.TABLE_TASK_STATES].columns[2:]:
+                keys.append(column.name)
+        stmt = r"SELECT name,cycle,%(keys_str)s FROM %(name)s" % {
+            "keys_str": ",".join(keys),
+            "name": self.TABLE_TASK_STATES}
+        stmt_args = []
+        if task_ids:
+            stmt += (
+                " WHERE (" +
+                ") OR (".join(["name==? AND cycle==?"] * len(task_ids)) +
+                ")")
+            for name, cycle in task_ids:
+                stmt_args += [name, cycle]
+        ret = {}
+        for row in self.connect().execute(stmt, stmt_args):
+            name, cycle = row[0:2]
+            ret[(name, cycle)] = {}
+            for key, value in zip(keys, row[2:]):
+                ret[(name, cycle)][key] = value
+        return ret
+
+    def select_task_states_by_cycles(self, keys, cycles=None):
+        """Select items from task_states by cycles.
+
+        Return a data structure like this:
+
+        {
+            (name1, point1): {key1: "value 1", ...},
+            ...,
+        }
+
+        cycles should be a list of relevant cycles.
+
+        """
+        stmt = r"SELECT name,cycle,%(keys_str)s FROM %(name)s" % {
+            "keys_str": ",".join(keys),
+            "name": self.TABLE_TASK_STATES}
+        stmt_args = []
+        if cycles:
+            stmt += " WHERE " + " OR ".join(["cycle==?"] * len(cycles))
+            stmt_args += [str(cycle) for cycle in cycles]
+        ret = {}
+        for row in self.connect().execute(stmt, stmt_args):
+            name, cycle = row[0:2]
+            ret[(name, cycle)] = {}
+            for key, value in zip(keys, row[2:]):
+                ret[(name, cycle)][key] = value
+        return ret
+
+    def vacuum(self):
+        """Vacuum to the database."""
+        return self.connect().execute("VACUUM")
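
    [Note: the rewritten rundb module replaces the threaded cursor with
    per-table in-memory queues that are flushed in batches through
    executemany(). A rough usage sketch, assuming the cylc package from this
    release is on PYTHONPATH and using an illustrative database path:]

    from cylc.rundb import CylcSuiteDAO

    # Illustrative path; the suite daemon normally supplies this.
    dao = CylcSuiteDAO("/tmp/cylc-suite-example.db")

    # Queue an insert and an update for task_states, then flush both
    # queues in one batched round trip to SQLite.
    dao.add_insert_item(CylcSuiteDAO.TABLE_TASK_STATES, {
        "name": "foo", "cycle": "20151212T0000Z", "status": "waiting"})
    dao.add_update_item(
        CylcSuiteDAO.TABLE_TASK_STATES,
        {"status": "running"},
        {"name": "foo", "cycle": "20151212T0000Z"})
    dao.execute_queued_items()

    # Read the row back; keys=None selects every non-key column.
    print(dao.select_task_states_by_task_ids(
        None, [["foo", "20151212T0000Z"]]))
    dao.close()
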
diff --git a/lib/cylc/scheduler.py b/lib/cylc/scheduler.py
index 53a034a..e37d28d 100644
--- a/lib/cylc/scheduler.py
+++ b/lib/cylc/scheduler.py
@@ -16,49 +16,73 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from cylc_pyro_server import pyro_server
+import os
+import re
+import signal
+import sys
+import time
+from tempfile import mkstemp
+import traceback
+import datetime
+import logging
+import threading
+import subprocess
+from copy import deepcopy
+from Queue import Queue, Empty
+from shutil import copy as copyfile, copytree, rmtree
+
+from parsec.util import printcfg
+import isodatetime.data
+import isodatetime.parsers
+
+import cylc.flags
+from cylc.rundb import CylcSuiteDAO
 from cylc.job_host import RemoteJobHostManager, RemoteJobHostInitError
 from cylc.task_proxy import TaskProxy
 from cylc.job_file import JOB_FILE
 from cylc.suite_host import get_suite_host
 from cylc.owner import user
 from cylc.version import CYLC_VERSION
+from cylc.config import SuiteConfig
+from cylc.passphrase import passphrase
+from cylc.get_task_proxy import get_task_proxy
 from parsec.util import printcfg
-from shutil import copy as shcopy, copytree, rmtree
-from copy import deepcopy
-import datetime, time
+from copy import copy, deepcopy
+import time
+import datetime
 import logging
-import re, os, sys, traceback
-from state_summary import state_summary
-from passphrase import passphrase
-from suite_id import identifier
-from config import config
+import re
+import os
+import sys
+import traceback
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
-from cylc.port_file import port_file, PortFileExistsError, PortFileError
 from cylc.regpath import RegPath
 from cylc.CylcError import TaskNotFoundError, SchedulerError
 from cylc.RunEventHandler import RunHandler
 from cylc.LogDiagnosis import LogSpec
 from cylc.suite_state_dumping import SuiteStateDumper
 from cylc.suite_logging import suite_log
-import threading
-from cylc.suite_cmd_interface import comqueue
-from cylc.suite_info_interface import info_interface
-from cylc.suite_log_interface import log_interface
 from cylc.task_id import TaskID
 from cylc.task_pool import TaskPool
-import flags
-import cylc.rundb
-from Queue import Queue, Empty
-import subprocess
 from cylc.mp_pool import SuiteProcPool
-from exceptions import SchedulerStop, SchedulerError
-from wallclock import (
+from cylc.exceptions import SchedulerStop, SchedulerError
+from cylc.wallclock import (
     get_current_time_string, get_seconds_as_interval_string)
 from cylc.cycling import PointParsingError
 from cylc.cycling.loader import get_point, standardise_point_string
-import isodatetime.data
-import isodatetime.parsers
+from cylc.network import (
+    PYRO_SUITEID_OBJ_NAME, PYRO_STATE_OBJ_NAME,
+    PYRO_CMD_OBJ_NAME, PYRO_BCAST_OBJ_NAME, PYRO_EXT_TRIG_OBJ_NAME,
+    PYRO_INFO_OBJ_NAME, PYRO_LOG_OBJ_NAME)
+from cylc.network.pyro_daemon import PyroDaemon
+from cylc.network.suite_state import StateSummaryServer
+from cylc.network.suite_command import SuiteCommandServer
+from cylc.network.suite_broadcast import BroadcastServer
+from cylc.network.ext_trigger import ExtTriggerServer
+from cylc.network.suite_info import SuiteInfoServer
+from cylc.network.suite_log import SuiteLogServer
+from cylc.network.suite_identifier import SuiteIdServer
+from cylc.network.port_file import PortFile, PortFileExistsError, PortFileError
 
 
 class request_handler(threading.Thread):
@@ -79,13 +103,15 @@ class request_handler(threading.Thread):
 
 class scheduler(object):
 
-    def __init__( self, is_restart=False ):
+    FS_CHECK_PERIOD = 600.0  # 600 seconds
+
+    def __init__(self, is_restart=False):
 
         # SUITE OWNER
         self.owner = user
 
         # SUITE HOST
-        self.host= get_suite_host()
+        self.host = get_suite_host()
 
         self.is_restart = is_restart
 
@@ -100,7 +126,6 @@ class scheduler(object):
 
         # initialize some items in case of early shutdown
         # (required in the shutdown() method)
-        self.suite_id = None
         self.suite_state = None
         self.command_queue = None
         self.pool = None
@@ -129,138 +154,103 @@ class scheduler(object):
         self._cli_initial_point_string = None
         self._cli_start_point_string = None
 
-        self.parser.add_option( "--until",
-                help=("Shut down after all tasks have PASSED " +
-                      "this cycle point."),
-                metavar="CYCLE_POINT", action="store",
-                dest="final_point_string" )
-
-        self.parser.add_option( "--hold", help="Hold (don't run tasks) "
-                "immediately on starting.",
-                action="store_true", default=False, dest="start_held" )
+        self.parser.add_option(
+            "--until",
+            help=("Shut down after all tasks have PASSED " +
+                  "this cycle point."),
+            metavar="CYCLE_POINT", action="store",
+            dest="final_point_string")
+
+        self.parser.add_option(
+            "--hold",
+            help="Hold (don't run tasks) immediately on starting.",
+            action="store_true", default=False, dest="start_held")
+
+        self.parser.add_option(
+            "--hold-after",
+            help="Hold (don't run tasks) AFTER this cycle point.",
+            metavar="CYCLE_POINT", action="store", dest="hold_point_string")
+
+        self.parser.add_option(
+            "-m", "--mode",
+            help="Run mode: live, simulation, or dummy; default is live.",
+            metavar="STRING", action="store", default='live', dest="run_mode")
+
+        self.parser.add_option(
+            "--reference-log",
+            help="Generate a reference log for use in reference tests.",
+            action="store_true", default=False, dest="genref")
+
+        self.parser.add_option(
+            "--reference-test",
+            help="Do a test run against a previously generated reference log.",
+            action="store_true", default=False, dest="reftest")
 
-        self.parser.add_option( "--hold-after",
-                help="Hold (don't run tasks) AFTER this cycle point.",
-                metavar="CYCLE_POINT", action="store", dest="hold_point_string" )
-
-        self.parser.add_option( "-m", "--mode",
-                help="Run mode: live, simulation, or dummy; default is live.",
-                metavar="STRING", action="store", default='live', dest="run_mode" )
-
-        self.parser.add_option( "--reference-log",
-                help="Generate a reference log for use in reference tests.",
-                action="store_true", default=False, dest="genref" )
+        self.parse_commandline()
 
-        self.parser.add_option( "--reference-test",
-                help="Do a test run against a previously generated reference log.",
-                action="store_true", default=False, dest="reftest" )
+    def configure(self):
+        self.log_memory("scheduler.py: start configure")
+        SuiteProcPool.get_inst()
 
-        self.parse_commandline()
+        self.info_commands = {}
+        for attr_name in dir(self):
+            attr = getattr(self, attr_name)
+            if not callable(attr):
+                continue
+            if attr_name.startswith('info_'):
+                self.info_commands[attr_name.replace('info_', '')] = attr
 
-    def configure( self ):
-        SuiteProcPool.get_inst()  # initialise the singleton
-        # read-only commands to expose directly to the network
-        self.info_commands = {
-                'ping suite' : self.info_ping_suite,
-                'ping task' : self.info_ping_task,
-                'suite info' : self.info_get_suite_info,
-                'task info' : self.info_get_task_info,
-                'all families' : self.info_get_all_families,
-                'triggering families' : self.info_get_triggering_families,
-                'first-parent ancestors' : self.info_get_first_parent_ancestors,
-                'first-parent descendants' : self.info_get_first_parent_descendants,
-                'graph raw' : self.info_get_graph_raw,
-                'task requisites' : self.info_get_task_requisites,
-                'get cylc version' : self.info_get_cylc_version,
-                'task job file path' : self.info_get_task_jobfile_path
-                }
-
-        # control commands to expose indirectly via a command queue
-        self.control_commands = {
-                'stop cleanly' : self.command_set_stop_cleanly,
-                'stop now' : self.command_stop_now,
-                'stop after point' : self.command_set_stop_after_point,
-                'stop after clock time' : self.command_set_stop_after_clock_time,
-                'stop after task' : self.command_set_stop_after_task,
-                'release suite' : self.command_release_suite,
-                'release task' : self.command_release_task,
-                'remove cycle' : self.command_remove_cycle,
-                'remove task' : self.command_remove_task,
-                'hold suite now' : self.command_hold_suite,
-                'hold suite after' : self.command_hold_after_point_string,
-                'hold task now' : self.command_hold_task,
-                'set runahead' : self.command_set_runahead,
-                'set verbosity' : self.command_set_verbosity,
-                'purge tree' : self.command_purge_tree,
-                'reset task state' : self.command_reset_task_state,
-                'trigger task' : self.command_trigger_task,
-                'dry run task' : self.command_dry_run_task,
-                'nudge suite' : self.command_nudge,
-                'insert task' : self.command_insert_task,
-                'reload suite' : self.command_reload_suite,
-                'add prerequisite' : self.command_add_prerequisite,
-                'poll tasks' : self.command_poll_tasks,
-                'kill tasks' : self.command_kill_tasks,
-                }
-
-        # run dependency negotation etc. after these commands
+        # Run dependency negotiation etc. after these commands.
         self.proc_cmds = [
-            'release suite',
-            'release task',
-            'kill cycle',
-            'kill task',
-            'set runahead',
-            'purge tree',
-            'reset task state',
-            'trigger task',
-            'nudge suite',
-            'insert task',
-            'reload suite',
-            'prerequisite'
-            ]
+            'release_suite',
+            'release_task',
+            'kill_tasks',
+            'set_runahead',
+            'reset_task_state',
+            'trigger_task',
+            'nudge',
+            'insert_task',
+            'reload_suite',
+        ]
+
+        self.log_memory("scheduler.py: before configure_suite")
         self.configure_suite()
-
-        # REMOTELY ACCESSIBLE SUITE IDENTIFIER
-        self.suite_id = identifier( self.suite, self.owner )
-        self.pyro.connect( self.suite_id, 'cylcid', qualified = False )
+        self.log_memory("scheduler.py: after configure_suite")
 
         reqmode = self.config.cfg['cylc']['required run mode']
         if reqmode:
             if reqmode != self.run_mode:
-                raise SchedulerError, 'ERROR: this suite requires the ' + reqmode + ' run mode'
+                raise SchedulerError(
+                    'ERROR: this suite requires the %s run mode' % reqmode)
 
         # TODO - self.config.fdir can be used instead of self.suite_dir
-        self.reflogfile = os.path.join(self.config.fdir,'reference.log')
+        self.reflogfile = os.path.join(self.config.fdir, 'reference.log')
 
         if self.gen_reference_log or self.reference_test_mode:
             self.configure_reftest()
 
         # Note that the following lines must be present at the top of
         # the suite log file for use in reference test runs:
-        self.log.info( 'Suite starting at ' + get_current_time_string() )
-        self.log.info( 'Run mode: ' + self.run_mode )
-        self.log.info( 'Initial point: ' + str(self.initial_point) )
+        self.log.info('Suite starting at ' + get_current_time_string())
+        self.log.info('Run mode: ' + self.run_mode)
+        self.log.info('Initial point: ' + str(self.initial_point))
         if self.start_point != self.initial_point:
-            self.log.info( 'Start point: ' + str(self.start_point) )
-        self.log.info( 'Final point: ' + str(self.final_point) )
+            self.log.info('Start point: ' + str(self.start_point))
+        self.log.info('Final point: ' + str(self.final_point))
 
         self.pool = TaskPool(
-            self.suite, self.db, self.view_db, self.final_point, self.config,
+            self.suite, self.pri_dao, self.pub_dao, self.final_point,
             self.pyro, self.log, self.run_mode)
         self.state_dumper.pool = self.pool
-        self.request_handler = request_handler( self.pyro )
+        self.request_handler = request_handler(self.pyro)
         self.request_handler.start()
 
-        # LOAD TASK POOL ACCORDING TO STARTUP METHOD
         self.old_user_at_host_set = set()
+        self.log_memory("scheduler.py: before load_tasks")
         self.load_tasks()
+        self.log_memory("scheduler.py: after load_tasks")
 
-        # REMOTELY ACCESSIBLE SUITE STATE SUMMARY
-        self.suite_state = state_summary(
-            self.config, self.run_mode, str(self.pool.get_min_point()))
-        self.pyro.connect( self.suite_state, 'state_summary')
-
-        self.state_dumper.set_cts( self.initial_point, self.final_point )
+        self.state_dumper.set_cts(self.initial_point, self.final_point)
         self.configure_suite_environment()
 
         # Write suite contact environment variables and link suite python
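
    [Note: configure() above now builds its read-only command map by
    introspection: any callable attribute whose name starts with "info_" is
    exposed, with the prefix stripped, instead of being listed in a
    hand-maintained dict. A generic sketch of that registry pattern (the
    Registry class and its methods are illustrative, not part of cylc):]

    class Registry(object):
        """Collect info_* methods into a name -> callable map."""

        def info_ping(self):
            return True

        def info_version(self):
            return "6.7.4"

        def build_info_commands(self):
            commands = {}
            for attr_name in dir(self):
                attr = getattr(self, attr_name)
                if callable(attr) and attr_name.startswith("info_"):
                    commands[attr_name.replace("info_", "")] = attr
            return commands

    reg = Registry()
    print(sorted(reg.build_info_commands()))  # -> ['ping', 'version']
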
@@ -274,9 +264,8 @@ class scheduler(object):
         f.close()
 
         suite_py = os.path.join(self.suite_dir, "python")
-        if (os.path.realpath(self.suite_dir)
-                != os.path.realpath(suite_run_dir) and
-                os.path.isdir(suite_py)):
+        if (os.path.realpath(self.suite_dir) !=
+                os.path.realpath(suite_run_dir) and os.path.isdir(suite_py)):
             suite_run_py = os.path.join(suite_run_dir, "python")
             try:
                 rmtree(suite_run_py)
@@ -290,7 +279,7 @@ class scheduler(object):
                 RemoteJobHostManager.get_inst().init_suite_run_dir(
                     self.suite, user_at_host)
             except RemoteJobHostInitError as exc:
-                self.log.warning(str(exc))
+                self.log.error(str(exc))
 
         self.already_timed_out = False
         if self.config.cfg['cylc']['event hooks']['timeout']:
@@ -298,9 +287,10 @@ class scheduler(object):
 
         self.nudge_timer_start = None
         self.nudge_timer_on = False
-        self.auto_nudge_interval = 5 # seconds
+        self.auto_nudge_interval = 5  # seconds
+        self.log_memory("scheduler.py: end configure")
 
-    def process_command_queue( self ):
+    def process_command_queue(self):
         queue = self.command_queue.get_queue()
         n = queue.qsize()
         if n > 0:
@@ -314,45 +304,70 @@ class scheduler(object):
             except Empty:
                 break
             print '  +', name
-            cmdstr = name + '(' + ','.join( [ str(a) for a in args ]) + ')'
+            cmdstr = name + '(' + ','.join([str(a) for a in args]) + ')'
             try:
-                self.control_commands[ name ]( *args )
+                getattr(self, "command_%s" % name)(*args)
             except SchedulerStop:
-                self.log.info( 'Command succeeded: ' + cmdstr )
+                self.log.info('Command succeeded: ' + cmdstr)
                 raise
             except Exception, x:
-                # don't let a bad command bring the suite down
-                self.log.warning( traceback.format_exc() )
-                self.log.warning( str(x) )
-                self.log.warning( 'Command failed: ' +  cmdstr )
+                # Don't let a bad command bring the suite down.
+                self.log.warning(traceback.format_exc())
+                self.log.warning(str(x))
+                self.log.warning('Command failed: ' + cmdstr)
             else:
-                self.log.info( 'Command succeeded: ' + cmdstr )
+                self.log.info('Command succeeded: ' + cmdstr)
                 self.do_update_state_summary = True
                 if name in self.proc_cmds:
                     self.do_process_tasks = True
             queue.task_done()
 
-    def _task_type_exists( self, name_or_id ):
+    def _task_type_exists(self, name_or_id):
         # does a task name or id match a known task type in this suite?
         name = name_or_id
         if TaskID.is_valid_id(name_or_id):
             name = TaskID.split(name_or_id)[0]
         return name in self.config.get_task_name_list()
 
-    def info_ping_suite( self ):
+    def info_ping_suite(self):
         return True
 
     def info_get_cylc_version(self):
         """Return the cylc version running this suite daemon."""
         return CYLC_VERSION
 
+    def get_standardised_point_string(self, point_string):
+        """Return a standardised point string.
+
+        Used to process incoming command arguments.
+        """
+        try:
+            point_string = standardise_point_string(point_string)
+        except PointParsingError as exc:
+            # (This is only needed to raise a clearer error message).
+            raise Exception("Invalid cycle point: %s" % point_string)
+        return point_string
+
+    def get_standardised_point(self, point_string):
+        """Return a standardised point."""
+        point_string = self.get_standardised_point_string(point_string)
+        return get_point(point_string)
+
+    def get_standardised_taskid(self, task_id):
+        """Return task ID with standardised cycle point."""
+        name, point_string = TaskID.split(task_id)
+        point_string = self.get_standardised_point_string(point_string)
+        return TaskID.get(name, point_string)
+
     def info_ping_task(self, task_id, exists_only=False):
+        task_id = self.get_standardised_taskid(task_id)
         return self.pool.ping_task(task_id, exists_only)
 
     def info_get_task_jobfile_path(self, task_id):
+        task_id = self.get_standardised_taskid(task_id)
         return self.pool.get_task_jobfile_path(task_id)
 
-    def info_get_suite_info( self ):
+    def info_get_suite_info(self):
         info = {}
         for item in 'title', 'description':
             info[item] = self.config.cfg[item]
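
    [Note: process_command_queue() in the hunk above now resolves each queued
    control command with getattr(self, "command_%s" % name) and wraps the
    call so a failing command is logged rather than stopping the daemon. A
    toy sketch of the same dispatch-with-isolation idea (class and command
    names are illustrative):]

    import logging

    logging.basicConfig(level=logging.INFO)

    class Dispatcher(object):

        def command_hold(self, task_id):
            logging.info("holding %s", task_id)

        def dispatch(self, name, *args):
            cmdstr = "%s(%s)" % (name, ",".join(str(a) for a in args))
            try:
                getattr(self, "command_%s" % name)(*args)
            except Exception:
                # Do not let a bad command bring the daemon down.
                logging.exception("Command failed: %s", cmdstr)
            else:
                logging.info("Command succeeded: %s", cmdstr)

    d = Dispatcher()
    d.dispatch("hold", "foo.20151212T0000Z")
    d.dispatch("no_such_command")  # logged as failed, not fatal
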
@@ -364,33 +379,35 @@ class scheduler(object):
         except KeyError:
             return {}
 
-    def info_get_all_families( self, exclude_root=False ):
+    def info_get_all_families(self, exclude_root=False):
         fams = self.config.get_first_parent_descendants().keys()
         if exclude_root:
             return fams[:-1]
         else:
             return fams
 
-    def info_get_triggering_families( self ):
+    def info_get_triggering_families(self):
         return self.config.triggering_families
 
-    def info_get_first_parent_descendants( self ):
+    def info_get_first_parent_descendants(self):
         # families for single-inheritance hierarchy based on first parents
         return deepcopy(self.config.get_first_parent_descendants())
 
-    def info_get_first_parent_ancestors( self, pruned=False ):
+    def info_get_first_parent_ancestors(self, pruned=False):
         # single-inheritance hierarchy based on first parents
-        return deepcopy(self.config.get_first_parent_ancestors(pruned) )
+        return deepcopy(self.config.get_first_parent_ancestors(pruned))
 
-    def info_get_graph_raw( self, cto, ctn, group_nodes, ungroup_nodes,
-            ungroup_recursive, group_all, ungroup_all ):
-        return self.config.get_graph_raw( cto, ctn, group_nodes,
-                ungroup_nodes, ungroup_recursive, group_all, ungroup_all), \
-                        self.config.suite_polling_tasks, \
-                        self.config.leaves, self.config.feet
+    def info_get_graph_raw(self, cto, ctn, group_nodes, ungroup_nodes,
+                           ungroup_recursive, group_all, ungroup_all):
+        rgraph = self.config.get_graph_raw(
+            cto, ctn, group_nodes, ungroup_nodes, ungroup_recursive, group_all,
+            ungroup_all)
+        return (
+            rgraph, self.config.suite_polling_tasks, self.config.leaves,
+            self.config.feet)
 
     def info_get_task_requisites(self, name, point_string):
-        point_string = standardise_point_string(point_string)
+        point_string = self.get_standardised_point_string(point_string)
         return self.pool.get_task_requisites(
             TaskID.get(name, point_string))
 
@@ -398,9 +415,9 @@ class scheduler(object):
         """Stop job submission and set the flag for clean shutdown."""
         SuiteProcPool.get_inst().stop_job_submission()
         TaskProxy.stop_sim_mode_job_submission = True
-        if kill_active_tasks:
-            self.pool.kill_active_tasks()
         self.shut_down_cleanly = True
+        self.kill_on_shutdown = kill_active_tasks
+        self.next_kill_issue = time.time()
 
     def command_stop_now(self):
         """Shutdown immediately."""
@@ -410,10 +427,11 @@ class scheduler(object):
         proc_pool.terminate()
         raise SchedulerStop("Stopping NOW")
 
-    def command_set_stop_after_point( self, point_string ):
-        self.set_stop_point( point_string )
+    def command_set_stop_after_point(self, point_string):
+        point_string = self.get_standardised_point_string(point_string)
+        self.set_stop_point(point_string)
 
-    def command_set_stop_after_clock_time( self, arg ):
+    def command_set_stop_after_clock_time(self, arg):
         # format: ISO 8601 compatible or YYYY/MM/DD-HH:mm (backwards comp.)
         parser = isodatetime.parsers.TimePointParser()
         try:
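
    [Note: command_set_stop_after_clock_time(), continuing into the hunk
    below, accepts an ISO 8601 time, parses it with the isodatetime library,
    and converts it to seconds since the Unix epoch for comparison with
    wall-clock time. A small hedged sketch of that conversion, assuming the
    isodatetime version shipped alongside this cylc release:]

    from isodatetime.parsers import TimePointParser

    parser = TimePointParser()
    stop_point = parser.parse("2015-12-12T14:00Z")
    # TimePoint.get() exposes named properties; the scheduler uses this
    # one to decide when the clock-triggered stop has been reached.
    print(int(stop_point.get("seconds_since_unix_epoch")))
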
@@ -425,167 +443,156 @@ class scheduler(object):
                 raise exc  # Raise the first (prob. more relevant) ValueError.
         stop_time_in_epoch_seconds = int(stop_point.get(
             "seconds_since_unix_epoch"))
-        self.set_stop_clock( stop_time_in_epoch_seconds, str(stop_point) )
+        self.set_stop_clock(stop_time_in_epoch_seconds, str(stop_point))
 
-    def command_set_stop_after_task(self, tid):
-        if TaskID.is_valid_id(tid):
-            self.set_stop_task(tid)
+    def command_set_stop_after_task(self, task_id):
+        task_id = self.get_standardised_taskid(task_id)
+        if TaskID.is_valid_id(task_id):
+            self.set_stop_task(task_id)
 
-    def command_release_task( self, name, point_string, is_family ):
-        matches = self.get_matching_task_names( name, is_family )
-        point_string = standardise_point_string(point_string)
+    def command_release_task(self, name, point_string, is_family):
+        point_string = self.get_standardised_point_string(point_string)
+        matches = self.get_matching_task_names(name, is_family)
         if not matches:
-            raise TaskNotFoundError, "No matching tasks found: " + name
+            raise TaskNotFoundError("No matching tasks found: %s" % name)
         task_ids = [TaskID.get(i, point_string) for i in matches]
-        self.pool.release_tasks( task_ids )
+        self.pool.release_tasks(task_ids)
 
     def command_poll_tasks(self, name, point_string, is_family):
         """Poll all tasks or a task/family if options are provided."""
-        if name == "None" and point_string == "None":
-            self.pool.poll_tasks()
-        else:
+        if name and point_string:
             matches = self.get_matching_task_names(name, is_family)
             if not matches:
-                raise TaskNotFoundError, "No matching tasks found: " + name
-            point_string = standardise_point_string(point_string)
+                raise TaskNotFoundError("No matching tasks found: %s" % name)
+            point_string = self.get_standardised_point_string(point_string)
             task_ids = [TaskID.get(i, point_string) for i in matches]
-            self.pool.poll_tasks(task_ids)
+            self.pool.poll_task_jobs(task_ids)
+        else:
+            self.pool.poll_task_jobs()
 
-    def command_kill_tasks( self, name, point_string, is_family ):
-        matches = self.get_matching_task_names( name, is_family )
-        if not matches:
-            raise TaskNotFoundError, "No matching tasks found: " + name
-        point_string = standardise_point_string(point_string)
-        task_ids = [TaskID.get(i, point_string) for i in matches]
-        self.pool.kill_tasks( task_ids )
+    def command_kill_tasks(self, name, point_string, is_family):
+        """Kill all tasks or a task/family if options are provided."""
+        if name and point_string:
+            matches = self.get_matching_task_names(name, is_family)
+            if not matches:
+                raise TaskNotFoundError("No matching tasks found: %s" % name)
+            point_string = self.get_standardised_point_string(point_string)
+            task_ids = [TaskID.get(i, point_string) for i in matches]
+            self.pool.kill_task_jobs(task_ids)
+        else:
+            self.pool.kill_task_jobs()
 
-    def command_release_suite( self ):
+    def command_release_suite(self):
         self.release_suite()
 
-    def command_hold_task( self, name, point_string, is_family ):
-        matches = self.get_matching_task_names( name, is_family )
+    def command_hold_task(self, name, point_string, is_family):
+        matches = self.get_matching_task_names(name, is_family)
         if not matches:
-            raise TaskNotFoundError, "No matching tasks found: " + name
-        point_string = standardise_point_string(point_string)
+            raise TaskNotFoundError("No matching tasks found: %s" % name)
+        point_string = self.get_standardised_point_string(point_string)
         task_ids = [TaskID.get(i, point_string) for i in matches]
-        self.pool.hold_tasks( task_ids )
+        self.pool.hold_tasks(task_ids)
 
-    def command_hold_suite( self ):
+    def command_hold_suite(self):
         self.hold_suite()
 
-    def command_hold_after_point_string( self, point_string ):
+    def command_hold_after_point_string(self, point_string):
         """Hold tasks AFTER this point (itask.point > point)."""
-        point = get_point(point_string)
-        point.standardise()
-        self.hold_suite( point )
+        point_string = self.get_standardised_point_string(point_string)
+        point = self.get_standardised_point(point_string)
+        self.hold_suite(point)
         self.log.info(
             "The suite will pause when all tasks have passed " + point_string)
 
     def command_set_verbosity(self, lvl):
         # (lvl legality checked by CLI)
         self.log.setLevel(lvl)
-        flags.debug = (lvl == logging.DEBUG)
+        cylc.flags.debug = (lvl == logging.DEBUG)
         return True, 'OK'
 
-    def command_remove_cycle( self, point_string, spawn ):
-        point = get_point(point_string)
-        point.standardise()
-        self.pool.remove_entire_cycle( point, spawn )
+    def command_remove_cycle(self, point_string, spawn):
+        point = self.get_standardised_point(point_string)
+        self.pool.remove_entire_cycle(point, spawn)
 
-    def command_remove_task( self, name, point_string, is_family, spawn ):
-        matches = self.get_matching_task_names( name, is_family )
+    def command_remove_task(self, name, point_string, is_family, spawn):
+        matches = self.get_matching_task_names(name, is_family)
         if not matches:
-            raise TaskNotFoundError, "No matching tasks found: " + name
-        point_string = standardise_point_string(point_string)
+            raise TaskNotFoundError("No matching tasks found: %s" % name)
+        point_string = self.get_standardised_point_string(point_string)
         task_ids = [TaskID.get(i, point_string) for i in matches]
-        self.pool.remove_tasks( task_ids, spawn )
+        self.pool.remove_tasks(task_ids, spawn)
 
-    def command_insert_task( self, name, point_string, is_family,
-                             stop_point_string ):
-        matches = self.get_matching_task_names( name, is_family )
+    def command_insert_task(self, name, point_string, is_family,
+                            stop_point_string):
+        matches = self.get_matching_task_names(name, is_family)
         if not matches:
-            raise TaskNotFoundError, "No matching tasks found: " + name
+            raise TaskNotFoundError("No matching tasks found: %s" % name)
+        point_string = self.get_standardised_point_string(point_string)
         task_ids = [TaskID.get(i, point_string) for i in matches]
-
-        try:
-            point = get_point(point_string).standardise()
-        except PointParsingError as exc:
-            self.log.critical(
-                "%s: invalid cycle point for inserted task (%s)" % (
-                    point_string, exc)
-            )
-            return
-
+        point = get_point(point_string)
         if stop_point_string is None:
             stop_point = None
         else:
-            try:
-                stop_point = get_point(stop_point_string).standardise()
-            except PointParsingError as exc:
-                self.log.critical(
-                    "%s: invalid stop cycle point for inserted task (%s)" % (
-                        stop_point_string, exc)
-                )
-                return
-
+            stop_point_string = self.get_standardised_point_string(
+                stop_point_string)
+            stop_point = get_point(stop_point_string)
+        task_states_data = self.pri_dao.select_task_states_by_task_ids(
+            ["submit_num"], [TaskID.split(task_id) for task_id in task_ids])
         for task_id in task_ids:
-            name, task_point_string = TaskID.split(task_id)
-            # TODO - insertion of start-up tasks? (startup=False is assumed here)
-            new_task = self.config.get_task_proxy(
-                name, point, 'waiting', stop_point,
-                submit_num=self.db.get_task_current_submit_num(
-                    name, task_point_string),
-            )
+            task_name, task_point = TaskID.split(task_id)
+            # TODO - insertion of start-up tasks? (startup=False assumed here)
+            submit_num = None
+            if (task_name, task_point) in task_states_data:
+                submit_num = task_states_data[(task_name, task_point)].get(
+                    "submit_num")
+            new_task = get_task_proxy(
+                task_name, point, 'waiting', stop_point, submit_num=submit_num)
             if new_task:
-                self.pool.add_to_runahead_pool( new_task )
+                self.pool.add_to_runahead_pool(new_task)
 
-    def command_nudge( self ):
+    def command_nudge(self):
         # just to cause the task processing loop to be invoked
         pass
 
-    def command_reload_suite( self ):
+    def command_reload_suite(self):
         self.reconfigure()
 
-    def command_set_runahead( self, *args  ):
+    def command_set_runahead(self, *args):
         self.pool.set_runahead(*args)
 
-    def set_suite_timer( self, reset=False ):
-        self.suite_timer_timeout = time.time() + (       
+    def set_suite_timer(self, reset=False):
+        self.suite_timer_timeout = time.time() + (
             self.config.cfg['cylc']['event hooks']['timeout']
         )
-        if flags.verbose:
+        if cylc.flags.verbose:
             print "%s suite timer starts NOW: %s" % (
                 get_seconds_as_interval_string(
                     self.config.cfg['cylc']['event hooks']['timeout']),
                 get_current_time_string()
             )
 
-    def reconfigure( self ):
+    def reconfigure(self):
         print "RELOADING the suite definition"
-        self.configure_suite( reconfigure=True )
+        self.configure_suite(reconfigure=True)
 
-        self.pool.reconfigure( self.config, self.final_point )
+        self.pool.reconfigure(self.final_point)
 
-        self.suite_state.config = self.config
         self.configure_suite_environment()
 
         if self.gen_reference_log or self.reference_test_mode:
             self.configure_reftest(recon=True)
 
         # update the SuiteStateDumper state
-        self.state_dumper.set_cts( self.initial_point, self.final_point )
+        self.state_dumper.set_cts(self.initial_point, self.final_point)
 
     def parse_commandline(self):
         if self.options.run_mode not in [
-                'live',
-                'dummy',
-                'simulation'
-                ]:
+                'live', 'dummy', 'simulation']:
             self.parser.error(
-                    'Illegal run mode: %s\n' % self.options.run_mode)
+                'Illegal run mode: %s\n' % self.options.run_mode)
         self.run_mode = self.options.run_mode
 
-        if flags.debug:
+        if cylc.flags.debug:
             self.logging_level = logging.DEBUG
         else:
             self.logging_level = logging.INFO
@@ -597,32 +604,34 @@ class scheduler(object):
             self.gen_reference_log = self.options.genref
 
     def configure_pyro(self):
-        self.pyro = pyro_server( self.suite, self.suite_dir,
-                GLOBAL_CFG.get( ['pyro','base port'] ),
-                GLOBAL_CFG.get( ['pyro','maximum number of ports'] ) )
+        self.pyro = PyroDaemon(self.suite)
+        pphrase = passphrase(
+            self.suite, user, get_suite_host()).get(suitedir=self.suite_dir)
+        self.pyro.set_auth(pphrase)
         self.port = self.pyro.get_port()
-
         try:
-            self.port_file = port_file( self.suite, self.port )
-        except PortFileExistsError,x:
+            self.portfile = PortFile(self.suite, self.port)
+        except PortFileExistsError, x:
             print >> sys.stderr, x
-            raise SchedulerError( 'Suite already running? (if not, delete the port file)' )
-        except PortFileError,x:
-            raise SchedulerError( str(x) )
+            raise SchedulerError(
+                'Suite already running? (if not, delete the port file)')
+        except PortFileError, x:
+            raise SchedulerError(str(x))
 
     def load_suiterc(self, reconfigure):
         """Load and log the suite definition."""
 
-        self.config = config(
+        SuiteConfig._FORCE = True  # Reset the singleton!
+        self.config = SuiteConfig.get_inst(
             self.suite, self.suiterc,
             self.options.templatevars,
             self.options.templatevars_file, run_mode=self.run_mode,
             cli_initial_point_string=self._cli_initial_point_string,
             cli_start_point_string=self._cli_start_point_string,
             cli_final_point_string=self.options.final_point_string,
-            is_restart=self.is_restart, is_reload=reconfigure
+            is_restart=self.is_restart, is_reload=reconfigure,
+            mem_log_func=self.log_memory
         )
-
         # Dump the loaded suiterc for future reference.
         cfg_logdir = GLOBAL_CFG.get_derived_host_item(
             self.suite, 'suite config log directory')
@@ -647,7 +656,7 @@ class scheduler(object):
         printcfg(self.config.cfg, handle=handle)
         handle.close()
 
-    def configure_suite( self, reconfigure=False ):
+    def configure_suite(self, reconfigure=False):
         """Load and process the suite definition."""
 
         if self.is_restart:
@@ -671,106 +680,152 @@ class scheduler(object):
             self.final_point.standardise()
 
         if (not self.initial_point and not self.is_restart):
-            print >> sys.stderr, 'WARNING: No initial cycle point provided - no cycling tasks will be loaded.'
+            print >> sys.stderr, (
+                'WARNING: No initial cycle point provided ' +
+                '- no cycling tasks will be loaded.')
 
         if self.run_mode != self.config.run_mode:
             self.run_mode = self.config.run_mode
 
         if not reconfigure:
+            # Things that can't change on suite reload.
+
             self.state_dumper = SuiteStateDumper(
                 self.suite, self.run_mode, self.initial_point,
                 self.final_point)
 
             run_dir = GLOBAL_CFG.get_derived_host_item(
-                    self.suite, 'suite run directory' )
-            if not self.is_restart:     # create new suite_db file (and dir) if needed
-                self.db = cylc.rundb.CylcRuntimeDAO(suite_dir=run_dir, new_mode=True)
-                self.view_db = cylc.rundb.CylcRuntimeDAO(suite_dir=run_dir, new_mode=True, primary_db=False)
+                self.suite, 'suite run directory')
+            pri_db_path = os.path.join(
+                run_dir, 'state', CylcSuiteDAO.DB_FILE_BASE_NAME)
+            pub_db_path = os.path.join(
+                run_dir, CylcSuiteDAO.DB_FILE_BASE_NAME)
+            if self.is_restart:
+                if (os.path.exists(pub_db_path) and
+                        not os.path.exists(pri_db_path)):
+                    # Backwards compatibility code for restarting at the move
+                    # to the new db location; delete at database refactoring.
+                    print('Copy "cylc.suite.db" to "state/cylc.suite.db"')
+                    copyfile(pub_db_path, pri_db_path)
             else:
-                # Backwards compatibility code for restarting at move to new db location
-                # should be deleted at database refactoring
-                primary = os.path.join(run_dir, 'state', cylc.rundb.CylcRuntimeDAO.DB_FILE_BASE_NAME)
-                viewable = os.path.join(run_dir, cylc.rundb.CylcRuntimeDAO.DB_FILE_BASE_NAME)
-                if not os.path.exists(primary) and os.path.exists(viewable):
-                    print "[info] copying across old suite database to state directory"
-                    shcopy(viewable, primary)
-                self.db = cylc.rundb.CylcRuntimeDAO(suite_dir=run_dir)
-                self.view_db = cylc.rundb.CylcRuntimeDAO(suite_dir=run_dir, primary_db=False)
+                # Remove database created by previous runs
+                if os.path.isdir(pri_db_path):
+                    shutil.rmtree(pri_db_path)
+                else:
+                    try:
+                        os.unlink(pri_db_path)
+                    except:
+                        pass
+            # Ensure that:
+            # * public database is in sync with private database
+            # * private database file is private
+            self.pri_dao = CylcSuiteDAO(pri_db_path)
+            os.chmod(pri_db_path, 0600)
+            if self.is_restart:
+                sys.stdout.write("Rebuilding the suite db ...")
+                self.pri_dao.vacuum()
+                sys.stdout.write(" done\n")
+            self.pub_dao = CylcSuiteDAO(pub_db_path, is_public=True)
+            self._copy_pri_db_to_pub_db()
 
             self.hold_suite_now = False
             self._pool_hold_point = None
+
+            if self.config.cfg['scheduling']['hold after point']:
+                self._pool_hold_point = get_point(
+                    self.config.cfg['scheduling']['hold after point'])
+
             if self.options.hold_point_string:
                 self._pool_hold_point = get_point(
                     self.options.hold_point_string)
 
-        # Running in UTC time? (else just use the system clock)
-        flags.utc = self.config.cfg['cylc']['UTC mode']
-
-        # Capture cycling mode
-        flags.cycling_mode = self.config.cfg['scheduling']['cycling mode']
+            if self._pool_hold_point:
+                print "Suite will hold after " + str(self._pool_hold_point)
 
-        if not reconfigure:
-            slog = suite_log( self.suite )
+            slog = suite_log(self.suite)
             self.suite_log_dir = slog.get_dir()
-            slog.pimp( self.logging_level )
+            slog.pimp(self.logging_level)
             self.log = slog.get_log()
             self.logfile = slog.get_path()
 
-            self.command_queue = comqueue( self.control_commands.keys() )
-            self.pyro.connect( self.command_queue, 'command-interface' )
+            suite_id = SuiteIdServer.get_inst(self.suite, self.owner)
+            self.pyro.connect(suite_id, PYRO_SUITEID_OBJ_NAME)
 
-            self.info_interface = info_interface( self.info_commands )
-            self.pyro.connect( self.info_interface, 'suite-info' )
+            bcast = BroadcastServer.get_inst(
+                self.config.get_linearized_ancestors())
+            self.pyro.connect(bcast, PYRO_BCAST_OBJ_NAME)
 
-            self.log_interface = log_interface( slog )
-            self.pyro.connect( self.log_interface, 'log' )
+            self.command_queue = SuiteCommandServer()
+            self.pyro.connect(self.command_queue, PYRO_CMD_OBJ_NAME)
 
-            self.log.info( "port:" +  str( self.port ))
+            ets = ExtTriggerServer.get_inst()
+            self.pyro.connect(ets, PYRO_EXT_TRIG_OBJ_NAME)
 
-    def configure_suite_environment( self ):
+            self.info_interface = SuiteInfoServer(self.info_commands)
+            self.pyro.connect(self.info_interface, PYRO_INFO_OBJ_NAME)
+
+            self.log_interface = SuiteLogServer(slog)
+            self.pyro.connect(self.log_interface, PYRO_LOG_OBJ_NAME)
+
+            self.suite_state = StateSummaryServer.get_inst(self.run_mode)
+            self.pyro.connect(self.suite_state, PYRO_STATE_OBJ_NAME)
+
+            self.log.info("port:" + str(self.port))
+
+    def configure_suite_environment(self):
         # static cylc and suite-specific variables:
         self.suite_env = {
-                'CYLC_UTC'               : str(flags.utc),
-                'CYLC_CYCLING_MODE'      : str(flags.cycling_mode),
-                'CYLC_MODE'              : 'scheduler',
-                'CYLC_DEBUG'             : str( flags.debug ),
-                'CYLC_VERBOSE'           : str( flags.verbose ),
-                'CYLC_DIR_ON_SUITE_HOST' : os.environ[ 'CYLC_DIR' ],
-                'CYLC_SUITE_NAME'        : self.suite,
-                'CYLC_SUITE_REG_NAME'    : self.suite, # DEPRECATED
-                'CYLC_SUITE_HOST'        : str( self.host ),
-                'CYLC_SUITE_OWNER'       : self.owner,
-                'CYLC_SUITE_PORT'        :  str( self.pyro.get_port()),
-                'CYLC_SUITE_REG_PATH'    : RegPath( self.suite ).get_fpath(), # DEPRECATED
-                'CYLC_SUITE_DEF_PATH_ON_SUITE_HOST' : self.suite_dir,
-                'CYLC_SUITE_INITIAL_CYCLE_POINT' : str( self.initial_point ), # may be "None"
-                'CYLC_SUITE_FINAL_CYCLE_POINT'   : str( self.final_point ), # may be "None"
-                'CYLC_SUITE_INITIAL_CYCLE_TIME' : str( self.initial_point ), # may be "None"
-                'CYLC_SUITE_FINAL_CYCLE_TIME'   : str( self.final_point ), # may be "None"
-                'CYLC_SUITE_LOG_DIR'     : self.suite_log_dir # needed by the test battery
-                }
+            'CYLC_UTC': str(cylc.flags.utc),
+            'CYLC_CYCLING_MODE': str(cylc.flags.cycling_mode),
+            'CYLC_MODE': 'scheduler',
+            'CYLC_DEBUG': str(cylc.flags.debug),
+            'CYLC_VERBOSE': str(cylc.flags.verbose),
+            'CYLC_DIR_ON_SUITE_HOST': os.environ['CYLC_DIR'],
+            'CYLC_SUITE_NAME': self.suite,
+            'CYLC_SUITE_REG_NAME': self.suite,  # DEPRECATED
+            'CYLC_SUITE_HOST': str(self.host),
+            'CYLC_SUITE_OWNER': self.owner,
+            'CYLC_SUITE_PORT': str(self.pyro.get_port()),
+            # DEPRECATED
+            'CYLC_SUITE_REG_PATH': RegPath(self.suite).get_fpath(),
+            'CYLC_SUITE_DEF_PATH_ON_SUITE_HOST': self.suite_dir,
+            # may be "None"
+            'CYLC_SUITE_INITIAL_CYCLE_POINT': str(self.initial_point),
+            # may be "None"
+            'CYLC_SUITE_FINAL_CYCLE_POINT': str(self.final_point),
+            # may be "None"
+            'CYLC_SUITE_INITIAL_CYCLE_TIME': str(self.initial_point),
+            # may be "None"
+            'CYLC_SUITE_FINAL_CYCLE_TIME': str(self.final_point),
+            # needed by the test battery
+            'CYLC_SUITE_LOG_DIR': self.suite_log_dir,
+        }
 
         # Contact details for remote tasks, written to file on task
         # hosts because the details can change on restarting a suite.
         self.suite_contact_env = {
-                'CYLC_SUITE_NAME' : self.suite_env['CYLC_SUITE_NAME'],
-                'CYLC_SUITE_HOST' : self.suite_env['CYLC_SUITE_HOST'],
-                'CYLC_SUITE_OWNER' : self.suite_env['CYLC_SUITE_OWNER'],
-                'CYLC_SUITE_PORT' : self.suite_env['CYLC_SUITE_PORT'],
-                'CYLC_VERSION' : CYLC_VERSION
-                }
+            'CYLC_SUITE_NAME': self.suite_env['CYLC_SUITE_NAME'],
+            'CYLC_SUITE_HOST': self.suite_env['CYLC_SUITE_HOST'],
+            'CYLC_SUITE_OWNER': self.suite_env['CYLC_SUITE_OWNER'],
+            'CYLC_SUITE_PORT': self.suite_env['CYLC_SUITE_PORT'],
+            'CYLC_VERSION': CYLC_VERSION
+        }
 
         # Set local values of variables that are potentially task-specific
         # due to different directory paths on different task hosts. These
         # are overridden by tasks prior to job submission, but in
         # principle they could be needed locally by event handlers:
         self.suite_task_env = {
-                'CYLC_SUITE_RUN_DIR' : GLOBAL_CFG.get_derived_host_item(self.suite, 'suite run directory'),
-                'CYLC_SUITE_WORK_DIR' : GLOBAL_CFG.get_derived_host_item(self.suite, 'suite work directory'),
-                'CYLC_SUITE_SHARE_DIR' : GLOBAL_CFG.get_derived_host_item(self.suite, 'suite share directory'),
-                'CYLC_SUITE_SHARE_PATH' : '$CYLC_SUITE_SHARE_DIR', # DEPRECATED
-                'CYLC_SUITE_DEF_PATH' : self.suite_dir}
-        # (note global config automatically expands environment variables in local paths)
+            'CYLC_SUITE_RUN_DIR': GLOBAL_CFG.get_derived_host_item(
+                self.suite, 'suite run directory'),
+            'CYLC_SUITE_WORK_DIR': GLOBAL_CFG.get_derived_host_item(
+                self.suite, 'suite work directory'),
+            'CYLC_SUITE_SHARE_DIR': GLOBAL_CFG.get_derived_host_item(
+                self.suite, 'suite share directory'),
+            'CYLC_SUITE_SHARE_PATH': '$CYLC_SUITE_SHARE_DIR',  # DEPRECATED
+            'CYLC_SUITE_DEF_PATH': self.suite_dir
+        }
+        # (global config auto expands environment variables in local paths)
 
         # Pass these to the job script generation code.
         JOB_FILE.set_suite_env(self.suite_env)
@@ -781,87 +836,120 @@ class scheduler(object):
             os.environ[var] = val
         for var, val in self.suite_task_env.items():
             os.environ[var] = val
-        cenv = self.config.cfg['cylc']['environment']
+        cenv = copy(self.config.cfg['cylc']['environment'])
         for var, val in cenv.items():
             cenv[var] = os.path.expandvars(val)
         # path to suite bin directory for suite and task event handlers
-        cenv['PATH'] = self.suite_dir + '/bin:' + os.environ['PATH']
+        cenv['PATH'] = os.pathsep.join([
+            os.path.join(self.suite_dir, 'bin'), os.environ['PATH']])
 
-        # make [cylc][environment] available to task event handlers in worker processes
+        # Make [cylc][environment] available to task event handlers in worker
+        # processes,
         TaskProxy.event_handler_env = cenv
-        # make [cylc][environment] available to suite event handlers in this process
+        # and to suite event handlers in this process.
         for var, val in cenv.items():
             os.environ[var] = val
 
-    def configure_reftest( self, recon=False ):
+    def configure_reftest(self, recon=False):
         if self.gen_reference_log:
             self.config.cfg['cylc']['log resolved dependencies'] = True
 
         elif self.reference_test_mode:
-            req = self.config.cfg['cylc']['reference test']['required run mode']
+            rtc = self.config.cfg['cylc']['reference test']
+            req = rtc['required run mode']
             if req and req != self.run_mode:
-                raise SchedulerError, 'ERROR: this suite allows only ' + req + ' mode reference tests'
-            handlers = self.config.cfg['cylc']['event hooks']['shutdown handler']
+                raise SchedulerError(
+                    'ERROR: suite allows only ' + req + ' reference tests')
+            handlers = self.config.cfg[
+                'cylc']['event hooks']['shutdown handler']
             if handlers:
-                print >> sys.stderr, 'WARNING: replacing shutdown event handlers for reference test run'
-            self.config.cfg['cylc']['event hooks']['shutdown handler'] = [ self.config.cfg['cylc']['reference test']['suite shutdown event handler'] ]
+                print >> sys.stderr, (
+                    'WARNING: replacing shutdown handlers for reference test')
+            self.config.cfg['cylc']['event hooks']['shutdown handler'] = [
+                rtc['suite shutdown event handler']]
             self.config.cfg['cylc']['log resolved dependencies'] = True
-            self.config.cfg['cylc']['event hooks']['abort if shutdown handler fails'] = True
+            self.config.cfg['cylc']['event hooks'][
+                'abort if shutdown handler fails'] = True
             if not recon:
-                spec = LogSpec( self.reflogfile )
-                self.initial_point = get_point( spec.get_initial_point_string() )
-                self.start_point = get_point( spec.get_start_point_string() ) or self.initial_point
-                self.final_point = get_point( spec.get_final_point_string() )
-            self.ref_test_allowed_failures = self.config.cfg['cylc']['reference test']['expected task failures']
-            if not self.config.cfg['cylc']['reference test']['allow task failures'] and len( self.ref_test_allowed_failures ) == 0:
+                spec = LogSpec(self.reflogfile)
+                self.initial_point = get_point(spec.get_initial_point_string())
+                self.start_point = get_point(
+                    spec.get_start_point_string()) or self.initial_point
+                self.final_point = get_point(spec.get_final_point_string())
+            self.ref_test_allowed_failures = rtc['expected task failures']
+            if (not rtc['allow task failures'] and
+                    not self.ref_test_allowed_failures):
                 self.config.cfg['cylc']['abort if any task fails'] = True
             self.config.cfg['cylc']['event hooks']['abort on timeout'] = True
-            timeout = self.config.cfg['cylc']['reference test'][ self.run_mode + ' mode suite timeout' ]
+            timeout = rtc[self.run_mode + ' mode suite timeout']
             if not timeout:
-                raise SchedulerError, 'ERROR: suite timeout not defined for ' + self.run_mode + ' mode reference test'
+                raise SchedulerError(
+                    'ERROR: timeout not defined for %s reference tests' % (
+                        self.run_mode))
             self.config.cfg['cylc']['event hooks']['timeout'] = timeout
             self.config.cfg['cylc']['event hooks']['reset timer'] = False
 
-    def run_event_handlers( self, name, fg, msg ):
-        if self.run_mode != 'live' or \
-                ( self.run_mode == 'simulation' and \
-                        self.config.cfg['cylc']['simulation mode']['disable suite event hooks'] ) or \
-                ( self.run_mode == 'dummy' and \
-                        self.config.cfg['cylc']['dummy mode']['disable suite event hooks'] ):
+    def run_event_handlers(self, name, fg, msg):
+        if (self.run_mode != 'live' or
+            (self.run_mode == 'simulation' and
+                self.config.cfg[
+                    'cylc']['simulation mode']['disable suite event hooks']) or
+            (self.run_mode == 'dummy' and
+                self.config.cfg[
+                    'cylc']['dummy mode']['disable suite event hooks'])):
             return
- 
+
         handlers = self.config.cfg['cylc']['event hooks'][name + ' handler']
         if handlers:
             for handler in handlers:
                 try:
-                    RunHandler( name, handler, self.suite, msg=msg, fg=fg )
+                    RunHandler(name, handler, self.suite, msg=msg, fg=fg)
                 except Exception, x:
                     # Note: test suites depend on this message:
-                    print >> sys.stderr, '\nERROR: ' + name + ' EVENT HANDLER FAILED'
-                    raise SchedulerError, x
+                    sys.stderr.write(
+                        'ERROR: %s EVENT HANDLER FAILED\n' % name)
                     if name == 'shutdown' and self.reference_test_mode:
-                            sys.exit( '\nERROR: SUITE REFERENCE TEST FAILED' )
+                        sys.stderr.write(
+                            'ERROR: SUITE REFERENCE TEST FAILED\n')
+                    raise SchedulerError(x)
                 else:
                     if name == 'shutdown' and self.reference_test_mode:
                         # TODO - this isn't true, it just means the
                         # shutdown handler ran successfully:
-                        print '\nSUITE REFERENCE TEST PASSED'
+                        print 'SUITE REFERENCE TEST PASSED'
 
-    def run( self ):
+    def run(self):
 
         if self._pool_hold_point is not None:
-            # TODO - HANDLE STOP AND PAUSE TIMES THE SAME WAY?
-            self.hold_suite( self._pool_hold_point )
+            self.hold_suite(self._pool_hold_point)
 
         if self.options.start_held:
-            self.log.info( "Held on start-up (no tasks will be submitted)")
+            self.log.info("Held on start-up (no tasks will be submitted)")
             self.hold_suite()
 
-        abort = self.config.cfg['cylc']['event hooks']['abort if startup handler fails']
-        self.run_event_handlers( 'startup', abort, 'suite starting' )
+        abort = self.config.cfg[
+            'cylc']['event hooks']['abort if startup handler fails']
+        self.run_event_handlers('startup', abort, 'suite starting')
 
+        self.log_memory("scheduler.py: begin run while loop")
         proc_pool = SuiteProcPool.get_inst()
-        while True: # MAIN LOOP
+
+        next_fs_check = time.time() + self.FS_CHECK_PERIOD
+
+        suite_run_dir = GLOBAL_CFG.get_derived_host_item(
+            self.suite, 'suite run directory')
+
+        while True:  # MAIN LOOP
+
+            # Periodic check that the suite directory still exists
+            # - designed to catch stalled suite daemons where the suite
+            # directory has been deleted out from under itself
+            if time.time() > next_fs_check:
+                if not os.path.exists(suite_run_dir):
+                    os.kill(os.getpid(), signal.SIGKILL)
+                else:
+                    next_fs_check = time.time() + self.FS_CHECK_PERIOD
+
             # PROCESS ALL TASKS whenever something has changed that might
             # require renegotiation of dependencies, etc.
 
@@ -870,8 +958,10 @@ class scheduler(object):
                 while not proc_pool.is_dead():
                     proc_pool.handle_results_async()
                     if not warned:
-                        print "Waiting for the command process pool to empty for shutdown"
-                        print "(you can \"stop now\" to shut down immediately if you like)."
+                        print ("Waiting for the command process " +
+                               "pool to empty for shutdown")
+                        print ("(you can \"stop now\" to shut " +
+                               "down immediately if you like).")
                         warned = True
                     self.process_command_queue()
                     time.sleep(0.5)
@@ -887,42 +977,59 @@ class scheduler(object):
 
             proc_pool.handle_results_async()
 
+            # External triggers must be matched now. If any are matched pflag
+            # is set to tell process_tasks() that task processing is required.
+            self.pool.match_ext_triggers()
+
             if self.process_tasks():
-                if flags.debug:
-                    self.log.debug( "BEGIN TASK PROCESSING" )
+                if cylc.flags.debug:
+                    self.log.debug("BEGIN TASK PROCESSING")
                     main_loop_start_time = time.time()
 
                 self.pool.match_dependencies()
-
-                ready_tasks = self.pool.submit_tasks()
-                if (ready_tasks and
-                        self.config.cfg['cylc']['log resolved dependencies']):
-                    self.log_resolved_deps(ready_tasks)
-
+                if not self.shut_down_cleanly:
+                    self.pool.submit_tasks()
                 self.pool.spawn_tasks()
-
                 self.pool.remove_spent_tasks()
                 self.pool.remove_suiciding_tasks()
 
                 self.do_update_state_summary = True
 
-                self.pool.wireless.expire( self.pool.get_min_point() )
+                BroadcastServer.get_inst().expire(self.pool.get_min_point())
 
-                if flags.debug:
+                if cylc.flags.debug:
                     seconds = time.time() - main_loop_start_time
-                    self.log.debug( "END TASK PROCESSING (took " + str( seconds ) + " sec)" )
+                    self.log.debug(
+                        "END TASK PROCESSING (took " + str(seconds) + " sec)")
 
             self.pool.process_queued_task_messages()
+            self.pool.process_queued_task_event_handlers()
             try:
                 self.pool.process_queued_db_ops()
             except OSError as err:
                 self.shutdown(str(err))
                 raise
+            # If public database is stuck, blast it away by copying the content
+            # of the private database into it.
+            if self.pub_dao.n_tries >= self.pub_dao.MAX_TRIES:
+                try:
+                    self._copy_pri_db_to_pub_db()
+                except (IOError, OSError) as exc:
+                    # Something has to be very wrong here, so stop the suite
+                    self.shutdown(str(exc))
+                    raise
+                else:
+                    # No longer stuck
+                    self.log.warning(
+                        "%(pub_db_name)s: recovered from %(pri_db_name)s" % {
+                            "pub_db_name": self.pub_dao.db_file_name,
+                            "pri_db_name": self.pri_dao.db_file_name})
+                    self.pub_dao.n_tries = 0
 
             self.process_command_queue()
 
-            if flags.iflag or self.do_update_state_summary:
-                flags.iflag = False
+            if cylc.flags.iflag or self.do_update_state_summary:
+                cylc.flags.iflag = False
                 self.do_update_state_summary = False
                 self.update_state_summary()
                 self.state_dumper.dump()
@@ -932,16 +1039,18 @@ class scheduler(object):
 
             if self.config.cfg['cylc']['abort if any task fails']:
                 if self.pool.any_task_failed():
-                    raise SchedulerError( 'One or more tasks failed and "abort if any task fails" is set' )
+                    raise SchedulerError(
+                        'Task(s) failed and "abort if any task fails" is set')
 
             # the run is a reference test, and unexpected failures occurred
             if self.reference_test_mode:
-                if len( self.ref_test_allowed_failures ) > 0:
+                if len(self.ref_test_allowed_failures) > 0:
                     for itask in self.pool.get_failed_tasks():
                         if (itask.identity not in
                                 self.ref_test_allowed_failures):
                             print >>sys.stderr, itask.identity
-                            raise SchedulerError( 'A task failed unexpectedly: not in allowed failures list' )
+                            raise SchedulerError(
+                                'Failed task is not in allowed failures list')
 
             # check submission and execution timeout and polling timers
             if self.run_mode != 'simulation':
@@ -952,23 +1061,36 @@ class scheduler(object):
             if self.stop_clock_done() or self.stop_task_done() or auto_stop:
                 self.command_set_stop_cleanly()
 
-            if ((self.shut_down_cleanly or auto_stop) and 
+            if ((self.shut_down_cleanly or auto_stop) and
                     self.pool.no_active_tasks()):
                 proc_pool.close()
                 self.shut_down_now = True
 
+            if (self.shut_down_cleanly and self.kill_on_shutdown):
+                if self.pool.has_unkillable_tasks_only():
+                    if not self.pool.no_active_tasks():
+                        self.log.warning(
+                            'some tasks were not killable at shutdown')
+                    proc_pool.close()
+                    self.shut_down_now = True
+                else:
+                    if time.time() > self.next_kill_issue:
+                        self.pool.poll_task_jobs()
+                        self.pool.kill_task_jobs()
+                        self.next_kill_issue = time.time() + 10.0
+
             if self.options.profile_mode:
                 t1 = time.time()
                 self._update_profile_info("scheduler loop dt (s)", t1 - t0,
                                           amount_format="%.3f")
                 self._update_cpu_usage()
-                self._update_profile_info(
-                        "jobqueue.qsize",
-                        float(self.pool.jobqueue.qsize()),
-                        amount_format="%.1f")
-
+                if (int(t1) % 60 == 0):
+                    # Only get this every minute.
+                    self.log_memory("scheduler.py: loop: " +
+                                    get_current_time_string())
             time.sleep(1)
 
+        self.log_memory("scheduler.py: end main loop")
         # END MAIN LOOP
 
     def update_state_summary(self):
@@ -977,18 +1099,9 @@ class scheduler(object):
             self.pool.get_min_point(), self.pool.get_max_point(),
             self.pool.get_max_point_runahead(), self.paused(),
             self.will_pause_at(), self.shut_down_cleanly, self.will_stop_at(),
-            self.config.ns_defn_order)
-
-    def log_resolved_deps(self, ready_tasks):
-        """Log what triggered off what."""
-        # Used in reference tests.
-        for itask in ready_tasks:
-            itask.log(
-                    logging.INFO, 'triggered off %s' %
-                    str(itask.get_resolved_dependencies())
-                    )
-
-    def check_suite_timer( self ):
+            self.config.ns_defn_order, self.pool.reconfiguring)
+
+    def check_suite_timer(self):
         if self.already_timed_out:
             return
         if time.time() > self.suite_timer_timeout:
@@ -997,24 +1110,25 @@ class scheduler(object):
                 get_seconds_as_interval_string(
                     self.config.cfg['cylc']['event hooks']['timeout'])
             )
-            self.log.warning( message )
-            abort = self.config.cfg['cylc']['event hooks']['abort if timeout handler fails']
-            self.run_event_handlers( 'timeout', abort, message )
+            self.log.warning(message)
+            abort = self.config.cfg[
+                'cylc']['event hooks']['abort if timeout handler fails']
+            self.run_event_handlers('timeout', abort, message)
             if self.config.cfg['cylc']['event hooks']['abort on timeout']:
-                raise SchedulerError, 'Abort on suite timeout is set'
+                raise SchedulerError('Abort on suite timeout is set')
 
-    def process_tasks( self ):
+    def process_tasks(self):
         # do we need to do a pass through the main task processing loop?
         process = False
 
         if self.do_process_tasks:
             # this flag is turned on by commands that change task state
             process = True
-            self.do_process_tasks = False # reset
+            self.do_process_tasks = False  # reset
 
-        if flags.pflag:
+        if cylc.flags.pflag:
             process = True
-            flags.pflag = False # reset
+            cylc.flags.pflag = False  # reset
             # New suite activity, so reset the suite timer.
             if (self.config.cfg['cylc']['event hooks']['timeout'] and
                     self.config.cfg['cylc']['event hooks']['reset timer']):
@@ -1026,30 +1140,31 @@ class scheduler(object):
         if self.run_mode == 'simulation' and self.pool.sim_time_check():
             process = True
 
-        ##if not process:
-        ##    # If we neglect to set flags.pflag on some event that
-        ##    # makes re-negotiation of dependencies necessary then if
-        ##    # that event ever happens in isolation the suite could stall
-        ##    # unless manually nudged ("cylc nudge SUITE").  If this
-        ##    # happens turn on debug logging to see what happens
-        ##    # immediately before the stall, then set flags.pflag = True in
-        ##    # the corresponding code section. Alternatively,
-        ##    # for an undiagnosed stall you can uncomment this section to
-        ##    # stimulate task processing every few seconds even during
-        ##    # lulls in activity.  THIS SHOULD NOT BE NECESSARY, HOWEVER.
-        ##    if not self.nudge_timer_on:
-        ##        self.nudge_timer_start = now()
-        ##        self.nudge_timer_on = True
-        ##    else:
-        ##        timeout = self.nudge_timer_start + \
-        ##              datetime.timedelta( seconds=self.auto_nudge_interval )
-        ##      if now() > timeout:
-        ##          process = True
-        ##          self.nudge_timer_on = False
+        # if not process:
+        #    # If we neglect to set cylc.flags.pflag on some event that
+        #    # makes re-negotiation of dependencies necessary then if
+        #    # that event ever happens in isolation the suite could stall
+        #    # unless manually nudged ("cylc nudge SUITE").  If this
+        #    # happens turn on debug logging to see what happens
+        #    # immediately before the stall,
+        #    # then set cylc.flags.pflag = True in
+        #    # the corresponding code section. Alternatively,
+        #    # for an undiagnosed stall you can uncomment this section to
+        #    # stimulate task processing every few seconds even during
+        #    # lulls in activity.  THIS SHOULD NOT BE NECESSARY, HOWEVER.
+        #    if not self.nudge_timer_on:
+        #        self.nudge_timer_start = now()
+        #        self.nudge_timer_on = True
+        #    else:
+        #        timeout = self.nudge_timer_start + \
+        #              datetime.timedelta(seconds=self.auto_nudge_interval)
+        #      if now() > timeout:
+        #          process = True
+        #          self.nudge_timer_on = False
 
         return process
 
-    def shutdown( self, reason='' ):
+    def shutdown(self, reason=''):
         msg = "Suite shutting down at " + get_current_time_string()
         if reason:
             msg += ' (' + reason + ')'
@@ -1063,7 +1178,7 @@ class scheduler(object):
 
         if self.gen_reference_log:
             print '\nCOPYING REFERENCE LOG to suite definition directory'
-            shcopy( self.logfile, self.reflogfile)
+            copyfile(self.logfile, self.reflogfile)
 
         proc_pool = SuiteProcPool.get_inst()
         if proc_pool:
@@ -1081,64 +1196,65 @@ class scheduler(object):
                 except (OSError, IOError) as exc:
                     # (see comments in the state dumping module)
                     # ignore errors here in order to shut down cleanly
-                    self.log.warning( 'Final state dump failed: ' + str(exc) )
+                    self.log.warning('Final state dump failed: ' + str(exc))
                     pass
 
         if self.request_handler:
             self.request_handler.quit = True
             self.request_handler.join()
 
-        for i in [ self.command_queue, self.suite_id, self.suite_state ]:
-            if i:
-                self.pyro.disconnect( i )
+        for iface in [self.command_queue,
+                      SuiteIdServer.get_inst(), StateSummaryServer.get_inst(),
+                      ExtTriggerServer.get_inst(), BroadcastServer.get_inst()]:
+            try:
+                self.pyro.disconnect(iface)
+            except KeyError:
+                # Wasn't connected yet.
+                pass
 
         if self.pyro:
             self.pyro.shutdown()
 
         try:
-            self.port_file.unlink()
+            self.portfile.unlink()
         except PortFileError, x:
             # port file may have been deleted
             print >> sys.stderr, x
 
         # disconnect from suite-db, stop db queue
         if getattr(self, "db", None) is not None:
-            self.db.close()
-            self.view_db.close()
+            self.pri_dao.close()
+            self.pub_dao.close()
 
         if getattr(self, "config", None) is not None:
             # run shutdown handlers
-            abort = self.config.cfg['cylc']['event hooks']['abort if shutdown handler fails']
-            self.run_event_handlers( 'shutdown', abort, reason )
+            abort = self.config.cfg[
+                'cylc']['event hooks']['abort if shutdown handler fails']
+            self.run_event_handlers('shutdown', abort, reason)
 
-        print "DONE" # main thread exit
+        print "DONE"  # main thread exit
 
-    def set_stop_point( self, stop_point_string ):
+    def set_stop_point(self, stop_point_string):
         stop_point = get_point(stop_point_string)
-        try:
-            stop_point.standardise()
-        except PointParsingError as exc:
-            self.log.critical(
-                "Cannot set stop cycle point: %s: %s" % (
-                    stop_point_string, exc))
-            return
         self.stop_point = stop_point
-        self.log.info( "Setting stop cycle point: %s" % stop_point_string )
+        self.log.info("Setting stop cycle point: %s" % stop_point_string)
         self.pool.set_stop_point(self.stop_point)
 
-    def set_stop_clock( self, unix_time, date_time_string ):
-        self.log.info( "Setting stop clock time: %s (unix time: %s)" % (
-                           date_time_string, unix_time))
+    def set_stop_clock(self, unix_time, date_time_string):
+        self.log.info("Setting stop clock time: %s (unix time: %s)" % (
+                      date_time_string, unix_time))
         self.stop_clock_time = unix_time
         self.stop_clock_time_string = date_time_string
 
-    def set_stop_task(self, taskid):
-        name, point_string = TaskID.split(taskid)
+    def set_stop_task(self, task_id):
+        name, point_string = TaskID.split(task_id)
         if name in self.config.get_task_name_list():
-            self.log.info("Setting stop task: " + taskid)
-            self.stop_task = taskid
+            task_id = self.get_standardised_taskid(task_id)
+            self.log.info("Setting stop task: " + task_id)
+            self.stop_task = task_id
         else:
-            self.log.warning("Requested stop task name does not exist: " + name)
+            self.log.warning(
+                "Requested stop task name does not exist: %s" % name)
 
     def stop_task_done(self):
         """Return True if stop task has succeeded."""
@@ -1148,22 +1264,22 @@ class scheduler(object):
         self.log.info("Stop task " + id + " finished")
         return True
 
-    def hold_suite( self, point=None ):
+    def hold_suite(self, point=None):
         if point is None:
             self.hold_suite_now = True
             self.pool.hold_all_tasks()
         else:
-            self.log.info( "Setting suite hold cycle point: " + str(point) )
+            self.log.info("Setting suite hold cycle point: " + str(point))
             self.pool.set_hold_point(point)
 
-    def release_suite( self ):
+    def release_suite(self):
         if self.hold_suite_now:
-            self.log.info( "RELEASE: new tasks will be queued when ready")
+            self.log.info("RELEASE: new tasks will be queued when ready")
             self.hold_suite_now = False
         self.pool.set_hold_point(None)
         self.pool.release_all_tasks()
 
-    def will_stop_at( self ):
+    def will_stop_at(self):
         if self.stop_point:
             return str(self.stop_point)
         elif self.stop_clock_time is not None:
@@ -1175,22 +1291,23 @@ class scheduler(object):
         else:
             return None
 
-    def clear_stop_times( self ):
+    def clear_stop_times(self):
         self.stop_point = None
         self.stop_clock_time = None
         self.stop_clock_time_string = None
         self.stop_task = None
 
-    def paused( self ):
+    def paused(self):
         return self.hold_suite_now
 
-    def will_pause_at( self ):
+    def will_pause_at(self):
         return self.pool.get_hold_point()
 
     def command_trigger_task(self, name, point_string, is_family):
         matches = self.get_matching_task_names(name, is_family)
         if not matches:
-            raise TaskNotFoundError, "No matching tasks found: " + name
+            raise TaskNotFoundError("No matching tasks found: %s" % name)
+        point_string = self.get_standardised_point_string(point_string)
         task_ids = [TaskID.get(i, point_string) for i in matches]
         self.pool.trigger_tasks(task_ids)
 
@@ -1200,63 +1317,58 @@ class scheduler(object):
             raise TaskNotFoundError("Task not found: %s" % name)
         if len(matches) > 1:
             raise TaskNotFoundError("Unique task match not found: %s" % name)
+        point_string = self.get_standardised_point_string(point_string)
         task_id = TaskID.get(matches[0], point_string)
         self.pool.dry_run_task(task_id)
 
-    def get_matching_task_names(self, expr, is_family=False):
-        """Return task names that match expr (for task or family name)."""
-        matches = []
-        tasks = self.config.get_task_name_list()
+    def get_matching_task_names(self, pattern, is_family=False):
+        """Return task names that match pattern (by task or family name)."""
+
+        matching_tasks = []
+        all_tasks = self.config.get_task_name_list()
         if is_family:
-            families = self.config.runtime['first-parent descendants']
+            fp_desc = self.config.runtime['first-parent descendants']
+            matching_mems = []
             try:
                 # Exact family match.
-                f_matches = families[expr]
+                matching_mems = fp_desc[pattern]
             except KeyError:
-                # Regex familyi match
-                f_matches = []
-                for fam, mems in families.items():
-                    if re.match(expr, fam):
-                        f_matches += mems
-            matches = []
-            for m in f_matches:
-                if m in tasks:
-                    matches.append(m)
+                # Regex family match
+                for fam, mems in fp_desc.items():
+                    if re.match(pattern, fam):
+                        matching_mems += mems
+            # Keep family members that are tasks (not sub-families).
+            matching_tasks = [m for m in matching_mems if m in all_tasks]
         else:
-            if expr in tasks:
+            if pattern in all_tasks:
                 # Exact task match.
-                matches.append(expr)
+                matching_tasks = [pattern]
             else:
                 # Regex task match.
-                for task in tasks:
-                    if re.match(expr, task):
-                        matches.append(task)
-        return matches
+                matching_tasks = [t for t in all_tasks if re.match(pattern, t)]
+        return matching_tasks
 
-    def command_reset_task_state( self, name, point_string, state, is_family ):
-        matches = self.get_matching_task_names( name, is_family )
+    def command_reset_task_state(self, name, point_string, state, is_family):
+        matches = self.get_matching_task_names(name, is_family)
         if not matches:
-            raise TaskNotFoundError, "No matching tasks found: " + name
+            raise TaskNotFoundError("No matching tasks found: %s" % name)
+        point_string = self.get_standardised_point_string(point_string)
         task_ids = [TaskID.get(i, point_string) for i in matches]
-        self.pool.reset_task_states( task_ids, state )
-
-    def command_add_prerequisite( self, task_id, message ):
-        self.pool.add_prereq_to_task( task_id, message )
+        self.pool.reset_task_states(task_ids, state)
 
-    def command_purge_tree( self, id, stop ):
-        self.pool.purge_tree( id, get_point(stop) )
-
-    def filter_initial_task_list( self, inlist ):
-        included_by_rc  = self.config.cfg['scheduling']['special tasks']['include at start-up']
-        excluded_by_rc  = self.config.cfg['scheduling']['special tasks']['exclude at start-up']
+    def filter_initial_task_list(self, inlist):
+        included_by_rc = self.config.cfg[
+            'scheduling']['special tasks']['include at start-up']
+        excluded_by_rc = self.config.cfg[
+            'scheduling']['special tasks']['exclude at start-up']
         outlist = []
         for name in inlist:
             if name in excluded_by_rc:
                 continue
-            if len( included_by_rc ) > 0:
+            if len(included_by_rc) > 0:
                 if name not in included_by_rc:
                     continue
-            outlist.append( name )
+            outlist.append(name)
         return outlist
 
     def stop_clock_done(self):
@@ -1274,6 +1386,39 @@ class scheduler(object):
         else:
             return False
 
+    def _copy_pri_db_to_pub_db(self):
+        """Copy content of primary database file to public database file.
+
+        Use temporary file to ensure that we do not end up with a partial file.
+
+        """
+        temp_pub_db_file_name = None
+        self.pub_dao.close()
+        try:
+            self.pub_dao.conn = None  # reset connection
+            open(self.pub_dao.db_file_name, "a").close()  # touch
+            st_mode = os.stat(self.pub_dao.db_file_name).st_mode
+            temp_pub_db_file_name = mkstemp(
+                prefix=self.pub_dao.DB_FILE_BASE_NAME,
+                dir=os.path.dirname(self.pub_dao.db_file_name))[1]
+            copyfile(
+                self.pri_dao.db_file_name, temp_pub_db_file_name)
+            os.rename(temp_pub_db_file_name, self.pub_dao.db_file_name)
+            os.chmod(self.pub_dao.db_file_name, st_mode)
+        except (IOError, OSError) as exc:
+            if temp_pub_db_file_name:
+                os.unlink(temp_pub_db_file_name)
+            raise
+
+    def log_memory(self, message):
+        """Print a message to standard out with the current memory usage."""
+        if not self.options.profile_mode:
+            return
+        proc = subprocess.Popen(["ps", "h", "-orss", str(os.getpid())],
+                                stdout=subprocess.PIPE)
+        memory = int(proc.communicate()[0])
+        print "PROFILE: Memory: %d KiB: %s" % (memory, message)
+
     def _update_profile_info(self, category, amount, amount_format="%s"):
         # Update the 1, 5, 15 minute dt averages for a given category.
         tnow = time.time()
@@ -1299,13 +1444,14 @@ class scheduler(object):
             averages[minute_num] = sum(minute_amounts) / len(minute_amounts)
             output_text += (" %d: " + amount_format) % (
                 minute_num, averages[minute_num])
-        self.log.info( output_text )
+        self.log.info(output_text)
 
     def _update_cpu_usage(self):
-        p = subprocess.Popen(["ps", "-o%cpu= ", str(os.getpid())], stdout=subprocess.PIPE)
+        p = subprocess.Popen(
+            ["ps", "-o%cpu= ", str(os.getpid())], stdout=subprocess.PIPE)
         try:
             cpu_frac = float(p.communicate()[0])
         except (TypeError, OSError, IOError, ValueError) as e:
-            self.log.warning( "Cannot get CPU % statistics: %s" % e )
+            self.log.warning("Cannot get CPU % statistics: %s" % e)
             return
         self._update_profile_info("CPU %", cpu_frac, amount_format="%.1f")
diff --git a/lib/cylc/strftime.py b/lib/cylc/strftime.py
index 15496aa..953aa0e 100644
--- a/lib/cylc/strftime.py
+++ b/lib/cylc/strftime.py
@@ -18,34 +18,35 @@
 
 import datetime
 
-def strftime( dt, template ):
+
+def strftime(dt, template):
     """A replacement for datetime.strftime() which does not handle dates
     earlier than 1900 (or beyond 2048?)."""
 
     iso = dt.isoformat()
-    return isoformat_strftime( iso, template )
+    return isoformat_strftime(iso, template)
 
 
-def isoformat_strftime( iso_string, template ):
+def isoformat_strftime(iso_string, template):
     """Re-template a datetime.datetime isoformat string."""
-    d,t = iso_string.split('T')
-    Y,m,d = d.split('-')
-    H,M,S = t.split(':')
-    t = template.replace('%Y', Y )
-    t = t.replace('%m', m )
-    t = t.replace('%d', d )
-    t = t.replace('%H', H )
-    t = t.replace('%M', M )
-    t = t.replace('%S', S[0:2] )
+    d, t = iso_string.split('T')
+    Y, m, d = d.split('-')
+    H, M, S = t.split(':')
+    t = template.replace('%Y', Y)
+    t = t.replace('%m', m)
+    t = t.replace('%d', d)
+    t = t.replace('%H', H)
+    t = t.replace('%M', M)
+    t = t.replace('%S', S[0:2])
     return t
 
 
 if __name__ == '__main__':
-    dt1 = datetime.datetime(1900,1,1)
-    dt2 = datetime.datetime(1600,1,1)
+    dt1 = datetime.datetime(1900, 1, 1)
+    dt2 = datetime.datetime(1600, 1, 1)
 
-    print strftime( dt1, "%Y-%m-%d %H:%M:%S" )
-    print dt1.strftime( "%Y-%m-%d %H:%M:%S" )
+    print strftime(dt1, "%Y-%m-%d %H:%M:%S")
+    print dt1.strftime("%Y-%m-%d %H:%M:%S")
 
-    print strftime( dt2, "%Y-%m-%d %H:%M:%S" )
-    print dt2.strftime( "%Y-%m-%d %H:%M:%S" ) # FAILS
+    print strftime(dt2, "%Y-%m-%d %H:%M:%S")
+    print dt2.strftime("%Y-%m-%d %H:%M:%S")  # FAILS
diff --git a/lib/cylc/suite_cmd_interface.py b/lib/cylc/suite_cmd_interface.py
deleted file mode 100644
index b41bd98..0000000
--- a/lib/cylc/suite_cmd_interface.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import Pyro.core
-from Queue import Queue
-
-class comqueue( Pyro.core.ObjBase ):
-    """Pyro-connected class to queue suite control requests."""
-
-    def __init__( self, legal_commands=[] ):
-        Pyro.core.ObjBase.__init__(self)
-        self.legal = legal_commands
-        self.queue = Queue()
-
-    def put( self, command, *args ):
-        res = ( True, 'Command queued' )
-        if command not in self.legal:
-            res = ( False, 'ERROR: Illegal command: ' + str(command) )
-        else:
-            # queue incoming messages for this task
-            self.queue.put( (command, args) )
-        return res
-
-    def get_queue( self ):
-        return self.queue
diff --git a/lib/cylc/suite_host.py b/lib/cylc/suite_host.py
index 4527ba9..d6e0c0e 100644
--- a/lib/cylc/suite_host.py
+++ b/lib/cylc/suite_host.py
@@ -16,13 +16,16 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import re, sys, socket
+import re
+import sys
+import socket
 
 hostname = None
 suite_host = None
 host_ip_address = None
 
-def get_local_ip_address( target ):
+
+def get_local_ip_address(target):
     """
 ATTRIBUTION:
 http://www.linux-support.com/cms/get-local-ip-address-with-python/
@@ -61,48 +64,59 @@ returning the IP address associated with this socket.
 
     ipaddr = ''
     try:
-        s = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
-        s.connect( (target, 8000) )
+        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        s.connect((target, 8000))
         ipaddr = s.getsockname()[0]
         s.close()
     except:
         pass
     return ipaddr
 
+
 def get_hostname():
     global hostname
     if hostname is None:
         hostname = socket.getfqdn()
     return hostname
 
+
 def get_host_ip_address():
     from cylc.cfgspec.globalcfg import GLOBAL_CFG
     global host_ip_address
     if host_ip_address is None:
-        target = GLOBAL_CFG.get( ['suite host self-identification','target'] )
+        target = GLOBAL_CFG.get(['suite host self-identification', 'target'])
         # external IP address of the suite host:
-        host_ip_address = get_local_ip_address( target )
+        host_ip_address = get_local_ip_address(target)
     return host_ip_address
 
+
 def get_suite_host():
-    from cylc.cfgspec.globalcfg import GLOBAL_CFG
+    from cylc.cfgspec.globalcfg import GLOBAL_CFG, GlobalConfigError
     global suite_host
     if suite_host is None:
-        hardwired = GLOBAL_CFG.get( ['suite host self-identification','host'] )
-        method = GLOBAL_CFG.get( ['suite host self-identification','method'] )
-        # the following is for suite host self-identfication in task job scripts:
+        hardwired = GLOBAL_CFG.get(['suite host self-identification', 'host'])
+        method = GLOBAL_CFG.get(['suite host self-identification', 'method'])
+        # the following is for suite host self-identification in task job
+        # scripts:
         if method == 'name':
             suite_host = hostname
         elif method == 'address':
             suite_host = get_host_ip_address()
         elif method == 'hardwired':
             if not hardwired:
-                sys.exit( 'ERROR, no hardwired hostname is configured' )
+                raise GlobalConfigError(
+                    'ERROR, no hardwired hostname is configured (%s)' %
+                    ['suite host self-identification', 'host']
+                )
             suite_host = hardwired
         else:
-            sys.exit( 'ERROR, unknown host method: ' + method )
+            raise GlobalConfigError(
+                'ERROR, unknown host method (%s): %s' % (
+                    ['suite host self-identification', 'method'], method)
+            )
     return suite_host
 
+
 def is_remote_host(name):
     """Return True if name has different IP address than the current host.
     Return False if name is None.  Abort if host is unknown.
@@ -114,13 +128,13 @@ def is_remote_host(name):
         ipa = socket.gethostbyname(name)
     except Exception, e:
         print >> sys.stderr, str(e)
-        raise Exception( 'ERROR, host not found: ' + name )
+        raise Exception('ERROR, host not found: ' + name)
     host_ip_address = get_host_ip_address()
     # local IP address of the suite host (may be 127.0.0.1, for e.g.)
     local_ip_address = socket.gethostbyname(get_hostname())
     return name and ipa != host_ip_address and ipa != local_ip_address
 
-if __name__ == "__main__":
 
+if __name__ == "__main__":
     target = sys.argv[1]
-    print get_local_ip_address( target )
+    print get_local_ip_address(target)
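
get_local_ip_address() above uses the well-known UDP-socket trick described at the attribution link: connecting a datagram socket only makes the kernel choose an outgoing interface, without sending any traffic, and getsockname() then reports that interface's address. A self-contained sketch of the same technique (the port number is arbitrary, as in the original):

    import socket

    def local_ip_for(target, port=8000):
        """Return the local IP address that would be used to reach 'target'."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            # connect() on a UDP socket only selects a route; nothing is sent.
            sock.connect((target, port))
            return sock.getsockname()[0]
        except socket.error:
            return ''
        finally:
            sock.close()
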
diff --git a/lib/cylc/suite_id.py b/lib/cylc/suite_id.py
deleted file mode 100644
index b455045..0000000
--- a/lib/cylc/suite_id.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-# A minimal Pyro-connected object to allow client programs to identify
-# what suite is running at a given cylc port - by suite name and owner.
-
-# All *other* suite objects should be connected to Pyro via qualified
-# names: owner.suite.object, to prevent accidental access to the wrong
-# suite. This object, however, should be connected unqualified so that
-# that same ID method can be called on any active cylc port.
-
-import Pyro.core
-
-class identifier( Pyro.core.ObjBase ):
-    def __init__( self, name, owner ):
-        self.owner = owner
-        self.name = name
-        Pyro.core.ObjBase.__init__( self )
-
-    def id( self ):
-        return ( self.name, self.owner )
diff --git a/lib/cylc/suite_info_interface.py b/lib/cylc/suite_info_interface.py
deleted file mode 100644
index 13efbaf..0000000
--- a/lib/cylc/suite_info_interface.py
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import Pyro.core
-
-"""Pyro interface for DIRECT READ-ONLY INTERACTION with a cylc suite.
-Any interaction that alters suite state in any way must go via the
-indirect thread-safe suite command interface queue."""
-
-class info_interface( Pyro.core.ObjBase ):
-    def __init__( self, info_commands ):
-        Pyro.core.ObjBase.__init__(self)
-        self.commands = info_commands
-
-    def get( self, descrip, *args ):
-        # TODO - WHAT TO RETURN IN CASE OF UNKNOWN COMMAND?
-        return self.commands[ descrip ]( *args )
diff --git a/lib/cylc/suite_logging.py b/lib/cylc/suite_logging.py
index 4309618..b7a5cd0 100644
--- a/lib/cylc/suite_logging.py
+++ b/lib/cylc/suite_logging.py
@@ -16,47 +16,54 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, sys, re
-import logging, logging.handlers
+import os
+import sys
+import logging
+import logging.handlers
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.wallclock import get_time_string_from_unix_time
 
 """Configure suite logging with the Python logging module, 'main'
 logger, in a sub-directory of the suite running directory."""
 
-class suite_log( object ):
-    def __init__( self, suite ):
 
-        self.ldir = GLOBAL_CFG.get_derived_host_item( suite, 'suite log directory' )
-        self.path = os.path.join( self.ldir, 'log' ) 
+class suite_log(object):
+    def __init__(self, suite):
 
-        self.err_path = os.path.join( self.ldir, 'err' )
-        self.roll_at_startup = GLOBAL_CFG.get( ['suite logging','roll over at start-up'] )
-        self.n_keep = GLOBAL_CFG.get( ['suite logging','rolling archive length'] )
-        self.max_bytes = GLOBAL_CFG.get( ['suite logging','maximum size in bytes'] )
+        self.ldir = GLOBAL_CFG.get_derived_host_item(
+            suite, 'suite log directory')
+        self.path = os.path.join(self.ldir, 'log')
 
-    def get_err_path( self ):
+        self.err_path = os.path.join(self.ldir, 'err')
+        self.roll_at_startup = GLOBAL_CFG.get(
+            ['suite logging', 'roll over at start-up'])
+        self.n_keep = GLOBAL_CFG.get(
+            ['suite logging', 'rolling archive length'])
+        self.max_bytes = GLOBAL_CFG.get(
+            ['suite logging', 'maximum size in bytes'])
+
+    def get_err_path(self):
         return self.err_path
 
-    def get_dir( self ):
+    def get_dir(self):
         return self.ldir
 
-    def get_path( self ):
+    def get_path(self):
         return self.path
 
-    def get_log( self ):
+    def get_log(self):
         # not really necessary: just get the main logger
-        return logging.getLogger( 'main' )
+        return logging.getLogger('main')
 
-    def pimp( self, level=logging.INFO ):
-        log = logging.getLogger( 'main' )
-        log.setLevel( level )
+    def pimp(self, level=logging.INFO):
+        log = logging.getLogger('main')
+        log.setLevel(level)
 
         h = logging.handlers.RotatingFileHandler(
-                    self.path, 'a', self.max_bytes, self.n_keep )
+            self.path, 'a', self.max_bytes, self.n_keep)
         # The above creates a zero-sized log file if it doesn't already exist.
         if self.roll_at_startup:
-            if os.path.getsize( self.path ) > 0:
+            if os.path.getsize(self.path) > 0:
                 h.doRollover()
 
         f = ISO8601DateTimeFormatter(
@@ -65,9 +72,9 @@ class suite_log( object ):
 
         # write warnings and worse to stderr as well as to the log
         h2 = logging.StreamHandler(sys.stderr)
-        h2.setLevel( logging.WARNING )
-        h2.setFormatter( f )
-        log.addHandler( h2 )
+        h2.setLevel(logging.WARNING)
+        h2.setFormatter(f)
+        log.addHandler(h2)
 
         h.setFormatter(f)
         log.addHandler(h)
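
The pimp() method above attaches a RotatingFileHandler and, because the handler creates an empty file on construction, only rolls the log over at start-up when the existing file is non-empty. A reduced sketch of that arrangement (logger name, sizes and format string are placeholders, not cylc's configured values):

    import logging
    import logging.handlers
    import os

    def open_rolling_log(path, max_bytes=1000000, n_keep=5, roll_at_startup=True):
        log = logging.getLogger('main')
        log.setLevel(logging.INFO)
        handler = logging.handlers.RotatingFileHandler(path, 'a', max_bytes, n_keep)
        # The handler has just created 'path' if it did not exist, so only
        # roll over when there is already something worth archiving.
        if roll_at_startup and os.path.getsize(path) > 0:
            handler.doRollover()
        handler.setFormatter(
            logging.Formatter('%(asctime)s %(levelname)s - %(message)s'))
        log.addHandler(handler)
        return log
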
diff --git a/lib/cylc/suite_output.py b/lib/cylc/suite_output.py
index 864127a..433804e 100644
--- a/lib/cylc/suite_output.py
+++ b/lib/cylc/suite_output.py
@@ -16,8 +16,10 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os, sys
-import logging, logging.handlers
+import os
+import sys
+import logging
+import logging.handlers
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from rolling_archive import rolling_archive
 
@@ -25,25 +27,28 @@ from rolling_archive import rolling_archive
 sub-directory of the suite running directory. Can also be used to simply
 get the configured log locations."""
 
-class suite_output( object ):
 
-    def __init__( self, suite ):
+class suite_output(object):
 
-        sodir = GLOBAL_CFG.get_derived_host_item( suite, 'suite log directory' )
-        self.opath = os.path.join( sodir, 'out' ) 
-        self.epath = os.path.join( sodir, 'err' ) 
+    def __init__(self, suite):
+
+        sodir = GLOBAL_CFG.get_derived_host_item(suite, 'suite log directory')
+        self.opath = os.path.join(sodir, 'out')
+        self.epath = os.path.join(sodir, 'err')
 
         # use same archive length as logging (TODO: document this)
-        self.roll_at_startup = GLOBAL_CFG.get( ['suite logging','roll over at start-up'] )
-        self.arclen = GLOBAL_CFG.get( ['suite logging','rolling archive length'] )
+        self.roll_at_startup = GLOBAL_CFG.get(
+            ['suite logging', 'roll over at start-up'])
+        self.arclen = GLOBAL_CFG.get(
+            ['suite logging', 'rolling archive length'])
 
-    def get_path( self, err=False ):
+    def get_path(self, err=False):
         if err:
             return self.epath
         else:
             return self.opath
 
-    def redirect( self ):
+    def redirect(self):
         """redirect the standard file descriptors to suite log files."""
 
         self.roll()
@@ -51,32 +56,32 @@ class suite_output( object ):
         # record current standard file descriptors
         self.sys_stdout = sys.stdout
         self.sys_stderr = sys.stderr
-        self.sys_stdin  = sys.stdin
+        self.sys_stdin = sys.stdin
 
         # redirect standard file descriptors
         # note that simply reassigning the sys streams is not sufficient
         # if we import modules that write to stdin and stdout from C
         # code - evidently the subprocess module is in this category!
-        sout = file( self.opath, 'a+', 0 ) # 0 => unbuffered
-        serr = file( self.epath, 'a+', 0 )
-        dvnl = file( '/dev/null', 'r' )
-        os.dup2( sout.fileno(), sys.stdout.fileno() )
-        os.dup2( serr.fileno(), sys.stderr.fileno() )
-        os.dup2( dvnl.fileno(), sys.stdin.fileno() )
+        sout = file(self.opath, 'a+', 0)  # 0 => unbuffered
+        serr = file(self.epath, 'a+', 0)
+        dvnl = file('/dev/null', 'r')
+        os.dup2(sout.fileno(), sys.stdout.fileno())
+        os.dup2(serr.fileno(), sys.stderr.fileno())
+        os.dup2(dvnl.fileno(), sys.stdin.fileno())
 
-    def restore( self ):
+    def restore(self):
         # (not used)
         sys.stdout.close()
         sys.stderr.close()
         sys.stdout = self.sys_stdout
         sys.stderr = self.sys_stderr
-        sys.stdin  = self.sys_stdin
+        sys.stdin = self.sys_stdin
         print "\n Restored stdout and stderr to normal"
 
-    def roll( self ):
+    def roll(self):
         # roll the stdout and stderr log files
-        oarchive = rolling_archive( self.opath, self.arclen, sep='.' )
-        earchive = rolling_archive( self.epath, self.arclen, sep='.' )
+        oarchive = rolling_archive(self.opath, self.arclen, sep='.')
+        earchive = rolling_archive(self.epath, self.arclen, sep='.')
         if self.roll_at_startup:
             oarchive.roll()
             earchive.roll()
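
The redirect() method above works at the file-descriptor level because, as its comment notes, reassigning sys.stdout and sys.stderr is not enough for output produced by C extensions or child processes. A minimal sketch of the same os.dup2() approach, with placeholder paths:

    import os
    import sys

    def redirect_stdio(out_path, err_path):
        out = open(out_path, 'a')
        err = open(err_path, 'a')
        null = open(os.devnull, 'r')
        # Duplicate onto descriptors 1, 2 and 0 so that C code and
        # subprocesses inherit the redirection too.
        os.dup2(out.fileno(), sys.stdout.fileno())
        os.dup2(err.fileno(), sys.stderr.fileno())
        os.dup2(null.fileno(), sys.stdin.fileno())
        return out, err, null  # keep references so the files stay open
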
diff --git a/lib/cylc/suite_state_dumping.py b/lib/cylc/suite_state_dumping.py
index 62e3e64..d55f08c 100644
--- a/lib/cylc/suite_state_dumping.py
+++ b/lib/cylc/suite_state_dumping.py
@@ -23,6 +23,8 @@ import time
 import logging
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 from cylc.wallclock import get_current_time_string
+from cylc.network.suite_broadcast import BroadcastServer
+
 
 class SuiteStateDumper(object):
     """Generate state dumps."""
@@ -45,25 +47,13 @@ class SuiteStateDumper(object):
 
     def set_cts(self, ict, fct):
         """Set initial and final cycle time strings."""
-        ict_string = str(ict)
-        stop_string = str(fct)
-
-        self.cts_str = ""
-        if ict_string:
-            self.cts_str += 'initial cycle : ' + ict_string + '\n'
-        else:
-            self.cts_str += 'initial cycle : (none)\n'
-
-        if stop_string:
-            self.cts_str += 'final cycle : ' + stop_string + '\n'
-        else:
-            self.cts_str += 'final cycle : (none)\n'
+        self.cts_str = "initial cycle : %s\n" % str(ict)
+        self.cts_str += "final cycle : %s\n" % str(fct)
 
-    def dump(self, tasks=None, wireless=None):
+    def dump(self, tasks=None):
         """Dump suite states to disk. Return state file basename on success."""
 
-        if wireless is None:
-            wireless = self.pool.wireless
+        wireless = BroadcastServer.get_inst()
 
         base_name = self.BASE_NAME + "." + get_current_time_string(
             override_use_utc=True, use_basic_format=True,
@@ -83,13 +73,10 @@ class SuiteStateDumper(object):
 
                 handle.write('run mode : %s\n' % self.run_mode)
                 handle.write('time : %s (%d)\n' % (
-                   get_current_time_string(), time.time()))
+                    get_current_time_string(), time.time()))
 
                 handle.write(self.cts_str)
-
-                if wireless is not None:
-                    wireless.dump(handle)
-
+                wireless.dump(handle)
                 handle.write('Begin task states\n')
 
                 if tasks is None and self.pool is not None:
@@ -100,7 +87,7 @@ class SuiteStateDumper(object):
 
                 # To generate "OSError [Errno 9] bad file descriptor",
                 # close the file with os.close() before calling fsync():
-                ## os.close( handle.fileno() )
+                # # os.close( handle.fileno() )
 
                 os.fsync(handle.fileno())
                 handle.close()
@@ -112,7 +99,7 @@ class SuiteStateDumper(object):
                 if not exc.filename:
                     exc.filename = file_name
                 self.log.warning(
-                    'State dumping failed, #%d %s' %(n_attempt, exc))
+                    'State dumping failed, #%d %s' % (n_attempt, exc))
                 if n_attempt >= max_attempts:
                     raise exc
                 n_attempt += 1
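
The dump() method keeps the comment about fsync() ordering because closing the descriptor with os.close() before calling fsync() would fail with "bad file descriptor"; it also retries the whole dump a few times on I/O errors. A small sketch of just the durable-write part (the file name and data are illustrative):

    import os

    def write_durably(path, text):
        handle = open(path, 'wb')
        handle.write(text)
        # Flush Python's buffer, then ask the OS to push the data to disk
        # before closing the descriptor (closing it first gives EBADF).
        handle.flush()
        os.fsync(handle.fileno())
        handle.close()
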
diff --git a/lib/cylc/tail.py b/lib/cylc/tail.py
index 5cfc6d1..d4a9df0 100644
--- a/lib/cylc/tail.py
+++ b/lib/cylc/tail.py
@@ -18,27 +18,16 @@
 
 import time
 
-def tail( file ):
+
+def tail(file):
     while True:
         where = file.tell()
         line = file.readline()
         if not line:
-            time.sleep( 1 )
-            file.seek( where )
-            yield None  # return even if no new line so the host thread
-                        # doesn't hang when the gui exits.
+            time.sleep(1)
+            file.seek(where)
+            # return even if no new line so the host thread doesn't hang when
+            # the gui exits.
+            yield None
         else:
             yield line
-
-# FOR NORMAL 'tail -F' behaviour:
-#def tail( file ):
-#    interval = 1.0
-#
-#    while True:
-#        where = file.tell()
-#        line = file.readline()
-#        if not line:
-#            time.sleep( interval )
-#            file.seek( where )
-#        else:
-#            yield line
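
The tail() generator above yields None once per second when no new data has arrived, so a GUI thread polling it can still notice shutdown requests. A possible usage sketch, assuming cylc's lib directory is on the Python path and the log file already exists (the path is a placeholder):

    import sys
    from cylc.tail import tail

    log = open('suite-log-file')  # placeholder path
    for line in tail(log):        # follows the file indefinitely
        if line is None:
            continue              # no new line yet; check for shutdown here
        sys.stdout.write(line)
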
diff --git a/lib/cylc/task_id.py b/lib/cylc/task_id.py
index 67fa34f..467bf6e 100644
--- a/lib/cylc/task_id.py
+++ b/lib/cylc/task_id.py
@@ -50,7 +50,7 @@ class TaskID(object):
     @classmethod
     def is_valid_id(cls, id_str):
         """Return whether a task id is valid."""
-        if not cls.DELIM in id_str:
+        if cls.DELIM not in id_str:
             return False
         name, point = cls.split(id_str)
         # N.B. only basic cycle point check
diff --git a/lib/cylc/task_message.py b/lib/cylc/task_message.py
index e34f8cc..a315a24 100644
--- a/lib/cylc/task_message.py
+++ b/lib/cylc/task_message.py
@@ -15,42 +15,70 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
 """Task to cylc progress messaging."""
 
-import os, sys
-import socket
-import subprocess
-from datetime import datetime
+import os
+import sys
 from time import sleep
-from remote import remrun
-from cylc.passphrase import passphrase
+from cylc.remote import remrun
 from cylc.wallclock import get_current_time_string
 import cylc.flags
 
-class message(object):
-    def __init__( self, msg=None, priority='NORMAL' ):
 
-        self.msg = msg
+class TaskMessage(object):
+
+    """Send task (job) messages."""
+
+    FAILED = "failed"
+    STARTED = "started"
+    SUCCEEDED = "succeeded"
+    STATUSES = (STARTED, SUCCEEDED, FAILED)
+
+    CYLC_JOB_PID = "CYLC_JOB_PID"
+    CYLC_JOB_INIT_TIME = "CYLC_JOB_INIT_TIME"
+    CYLC_JOB_EXIT = "CYLC_JOB_EXIT"
+    CYLC_JOB_EXIT_TIME = "CYLC_JOB_EXIT_TIME"
+    CYLC_MESSAGE = "CYLC_MESSAGE"
+
+    FAIL_MESSAGE_PREFIX = "Task job script received signal "
+    VACATION_MESSAGE_PREFIX = "Task job script vacated by signal "
+
+    NORMAL = "NORMAL"
+    WARNING = "WARNING"
+    CRITICAL = "CRITICAL"
+    PRIORITIES = (NORMAL, WARNING, CRITICAL)
 
-        if priority in [ 'NORMAL', 'WARNING', 'CRITICAL' ]:
+    ATTRS = (
+        ('suite', 'CYLC_SUITE_NAME', '(CYLC_SUITE_NAME)'),
+        ('task_id', 'CYLC_TASK_ID', '(CYLC_TASK_ID)'),
+        ('retry_seconds', 'CYLC_TASK_MSG_RETRY_INTVL',
+         '(CYLC_TASK_MSG_RETRY_INTVL)'),
+        ('max_tries', 'CYLC_TASK_MSG_MAX_TRIES', '(CYLC_TASK_MSG_MAX_TRIES)'),
+        ('try_timeout', 'CYLC_TASK_MSG_TIMEOUT', '(CYLC_TASK_MSG_TIMEOUT)'),
+        ('owner', 'CYLC_SUITE_OWNER', None),
+        ('host', 'CYLC_SUITE_HOST', '(CYLC_SUITE_HOST)'),
+        ('port', 'CYLC_SUITE_PORT', '(CYLC_SUITE_PORT)'),
+    )
+
+    def __init__(self, priority=NORMAL):
+        if priority in self.PRIORITIES:
             self.priority = priority
         else:
-            raise Exception( 'Illegal message priority ' + priority )
+            raise Exception('Illegal message priority ' + priority)
 
         # load the environment
         self.env_map = dict(os.environ)
 
         # set some instance variables
-        for attr, key, default in (
-                ('suite', 'CYLC_SUITE_NAME', '(CYLC_SUITE_NAME)'),
-                ('task_id', 'CYLC_TASK_ID', '(CYLC_TASK_ID)'),
-                ('retry_seconds', 'CYLC_TASK_MSG_RETRY_INTVL', '(CYLC_TASK_MSG_RETRY_INTVL)'),
-                ('max_tries',     'CYLC_TASK_MSG_MAX_TRIES',   '(CYLC_TASK_MSG_MAX_TRIES)'),
-                ('try_timeout',   'CYLC_TASK_MSG_TIMEOUT',     '(CYLC_TASK_MSG_TIMEOUT)'),
-                ('owner', 'CYLC_SUITE_OWNER', None),
-                ('host', 'CYLC_SUITE_HOST', '(CYLC_SUITE_HOST)'),
-                ('port', 'CYLC_SUITE_PORT', '(CYLC_SUITE_PORT)')):
+        self.suite = None
+        self.task_id = None
+        self.retry_seconds = None
+        self.max_tries = None
+        self.try_timeout = None
+        self.owner = None
+        self.host = None
+        self.port = None
+        for attr, key, default in self.ATTRS:
             value = self.env_map.get(key, default)
             setattr(self, attr, value)
 
@@ -58,18 +86,19 @@ class message(object):
         if self.try_timeout == 'None':
             self.try_timeout = None
         try:
-            self.retry_seconds = float( self.retry_seconds )
-            self.max_tries = int( self.max_tries )
-        except:
+            self.retry_seconds = float(self.retry_seconds)
+            self.max_tries = int(self.max_tries)
+        except ValueError:
             pass
 
-        cylc.flags.verbose = cylc.flags.verbose or self.env_map.get('CYLC_VERBOSE') == 'True'
+        cylc.flags.verbose = (
+            cylc.flags.verbose or self.env_map.get('CYLC_VERBOSE') == 'True')
 
         # 'scheduler' or 'submit', (or 'raw' if job script run manually)
-        self.mode = self.env_map.get( 'CYLC_MODE', 'raw' )
+        self.mode = self.env_map.get('CYLC_MODE', 'raw')
 
-        rd = self.env_map.get( 'CYLC_SUITE_RUN_DIR', '.' )
-        self.env_file_path = os.path.join (rd, 'cylc-suite-env' )
+        suite_run_dir = self.env_map.get('CYLC_SUITE_RUN_DIR', '.')
+        self.env_file_path = os.path.join(suite_run_dir, 'cylc-suite-env')
 
         self.utc = self.env_map.get('CYLC_UTC') == 'True'
         # Record the time the messaging system was called and append it
@@ -78,17 +107,41 @@ class message(object):
             override_use_utc=self.utc)
 
         self.ssh_messaging = (
-                self.env_map.get('CYLC_TASK_COMMS_METHOD') == 'ssh' )
+            self.env_map.get('CYLC_TASK_COMMS_METHOD') == 'ssh')
 
         self.polling = (
-                self.env_map.get('CYLC_TASK_COMMS_METHOD') == 'poll' )
+            self.env_map.get('CYLC_TASK_COMMS_METHOD') == 'poll')
 
         self.ssh_login_shell = (
-                self.env_map.get('CYLC_TASK_SSH_LOGIN_SHELL') != 'False')
+            self.env_map.get('CYLC_TASK_SSH_LOGIN_SHELL') != 'False')
 
-    def load_suite_contact_file( self ):
-        # override CYLC_SUITE variables using the contact environment file,
-        # in case the suite was stopped and then restarted on another port:
+    def send(self, messages):
+        """Send messages back to the suite."""
+        self._update_job_status_file(messages)
+
+        if self.mode != 'scheduler' or self.polling:
+            # no suite to communicate with, just print to stdout.
+            self._print_messages(messages)
+            return
+
+        if self.ssh_messaging and self._send_by_ssh():
+            return
+
+        self._send_by_pyro(messages)
+
+    def _get_client(self):
+        """Return the Pyro client."""
+        from cylc.network.task_msgqueue import TaskMessageClient
+        return TaskMessageClient(
+            self.suite, self.task_id, self.owner, self.host,
+            self.try_timeout, self.port)
+
+    def _load_suite_contact_file(self):
+        """Override CYLC_SUITE variables using the contact environment file.
+
+        In case the suite was stopped and then restarted on another port.
+
+        """
         if os.access(self.env_file_path, os.F_OK | os.R_OK):
             for line in open(self.env_file_path):
                 key, value = line.strip().split('=', 1)
@@ -102,164 +155,170 @@ class message(object):
             value = self.env_map.get(key, default)
             setattr(self, attr, value)
 
-    def get_proxy( self ):
-        # get passphrase here, not in __init__, because it is not needed
-        # on remote task hosts if 'ssh messaging = True' (otherwise, if
-        # it is needed, we will end up in this method).
-
-        self.pphrase = passphrase( self.suite, self.owner, self.host ).get( None, None )
-
-        import cylc_pyro_client
-        return cylc_pyro_client.client( self.suite, self.pphrase,
-                self.owner, self.host, self.try_timeout,
-                self.port ).get_proxy( self.task_id )
-
-    def print_msg( self, msg ):
-        now = get_current_time_string(override_use_utc=self.utc)
-        prefix = 'cylc (' + self.mode + ' - ' + now + '): '
-        if self.priority == 'NORMAL':
-            print prefix + msg
-        else:
-            print >> sys.stderr, prefix + self.priority + ' ' + msg
-
-    def send( self, msgin=None ):
-        msg = None
-        if msgin:
-            msg = msgin
-        elif self.msg:
-            msg = self.msg
-        if not msg:
-            # nothing to send (TODO - not needed?)
-            return
-
-        # append event time to the message
-        msg += ' at ' + self.true_event_time
-
-        if self.mode != 'scheduler' or self.polling:
-            # no suite to communicate with, just print to stdout.
-            self.print_msg( msg )
-            return
-
-        if self.ssh_messaging:
-            self.load_suite_contact_file()
-
-            # The suite definition specified that this task should
-            # communicate back to the suite by means of using
-            # passwordless ssh to re-invoke the messaging command on the
-            # suite host.
-
-            # The remote_run() function expects command line options
-            # to identify the target user and host names:
-            sys.argv.append( '--user=' + self.owner )
-            sys.argv.append( '--host=' + self.host )
-            if cylc.flags.verbose:
-                sys.argv.append( '-v' )
-
-            if self.ssh_login_shell:
-                sys.argv.append('--login')
+    def _print_messages(self, messages):
+        """Print message to send."""
+        prefix = 'cylc (%s - %s): ' % (self.mode, self.true_event_time)
+        for message in messages:
+            if self.priority == self.NORMAL:
+                print prefix + message
             else:
-                sys.argv.append('--no-login')
-
-            # Some variables from the task execution environment are
-            # also required by the re-invoked remote command: Note that
-            # $CYLC_TASK_SSH_MESSAGING is not passed through so the
-            # re-invoked command on the remote side will not end up in
-            # this code block.
-            env = {}
-            for var in ['CYLC_MODE', 'CYLC_TASK_ID', 'CYLC_VERBOSE',
-                    'CYLC_SUITE_DEF_PATH_ON_SUITE_HOST',
-                    'CYLC_SUITE_RUN_DIR',
-                    'CYLC_SUITE_NAME', 'CYLC_SUITE_OWNER',
-                    'CYLC_SUITE_HOST', 'CYLC_SUITE_PORT', 'CYLC_UTC',
-                    'CYLC_TASK_MSG_MAX_TRIES', 'CYLC_TASK_MSG_TIMEOUT',
-                    'CYLC_TASK_MSG_RETRY_INTVL']:
-                # (no exception handling here as these variables should
-                # always be present in the task execution environment)
-                env[var] = self.env_map.get( var, 'UNSET' )
-
-            # The path to cylc/bin on the remote end may be required:
-            path = [ os.path.join( self.env_map['CYLC_DIR_ON_SUITE_HOST'], 'bin' ) ]
-
-            if remrun().execute( env=env, path=path ):
-                # Return here if remote re-invocation occurred,
-                # otherwise drop through to local Pyro messaging.
-                # Note: do not sys.exit(0) here as the commands do, it
-                # will cause messaging failures on the remote host.
-                return
-
-        self.print_msg( msg )
-        self.send_pyro( msg )
-
-    def send_pyro( self, msg ):
+                print >>sys.stderr, "%s%s %s" % (
+                    prefix, self.priority, message)
+
+    def _send_by_pyro(self, messages):
+        """Send message by Pyro."""
+        self._print_messages(messages)
         from Pyro.errors import NamingError
         sent = False
-        itry = 0
-        while True:
-            itry += 1
+        i_try = 0
+        while not sent and i_try < self.max_tries:
+            i_try += 1
             try:
                 # Get a proxy for the remote object and send the message.
-                self.load_suite_contact_file() # might have change between tries
-                self.get_proxy().put( self.priority, msg )
-            except NamingError, x:
-                print >> sys.stderr, x
+                self._load_suite_contact_file()  # may change between tries
+                client = self._get_client()
+                for message in messages:
+                    client.put(self.priority, message)
+            except NamingError, exc:
+                print >> sys.stderr, exc
                 print "Send message: try %s of %s failed: %s" % (
-                    itry,
+                    i_try,
                     self.max_tries,
-                    x
+                    exc
                 )
                 print "Task proxy removed from suite daemon? Aborting."
                 break
-            except Exception, x:
-                print >> sys.stderr, x
+            except Exception, exc:
+                print >> sys.stderr, exc
                 print "Send message: try %s of %s failed: %s" % (
-                    itry,
+                    i_try,
                     self.max_tries,
-                    x
+                    exc
                 )
-                if itry >= self.max_tries:
+                if i_try >= self.max_tries:
                     break
                 print "   retry in %s seconds, timeout is %s" % (
                     self.retry_seconds,
                     self.try_timeout
                 )
-                sleep( self.retry_seconds )
+                sleep(self.retry_seconds)
             else:
-                if itry > 1:
+                if i_try > 1:
                     print "Send message: try %s of %s succeeded" % (
-                        itry,
+                        i_try,
                         self.max_tries
                     )
                 sent = True
-                break
         if not sent:
             # issue a warning and let the task carry on
             print >> sys.stderr, 'WARNING: MESSAGE SEND FAILED'
 
-    def send_started( self ):
-        self.send( self.task_id + ' started' )
-
-    def send_succeeded( self ):
-        self.send( self.task_id + ' succeeded' )
-
-    def send_failed( self ):
-        self.priority = 'CRITICAL'
-        if self.msg:
-            # send reason for failure first so it does not contaminate
-            # the special task failed message.
-            self.send()
-        self.send( self.task_id + ' failed' )
-
-    def shortcut_next_restart( self ):
-        self.print_msg( 'next restart file completed' )
-        if self.mode == 'scheduler':
-            self.get_proxy().set_next_restart_completed()
-
-    def shortcut_all_restarts( self ):
-        self.print_msg( 'all restart files completed' )
-        if self.mode == 'scheduler':
-            self.get_proxy().set_all_restarts_completed()
-
-    def shortcut_all_outputs( self ):
-        self.print_msg( 'all outputs completed' )
-        if self.mode == 'scheduler':
-            self.get_proxy().set_all_internal_outputs_completed()
+    def _send_by_ssh(self):
+        """Send message via SSH."""
+        self._load_suite_contact_file()
+
+        # The suite definition specified that this task should
+        # communicate back to the suite by means of using
+        # passwordless ssh to re-invoke the messaging command on the
+        # suite host.
+
+        # The remote_run() function expects command line options
+        # to identify the target user and host names:
+        sys.argv.append('--user=' + self.owner)
+        sys.argv.append('--host=' + self.host)
+        if cylc.flags.verbose:
+            sys.argv.append('-v')
+
+        if self.ssh_login_shell:
+            sys.argv.append('--login')
+        else:
+            sys.argv.append('--no-login')
+
+        # Some variables from the task execution environment are
+        # also required by the re-invoked remote command: Note that
+        # $CYLC_TASK_SSH_MESSAGING is not passed through so the
+        # re-invoked command on the remote side will not end up in
+        # this code block.
+        env = {}
+        for var in [
+                'CYLC_MODE', 'CYLC_TASK_ID', 'CYLC_VERBOSE',
+                'CYLC_SUITE_DEF_PATH_ON_SUITE_HOST',
+                'CYLC_SUITE_RUN_DIR',
+                'CYLC_SUITE_NAME', 'CYLC_SUITE_OWNER',
+                'CYLC_SUITE_HOST', 'CYLC_SUITE_PORT', 'CYLC_UTC',
+                'CYLC_TASK_MSG_MAX_TRIES', 'CYLC_TASK_MSG_TIMEOUT',
+                'CYLC_TASK_MSG_RETRY_INTVL']:
+            # (no exception handling here as these variables should
+            # always be present in the task execution environment)
+            env[var] = self.env_map.get(var, 'UNSET')
+
+        # The path to cylc/bin on the remote end may be required:
+        path = os.path.join(self.env_map['CYLC_DIR_ON_SUITE_HOST'], 'bin')
+
+        # Return here if remote re-invocation occurred,
+        # otherwise drop through to local Pyro messaging.
+        # Note: do not sys.exit(0) here as the commands do; that would
+        # cause messaging failures on the remote host.
+        try:
+            return remrun().execute(env=env, path=[path])
+        except SystemExit:
+            return
+
+    def _update_job_status_file(self, messages):
+        """Write messages to job status file."""
+        job_log_name = os.getenv("CYLC_TASK_LOG_ROOT")
+        job_status_file = None
+        if job_log_name:
+            try:
+                job_status_file = open(job_log_name + ".status", "ab")
+            except IOError as exc:
+                if cylc.flags.debug:
+                    print >>sys.stderr, exc
+        for i, message in enumerate(messages):
+            if job_status_file:
+                if message == self.STARTED:
+                    job_status_file.write(
+                        ("%s=%s\n" % (
+                            self.CYLC_JOB_PID, os.getenv(self.CYLC_JOB_PID))) +
+                        ("%s=%s\n" % (
+                            self.CYLC_JOB_INIT_TIME, self.true_event_time)))
+                elif message == self.SUCCEEDED:
+                    job_status_file.write(
+                        ("%s=%s\n" % (
+                            self.CYLC_JOB_EXIT, self.SUCCEEDED.upper())) +
+                        ("%s=%s\n" % (
+                            self.CYLC_JOB_EXIT_TIME, self.true_event_time)))
+                elif message == self.FAILED:
+                    job_status_file.write("%s=%s\n" % (
+                        self.CYLC_JOB_EXIT_TIME, self.true_event_time))
+                elif message.startswith(self.FAIL_MESSAGE_PREFIX):
+                    job_status_file.write("%s=%s\n" % (
+                        self.CYLC_JOB_EXIT,
+                        message.replace(self.FAIL_MESSAGE_PREFIX, "")))
+                elif message.startswith(self.VACATION_MESSAGE_PREFIX):
+                    # Job vacated, remove entries related to current job
+                    job_status_file_name = job_status_file.name
+                    job_status_file.close()
+                    lines = []
+                    for line in open(job_status_file_name):
+                        if not line.startswith("CYLC_JOB_"):
+                            lines.append(line)
+                    job_status_file = open(job_status_file_name, "wb")
+                    for line in lines:
+                        job_status_file.write(line)
+                    job_status_file.write("%s=%s|%s|%s\n" % (
+                        self.CYLC_MESSAGE, self.true_event_time, self.priority,
+                        message))
+                else:
+                    job_status_file.write("%s=%s|%s|%s\n" % (
+                        self.CYLC_MESSAGE, self.true_event_time, self.priority,
+                        message))
+            if message in self.STATUSES:
+                messages[i] = "%s %s" % (self.task_id, message)
+            messages[i] += ' at ' + self.true_event_time
+        if job_status_file:
+            try:
+                job_status_file.close()
+            except IOError as exc:
+                if cylc.flags.debug:
+                    print >>sys.stderr, exc
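
The reworked _send_by_pyro() above wraps the send in a bounded retry loop: a NamingError (task proxy gone from the suite daemon) aborts immediately, any other failure sleeps for the configured interval and tries again up to max_tries, and a final failure is only a warning so the job itself can carry on. A simplified, generic sketch of that retry shape (not the real client call; send_func is a stand-in):

    from time import sleep

    def send_with_retries(send_func, messages, max_tries, retry_seconds):
        for i_try in range(1, max_tries + 1):
            try:
                send_func(messages)
            except Exception as exc:
                print "Send message: try %s of %s failed: %s" % (
                    i_try, max_tries, exc)
                if i_try < max_tries:
                    sleep(retry_seconds)
            else:
                return True
        print "WARNING: MESSAGE SEND FAILED"
        return False
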
diff --git a/lib/cylc/task_output_logs.py b/lib/cylc/task_output_logs.py
index 9bbc731..e175d7c 100644
--- a/lib/cylc/task_output_logs.py
+++ b/lib/cylc/task_output_logs.py
@@ -18,38 +18,39 @@
 
 import re
 
-class logfiles( object ):
+
+class logfiles(object):
     # we need task output logs file to be mutable (i.e. not just strings) so
     # that changes to log paths in the job submit class are reflected in
     # the task class.
-    def __init__( self, path = None ):
+    def __init__(self, path=None):
         self.paths = []
         if path:
-            self.paths.append( path )
+            self.paths.append(path)
 
-    def add_path( self, path ):
-        self.paths.append( path )
+    def add_path(self, path):
+        self.paths.append(path)
 
-    def add_path_prepend( self, path ):
-        self.paths = [ path ] + self.paths
+    def add_path_prepend(self, path):
+        self.paths = [path] + self.paths
 
     # NO LONGER NEEDED:
-    #def replace_path( self, pattern, path, prepend=True ):
-    #    # replace a path that matches a pattern with another path
-    #    # (used to replace output logs when a failed task is reset)
-    #    for item in self.paths:
-    #        if re.match( pattern, item ):
-    #            #print 'REPLACING', item, 'WITH', path
-    #            self.paths.remove( item )
-    #            break
-    #    # add the new path even if a match to replace wasn't found
-    #    if prepend:
-    #        self.add_path_prepend( path )
-    #    else:
-    #        self.add_path( path )
-
-    def get_paths( self ):
+    # def replace_path(self, pattern, path, prepend=True):
+    #     # replace a path that matches a pattern with another path
+    #     # (used to replace output logs when a failed task is reset)
+    #     for item in self.paths:
+    #         if re.match(pattern, item):
+    #             #print 'REPLACING', item, 'WITH', path
+    #             self.paths.remove(item)
+    #             break
+    #     # add the new path even if a match to replace wasn't found
+    #     if prepend:
+    #         self.add_path_prepend(path)
+    #     else:
+    #         self.add_path(path)
+
+    def get_paths(self):
         return self.paths
 
-    def empty( self ):
+    def empty(self):
         self.paths = []
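
The comment at the top of logfiles explains why the log paths live in a mutable container: every holder of the same logfiles instance sees updates made by any other holder, which plain strings would not allow. A toy illustration of that sharing (the names here are made up):

    class PathHolder(object):
        def __init__(self):
            self.paths = []

    shared = PathHolder()
    alias = shared                        # a second reference to the same object
    shared.paths.append('/tmp/job.out')
    assert alias.paths == ['/tmp/job.out']  # the change is visible to both
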
diff --git a/lib/cylc/task_pool.py b/lib/cylc/task_pool.py
index 0450a5f..17d7e0c 100644
--- a/lib/cylc/task_pool.py
+++ b/lib/cylc/task_pool.py
@@ -34,35 +34,52 @@ as such, on restart, into the runahead pool.
 
 """
 
-import sys
-from cylc.task_state import task_state
-from cylc.broker import broker
-import cylc.flags
+from logging import ERROR, DEBUG, INFO, WARNING
+import os
 from Pyro.errors import NamingError
-from logging import WARNING, DEBUG, INFO
+import shlex
+import sys
+from tempfile import NamedTemporaryFile
+from time import time
+import traceback
 
-import cylc.rundb
+from cylc.batch_sys_manager import BATCH_SYS_MANAGER
+from cylc.broker import broker
+from cylc.cfgspec.globalcfg import GLOBAL_CFG
+from cylc.config import SuiteConfig
 from cylc.cycling.loader import (
     get_interval, get_interval_cls, ISO8601_CYCLING_TYPE)
 from cylc.CylcError import SchedulerError, TaskNotFoundError
-from cylc.prerequisites.plain_prerequisites import plain_prerequisites
-from cylc.broadcast import Broadcast
+import cylc.flags
+from cylc.get_task_proxy import get_task_proxy
+from cylc.mp_pool import SuiteProcPool, SuiteProcContext
+from cylc.network.ext_trigger import ExtTriggerServer
+from cylc.network.suite_broadcast import BroadcastServer
+from cylc.owner import is_remote_user
+from cylc.suite_host import is_remote_host
+from cylc.task_proxy import TaskProxy
+from cylc.task_state import task_state
 
 
 class TaskPool(object):
     """Task pool of a suite."""
 
-    def __init__(
-            self, suite, db, view_db, stop_point, config, pyro, log, run_mode):
+    JOBS_KILL = "jobs-kill"
+    JOBS_POLL = "jobs-poll"
+    JOBS_SUBMIT = "jobs-submit"
+
+    def __init__(self, suite, pri_dao, pub_dao, stop_point, pyro, log,
+                 run_mode):
+        self.suite_name = suite
         self.pyro = pyro
         self.run_mode = run_mode
         self.log = log
-        self.qconfig = config.cfg['scheduling']['queues']
         self.stop_point = stop_point
         self.reconfiguring = False
-        self.db = db
-        self.view_db = view_db
+        self.pri_dao = pri_dao
+        self.pub_dao = pub_dao
 
+        config = SuiteConfig.get_inst()
         self.custom_runahead_limit = config.get_custom_runahead_limit()
         self.max_future_offset = None
         self._prev_runahead_base_point = None
@@ -70,8 +87,7 @@ class TaskPool(object):
             config.get_max_num_active_cycle_points())
         self._prev_runahead_base_point = None
         self._prev_runahead_sequence_points = None
-
-        self.config = config
+        self.reload_warned = False
 
         self.pool = {}
         self.runahead_pool = {}
@@ -88,9 +104,6 @@ class TaskPool(object):
         self.hold_point = None
         self.held_future_tasks = []
 
-        self.wireless = Broadcast(config.get_linearized_ancestors())
-        self.pyro.connect(self.wireless, 'broadcast_receiver')
-
         self.broker = broker()
 
         self.orphans = []
@@ -98,9 +111,11 @@ class TaskPool(object):
 
     def assign_queues(self):
         """self.myq[taskname] = qfoo"""
+        config = SuiteConfig.get_inst()
+        qconfig = config.cfg['scheduling']['queues']
         self.myq = {}
-        for queue in self.qconfig:
-            for taskname in self.qconfig[queue]['members']:
+        for queue in qconfig:
+            for taskname in qconfig[queue]['members']:
                 self.myq[taskname] = queue
 
     def add_to_runahead_pool(self, itask):
@@ -130,16 +145,17 @@ class TaskPool(object):
             return False
 
         # add in held state if beyond the suite stop point
-
         if self.stop_point and itask.point > self.stop_point:
             itask.log(
                 INFO,
                 "holding (beyond suite stop point) " + str(self.stop_point))
             itask.reset_state_held()
 
+        # add in held state if beyond the suite hold point
         elif self.hold_point and itask.point > self.hold_point:
-            itask.log(INFO, "holding (beyond suite hold point) " +
-                      str(self.hold_point))
+            itask.log(
+                INFO,
+                "holding (beyond suite hold point) " + str(self.hold_point))
             itask.reset_state_held()
 
         # add in held state if a future trigger goes beyond the suite stop
@@ -150,7 +166,8 @@ class TaskPool(object):
             self.held_future_tasks.append(itask.identity)
             itask.reset_state_held()
         elif self.is_held and itask.state.is_currently("waiting"):
-            # hold newly-spawned tasks in a held suite (e.g. due to manual triggering of a held task)
+            # Hold newly-spawned tasks in a held suite (e.g. due to manual
+            # triggering of a held task).
             itask.reset_state_held()
 
         # add to the runahead pool
@@ -169,7 +186,7 @@ class TaskPool(object):
         # restart when all tasks are initially loaded into the runahead pool).
         for itask_id_maps in self.runahead_pool.values():
             for itask in itask_id_maps.values():
-                if itask.state.is_currently('failed', 'succeeded'):
+                if itask.state.is_currently('failed', 'succeeded', 'expired'):
                     self.release_runahead_task(itask)
                     self.rhpool_changed = True
 
@@ -180,7 +197,8 @@ class TaskPool(object):
                 self.get_tasks_by_point(incl_runahead=True).items()):
             has_unfinished_itasks = False
             for itask in itasks:
-                if not itask.state.is_currently('failed', 'succeeded'):
+                if not itask.state.is_currently(
+                        'failed', 'succeeded', 'expired'):
                     has_unfinished_itasks = True
                     break
             if not points and not has_unfinished_itasks:
@@ -201,7 +219,8 @@ class TaskPool(object):
             sequence_points = self._prev_runahead_sequence_points
         else:
             sequence_points = []
-            for sequence in self.config.sequences:
+            config = SuiteConfig.get_inst()
+            for sequence in config.sequences:
                 point = runahead_base_point
                 for _ in range(limit):
                     point = sequence.get_next_point(point)
@@ -231,8 +250,8 @@ class TaskPool(object):
                     self._prev_runahead_base_point != runahead_base_point):
                 if self.custom_runahead_limit < self.max_future_offset:
                     self.log.warning(
-                        'custom runahead limit of %s is less than ' +
-                        'future triggering offset %s: suite may stall.' % (
+                        ('custom runahead limit of %s is less than ' +
+                         'future triggering offset %s: suite may stall.') % (
                             self.custom_runahead_limit,
                             self.max_future_offset
                         )
@@ -345,7 +364,6 @@ class TaskPool(object):
         self.update_rhpool_list()
         return self.rhpool_list
 
-
     def get_tasks_by_point(self, incl_runahead):
         """Return a map of task proxies by cycle point."""
         point_itasks = {}
@@ -396,16 +414,17 @@ class TaskPool(object):
                 if itask.manual_trigger or itask.ready_to_run():
                     # queue the task
                     itask.set_status('queued')
-                    if itask.manual_trigger:
-                        itask.reset_manual_trigger()
+                    itask.reset_manual_trigger()
 
         # 2) submit queued tasks if manually forced or not queue-limited
-        readytogo = []
+        ready_tasks = []
+        config = SuiteConfig.get_inst()
+        qconfig = config.cfg['scheduling']['queues']
         for queue in self.queues:
             # 2.1) count active tasks and compare to queue limit
             n_active = 0
             n_release = 0
-            n_limit = self.qconfig[queue]['limit']
+            n_limit = qconfig[queue]['limit']
             tasks = self.queues[queue].values()
             if n_limit:
                 for itask in tasks:
@@ -423,23 +442,99 @@ class TaskPool(object):
                 if itask.manual_trigger or not n_limit or n_release > 0:
                     # manual release, or no limit, or not currently limited
                     n_release -= 1
-                    readytogo.append(itask)
-                    if itask.manual_trigger:
-                        itask.reset_manual_trigger()
+                    ready_tasks.append(itask)
+                    itask.reset_manual_trigger()
                 # else leave it queued
 
-        self.log.debug('%d task(s) de-queued' % len(readytogo))
+        self.log.debug('%d task(s) de-queued' % len(ready_tasks))
 
-        for itask in readytogo:
-            itask.submit(overrides=self.wireless.get(itask.identity))
+        self.submit_task_jobs(ready_tasks)
+
+    def submit_task_jobs(self, ready_tasks):
+        """Prepare and submit task jobs."""
+        if not ready_tasks:
+            return
 
-        return readytogo
+        # Prepare tasks for job submission
+        config = SuiteConfig.get_inst()
+        bcast = BroadcastServer.get_inst()
+        prepared_tasks = []
+        for itask in ready_tasks:
+            if (config.cfg['cylc']['log resolved dependencies'] and
+                    not itask.job_file_written):
+                itask.log(
+                    INFO,
+                    'triggered off %s' % itask.get_resolved_dependencies())
+            overrides = bcast.get(itask.identity)
+            if self.run_mode == 'simulation':
+                itask.job_submission_succeeded()
+            elif itask.prep_submit(overrides=overrides) is not None:
+                prepared_tasks.append(itask)
+
+        if not prepared_tasks:
+            return
+
+        # Submit task jobs
+        auth_itasks = {}
+        for itask in prepared_tasks:
+            # The job file is now (about to be) used: reset the file write flag
+            # so that subsequent manual retrigger will generate a new job file.
+            itask.job_file_written = False
+            itask.set_status('ready')
+            if (itask.task_host, itask.task_owner) not in auth_itasks:
+                auth_itasks[(itask.task_host, itask.task_owner)] = []
+            auth_itasks[(itask.task_host, itask.task_owner)].append(itask)
+        for auth, itasks in sorted(auth_itasks.items()):
+            cmd = ["cylc", self.JOBS_SUBMIT]
+            if cylc.flags.debug:
+                cmd.append("--debug")
+            host, owner = auth
+            remote_mode = False
+            for key, value, test_func in [
+                    ('host', host, is_remote_host),
+                    ('user', owner, is_remote_user)]:
+                if test_func(value):
+                    cmd.append('--%s=%s' % (key, value))
+                    remote_mode = True
+            if remote_mode:
+                cmd.append('--remote-mode')
+            cmd.append("--")
+            cmd.append(GLOBAL_CFG.get_derived_host_item(
+                self.suite_name, 'suite job log directory', host, owner))
+            stdin_file_paths = []
+            job_log_dirs = []
+            for itask in sorted(itasks, key=lambda itask: itask.identity):
+                if remote_mode:
+                    stdin_file_paths.append(
+                        itask.job_conf['local job file path'])
+                job_log_dirs.append(itask.get_job_log_dir(
+                    itask.tdef.name, itask.point, itask.submit_num))
+            cmd += job_log_dirs
+            SuiteProcPool.get_inst().put_command(
+                SuiteProcContext(
+                    self.JOBS_SUBMIT,
+                    cmd,
+                    stdin_file_paths=stdin_file_paths,
+                    job_log_dirs=job_log_dirs,
+                ),
+                self.submit_task_jobs_callback)
+
+    def submit_task_jobs_callback(self, ctx):
+        """Callback when submit task jobs command exits."""
+        self._manip_task_jobs_callback(
+            ctx,
+            lambda itask, line: itask.job_submit_callback(line),
+            {
+                BATCH_SYS_MANAGER.OUT_PREFIX_COMMAND:
+                lambda itask, line: itask.job_cmd_out_callback(line),
+            },
+        )
 
     def task_has_future_trigger_overrun(self, itask):
         """Check for future triggers extending beyond the final cycle."""
         if not self.stop_point:
             return False
-        for pct in set(itask.prerequisites.get_target_points()):
+        for pct in set(itask.prerequisites_get_target_points()):
             if pct > self.stop_point:
                 return True
         return False
@@ -497,19 +592,18 @@ class TaskPool(object):
                 max_offset = itask.tdef.max_future_prereq_offset
         self.max_future_offset = max_offset
 
-    def reconfigure(self, config, stop_point):
+    def reconfigure(self, stop_point):
         """Set the task pool to reload mode."""
         self.reconfiguring = True
 
+        config = SuiteConfig.get_inst()
         self.custom_runahead_limit = config.get_custom_runahead_limit()
         self.max_num_active_cycle_points = (
             config.get_max_num_active_cycle_points())
-        self.config = config
         self.stop_point = stop_point
 
         # reassign live tasks from the old queues to the new.
         # self.queues[queue][id_] = task
-        self.qconfig = config.cfg['scheduling']['queues']
         self.assign_queues()
         new_queues = {}
         for queue in self.queues:
@@ -537,6 +631,9 @@ class TaskPool(object):
     def reload_taskdefs(self):
         """Reload task definitions."""
         found = False
+
+        config = SuiteConfig.get_inst()
+
         for itask in self.get_all_tasks():
             if itask.state.is_currently('ready', 'submitted', 'running'):
                 # do not reload active tasks as it would be possible to
@@ -563,7 +660,7 @@ class TaskPool(object):
                 else:
                     self.log.info(
                         'RELOADING TASK DEFINITION FOR ' + itask.identity)
-                    new_task = self.config.get_task_proxy(
+                    new_task = get_task_proxy(
                         itask.tdef.name,
                         itask.point,
                         itask.state.get_status(),
@@ -578,7 +675,7 @@ class TaskPool(object):
                         new_task.state.set_unspawned()
                     # succeeded tasks need their outputs set completed:
                     if itask.state.is_currently('succeeded'):
-                        new_task.reset_state_succeeded(manual=False)
+                        new_task.reset_state_succeeded()
 
                     # carry some task proxy state over to the new instance
                     new_task.logfiles = itask.logfiles
@@ -590,27 +687,24 @@ class TaskPool(object):
                     # if currently retrying, retain the old retry delay
                     # list, to avoid extra retries (the next instance
                     # of the task will still be as newly configured)
-                    if itask.state.is_currently('retrying'):
-                        new_task.retry_delay = itask.retry_delay
-                        new_task.retry_delays = itask.retry_delays
-                        new_task.retry_delay_timer_timeout = (
-                            itask.retry_delay_timer_timeout)
-                    elif itask.state.is_currently('submit-retrying'):
-                        new_task.sub_retry_delay = itask.sub_retry_delay
-                        new_task.sub_retry_delays = itask.sub_retry_delays
-                        new_task.sub_retry_delays_orig = (
-                            itask.sub_retry_delays_orig)
-                        new_task.sub_retry_delay_timer_timeout = (
-                            itask.sub_retry_delay_timer_timeout)
-
-                    new_task.try_number = itask.try_number
-                    new_task.sub_try_number = itask.sub_try_number
+                    new_task.run_try_state = itask.run_try_state
+                    new_task.sub_try_state = itask.sub_try_state
                     new_task.submit_num = itask.submit_num
-                    new_task.db_queue = itask.db_queue
+                    new_task.db_inserts_map = itask.db_inserts_map
+                    new_task.db_updates_map = itask.db_updates_map
 
                     self.remove(itask, '(suite definition reload)')
                     self.add_to_runahead_pool(new_task)
 
+        if found:
+            if not self.reload_warned:
+                self.log.warning(
+                    "Reload will complete once active tasks have finished.")
+                self.reload_warned = True
+        else:
+            self.log.info("Reload completed.")
+            self.reload_warned = False
+
         self.reconfiguring = found
 
     def set_stop_point(self, stop_point):
@@ -626,29 +720,165 @@ class TaskPool(object):
                 itask.reset_state_held()
 
     def no_active_tasks(self):
+        """Return True if no more active tasks."""
         for itask in self.get_tasks():
-            if itask.state.is_currently('running', 'submitted'):
+            if itask.is_active() or itask.event_handler_try_states:
                 return False
         return True
 
-    def poll_tasks(self, ids=None):
+    def has_unkillable_tasks_only(self):
+        """Used to identify if a task pool contains unkillable tasks.
+
+        Return True if all running and submitted tasks in the pool have had
+        kill operations fail, False otherwise.
+        """
         for itask in self.get_tasks():
             if itask.state.is_currently('running', 'submitted'):
-                if ids is None:
-                    itask.poll()
-                elif itask.identity in ids:
-                    itask.poll()
+                if not itask.kill_failed:
+                    return False
+        return True
 
-    def kill_active_tasks(self):
-        for itask in self.get_tasks():
-            if itask.state.is_currently('submitted', 'running'):
-                itask.kill()
+    def poll_task_jobs(self, ids=None):
+        """Poll jobs of active tasks.
+
+        If ids is specified, poll active tasks matching given IDs.
+
+        """
+        if self.run_mode == 'simulation':
+            return
+        itasks = []
+        for itask in self.get_all_tasks():
+            if ids and itask.identity not in ids:
+                continue
+            if itask.is_active():
+                if itask.job_conf is None:
+                    try:
+                        itask.prep_manip()
+                    except Exception as exc:
+                        # Note: Exception is most likely some kind of IOError
+                        # or OSError. Need to catch Exception here because it
+                        # can also be an Exception raised by
+                        # cylc.suite_host.is_remote_host
+                        itask.command_log(SuiteProcContext(
+                            itask.JOB_POLL, '(prepare job poll)', err=exc,
+                            ret_code=1))
+                        continue
+                itasks.append(itask)
+            elif ids and itask.identity in ids:  # and not is_active
+                self.log.warning(
+                    '%s: skip poll, state not ["submitted", "running"]' % (
+                        itask.identity))
+        if not itasks:
+            return
+        self._run_job_cmd(self.JOBS_POLL, itasks, self.poll_task_jobs_callback)
+
+    def poll_task_jobs_callback(self, ctx):
+        """Callback when poll tasks command exits."""
+        self._manip_task_jobs_callback(
+            ctx,
+            lambda itask, line: itask.job_poll_callback(line),
+            {
+                BATCH_SYS_MANAGER.OUT_PREFIX_MESSAGE:
+                lambda itask, line: itask.job_poll_message_callback(line),
+            },
+        )
 
-    def kill_tasks(self, ids):
+    def kill_task_jobs(self, ids=None):
+        """Kill jobs of active tasks.
+
+        If ids is specified, kill active tasks matching given IDs.
+
+        """
+        itasks = []
+        for itask in self.get_all_tasks():
+            if ids and itask.identity not in ids:
+                continue
+            is_active = itask.is_active()
+            if is_active and self.run_mode == 'simulation':
+                itask.reset_state_failed()
+            elif is_active and itask.tdef.rtconfig['manual completion']:
+                self.log(
+                    WARNING,
+                    "%s: skip kill, detaching task (job ID unknown)" % (
+                        itask.identity))
+            elif is_active:
+                if itask.job_conf is None:
+                    try:
+                        itask.prep_manip()
+                    except Exception as exc:
+                        # Note: Exception is most likely some kind of IOError
+                        # or OSError. Need to catch Exception here because it
+                        # can also be an Exception raised by
+                        # cylc.suite_host.is_remote_host
+                        itask.command_log(SuiteProcContext(
+                            itask.JOB_KILL, '(prepare job kill)', err=exc,
+                            ret_code=1))
+                        continue
+                itask.reset_state_held()
+                itasks.append(itask)
+            elif ids and itask.identity in ids:  # and not is_active
+                self.log.warning(
+                    '%s: skip kill, state not ["submitted", "running"]' % (
+                        itask.identity))
+        if not itasks:
+            return
+        self._run_job_cmd(self.JOBS_KILL, itasks, self.kill_task_jobs_callback)
+
+    def kill_task_jobs_callback(self, ctx):
+        """Callback when kill tasks command exits."""
+        self._manip_task_jobs_callback(
+            ctx,
+            lambda itask, line: itask.job_kill_callback(line),
+            {
+                BATCH_SYS_MANAGER.OUT_PREFIX_COMMAND:
+                lambda itask, line: itask.job_cmd_out_callback(line),
+            },
+        )
+
+    def _manip_task_jobs_callback(
+            self, ctx, summary_callback, more_callbacks=None):
+        """Callback when poll/kill tasks command exits."""
+        if ctx.ret_code:
+            self.log.error(ctx)
+        else:
+            self.log.debug(ctx)
+        tasks = {}
+        # Note for "kill": It is possible for a job to trigger its trap and
+        # report back to the suite before this logic is called. If so, the
+        # task
+        # will no longer be in the "submitted" or "running" state, and its
+        # output line will be ignored here.
         for itask in self.get_tasks():
-            if itask.identity in ids:
-                # (state check done in task module)
-                itask.kill()
+            if itask.point is not None and itask.submit_num:
+                submit_num = "%02d" % (itask.submit_num)
+                tasks[(str(itask.point), itask.tdef.name, submit_num)] = itask
+        handlers = [(BATCH_SYS_MANAGER.OUT_PREFIX_SUMMARY, summary_callback)]
+        if more_callbacks:
+            for prefix, callback in more_callbacks.items():
+                handlers.append((prefix, callback))
+        if not ctx.out:
+            # Something is very wrong here.
+            # Fall back to the "job_log_dirs" list to report the problem.
+            job_log_dirs = ctx.cmd_kwargs.get("job_log_dirs", [])
+            for job_log_dir in job_log_dirs:
+                point, name, submit_num = job_log_dir.split(os.sep, 2)
+                itask = tasks[(point, name, submit_num)]
+                callback(itask, "|".join([ctx.timestamp, job_log_dir, "1"]))
+            return
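+        # Each matching output line is expected to look roughly like
+        #   <prefix><timestamp>|<point>/<name>/<submit_num>|<status>
+        # (field values illustrative); the middle field identifies the task.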
+        for line in ctx.out.splitlines(True):
+            for prefix, callback in handlers:
+                if line.startswith(prefix):
+                    line = line[len(prefix):].strip()
+                    try:
+                        path = line.split("|", 2)[1]  # timestamp, path, status
+                        point, name, submit_num = path.split(os.sep, 2)
+                        itask = tasks[(point, name, submit_num)]
+                        callback(itask, line)
+                    except (KeyError, ValueError) as exc:
+                        if cylc.flags.debug:
+                            self.log.warning(
+                                'Unhandled %s output: %s' % (
+                                    ctx.cmd_key, line))
+                            traceback.print_exc()
 
     def get_hold_point(self):
         """Return the point after which tasks must be held."""
@@ -721,74 +951,253 @@ class TaskPool(object):
         """Handle incoming task messages for each task proxy."""
         for itask in self.get_tasks():
             itask.process_incoming_messages()
- 
-    def process_queued_db_ops(self):
-        """Handle queued db operations for each task proxy."""
-        state_recorders = []
-        state_updaters = []
-        event_recorders = []
-        other = []
 
-        for itask in self.get_all_tasks():
-            # (runahead pool tasks too, to get new state recorders).
-            for oper in itask.get_db_ops():
-                if isinstance(oper, cylc.rundb.UpdateObject):
-                    state_updaters += [oper]
-                elif isinstance(oper, cylc.rundb.RecordStateObject):
-                    state_recorders += [oper]
-                elif isinstance(oper, cylc.rundb.RecordEventObject):
-                    event_recorders += [oper]
-                else:
-                    other += [oper]
-
-        # precedence is record states > update_states > record_events >
-        # anything_else
-        db_ops = state_recorders + state_updaters + event_recorders + other
-        # compact the set of operations
-        if len(db_ops) > 1:
-            db_opers = [db_ops[0]]
-            for i in range(1, len(db_ops)):
-                if db_opers[-1].s_fmt == db_ops[i].s_fmt:
-                    if isinstance(db_opers[-1], cylc.rundb.BulkDBOperObject):
-                        db_opers[-1].add_oper(db_ops[i])
+    def process_queued_task_event_handlers(self):
+        """Process task event handlers."""
+        ctx_groups = {}
+        env = None
+        for itask in self.get_tasks():
+            for key, try_state in itask.event_handler_try_states.items():
+                # This should not happen, ignore for now.
+                if try_state.ctx is None:
+                    del itask.event_handler_try_states[key]
+                    continue
+                if try_state.is_waiting:
+                    continue
+                # Set timer if timeout is None.
+                if not try_state.is_timeout_set():
+                    if try_state.next() is None:
+                        itask.log(ERROR, "%s failed" % str(key))
+                        del itask.event_handler_try_states[key]
+                        continue
+                    # Report 1st and retries
+                    if try_state.num == 1:
+                        level = INFO
+                        tmpl = "%s will run after %s (after %s)"
                     else:
-                        new_oper = cylc.rundb.BulkDBOperObject(db_opers[-1])
-                        new_oper.add_oper(db_ops[i])
-                        db_opers.pop(-1)
-                        db_opers += [new_oper]
+                        level = WARNING
+                        tmpl = "%s failed, retrying in %s (after %s)"
+                    itask.log(level, tmpl % (
+                        str(key),
+                        try_state.delay_as_seconds(),
+                        try_state.timeout_as_str()))
+                # Ready to run?
+                if not try_state.is_delay_done():
+                    continue
+                try_state.set_waiting()
+
+                if try_state.ctx.ctx_type == TaskProxy.CUSTOM_EVENT_HANDLER:
+                    # Run custom event handlers on their own
+                    if env is None:
+                        env = dict(os.environ)
+                        if TaskProxy.event_handler_env:
+                            env.update(TaskProxy.event_handler_env)
+                    SuiteProcPool.get_inst().put_command(
+                        SuiteProcContext(
+                            key, try_state.ctx.cmd, env=env, shell=True,
+                        ),
+                        itask.custom_event_handler_callback)
+                else:
+                    # Group together built-in event handlers, where possible
+                    if try_state.ctx not in ctx_groups:
+                        ctx_groups[try_state.ctx] = []
+                    # "itask.submit_num" may have moved on at this point
+                    key1, submit_num = key
+                    ctx_groups[try_state.ctx].append(
+                        (key1, str(itask.point), itask.tdef.name, submit_num))
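+                    # Each id_keys entry thus has the form
+                    #   (key1, point_string, task_name, submit_num),
+                    # e.g. (<handler key>, "20100101T00", "foo", 1)
+                    # (values illustrative).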
+
+        for ctx, id_keys in ctx_groups.items():
+            if ctx.ctx_type == TaskProxy.EVENT_MAIL:
+                self._process_task_event_email(ctx, id_keys)
+            elif ctx.ctx_type == TaskProxy.JOB_LOGS_REGISTER:
+                self._process_task_job_logs_register(ctx, id_keys)
+            elif ctx.ctx_type == TaskProxy.JOB_LOGS_RETRIEVE:
+                self._process_task_job_logs_retrieval(ctx, id_keys)
+
+    def _process_task_event_email(self, ctx, id_keys):
+        """Process event notification, by email."""
+        subject = "[%(n_tasks)d task(s) %(event)s] %(suite_name)s" % {
+            "suite_name": self.suite_name,
+            "n_tasks": len(id_keys),
+            "event": ctx.event}
+        cmd = ["mail", "-s", subject]
+        # From: and To:
+        cmd.append("-r")
+        cmd.append(ctx.mail_from)
+        cmd.append(ctx.mail_to)
+        # Tasks
+        stdin_str = ""
+        for _, point, name, submit_num in id_keys:
+            stdin_str += "%s/%s/%02d: %s\n" % (
+                point, name, submit_num, ctx.event)
+        # SMTP server
+        env = dict(os.environ)
+        mail_smtp = ctx.mail_smtp
+        if mail_smtp:
+            env["smtp"] = mail_smtp
+        SuiteProcPool.get_inst().put_command(
+            SuiteProcContext(
+                ctx, cmd, env=env, stdin_str=stdin_str, id_keys=id_keys,
+            ),
+            self._task_event_email_callback)
+
+    def _task_event_email_callback(self, ctx):
+        """Call back when email notification command exits."""
+        tasks = {}
+        for itask in self.get_tasks():
+            if itask.point is not None and itask.submit_num:
+                tasks[(str(itask.point), itask.tdef.name)] = itask
+        for id_key in ctx.cmd_kwargs["id_keys"]:
+            key1, point, name, submit_num = id_key
+            try:
+                itask = tasks[(point, name)]
+                try_states = itask.event_handler_try_states
+                if ctx.ret_code == 0:
+                    del try_states[(key1, submit_num)]
+                    log_ctx = SuiteProcContext((key1, submit_num), None)
+                    log_ctx.ret_code = 0
+                    itask.command_log(log_ctx)
+                else:
+                    try_states[(key1, submit_num)].unset_waiting()
+            except KeyError:
+                if cylc.flags.debug:
+                    traceback.print_exc()
+
+    def _process_task_job_logs_register(self, ctx, id_keys):
+        """Register task job logs."""
+        tasks = {}
+        for itask in self.get_tasks():
+            if itask.point is not None and itask.submit_num:
+                tasks[(str(itask.point), itask.tdef.name)] = itask
+        for id_key in id_keys:
+            key1, point, name, submit_num = id_key
+            try:
+                itask = tasks[(point, name)]
+                try_states = itask.event_handler_try_states
+                filenames = itask.register_job_logs(submit_num)
+                if "job.out" in filenames and "job.err" in filenames:
+                    log_ctx = SuiteProcContext((key1, submit_num), None)
+                    log_ctx.ret_code = 0
+                    itask.command_log(log_ctx)
+                    del try_states[(key1, submit_num)]
                 else:
-                    db_opers += [db_ops[i]]
+                    try_states[(key1, submit_num)].unset_waiting()
+            except KeyError:
+                if cylc.flags.debug:
+                    traceback.print_exc()
+
+    def _process_task_job_logs_retrieval(self, ctx, id_keys):
+        """Process retrieval of task job logs from remote user at host."""
+        if ctx.user_at_host and "@" in ctx.user_at_host:
+            s_user, s_host = ctx.user_at_host.split("@", 1)
         else:
-            db_opers = db_ops
+            s_user, s_host = (None, ctx.user_at_host)
+        ssh_tmpl = str(GLOBAL_CFG.get_host_item(
+            "remote shell template", s_host, s_user)).replace(" %s", "")
+        rsync_str = str(GLOBAL_CFG.get_host_item(
+            "retrieve job logs command", s_host, s_user))
+
+        cmd = shlex.split(rsync_str) + ["--rsh=" + ssh_tmpl]
+        if cylc.flags.debug:
+            cmd.append("-v")
+        if ctx.max_size:
+            cmd.append("--max-size=%s" % (ctx.max_size,))
+        # Includes and excludes
+        includes = set()
+        for _, point, name, submit_num in id_keys:
+            # Include relevant directories, all levels needed
+            includes.add("/%s" % (point))
+            includes.add("/%s/%s" % (point, name))
+            includes.add("/%s/%s/%02d" % (point, name, submit_num))
+            includes.add("/%s/%s/%02d/**" % (point, name, submit_num))
+        cmd += ["--include=%s" % (include) for include in sorted(includes)]
+        cmd.append("--exclude=/**")  # exclude everything else
+        # Remote source
+        cmd.append(ctx.user_at_host + ":" + GLOBAL_CFG.get_derived_host_item(
+            self.suite_name, "suite job log directory", s_host, s_user) + "/")
+        # Local target
+        cmd.append(GLOBAL_CFG.get_derived_host_item(
+            self.suite_name, "suite job log directory") + "/")
+        SuiteProcPool.get_inst().put_command(
+            SuiteProcContext(ctx, cmd, env=dict(os.environ), id_keys=id_keys),
+            self._task_job_logs_retrieval_callback)
+
+    def _task_job_logs_retrieval_callback(self, ctx):
+        """Call back when log job retrieval completes."""
+        tasks = {}
+        for itask in self.get_tasks():
+            if itask.point is not None and itask.submit_num:
+                tasks[(str(itask.point), itask.tdef.name)] = itask
+        for id_key in ctx.cmd_kwargs["id_keys"]:
+            key1, point, name, submit_num = id_key
+            try:
+                itask = tasks[(point, name)]
+                try_states = itask.event_handler_try_states
+                filenames = []
+                if ctx.ret_code == 0:
+                    filenames = itask.register_job_logs(submit_num)
+                if "job.out" in filenames and "job.err" in filenames:
+                    log_ctx = SuiteProcContext((key1, submit_num), None)
+                    log_ctx.ret_code = 0
+                    itask.command_log(log_ctx)
+                    del try_states[(key1, submit_num)]
+                else:
+                    try_states[(key1, submit_num)].unset_waiting()
+            except KeyError:
+                if cylc.flags.debug:
+                    traceback.print_exc()
+
+    def process_queued_db_ops(self):
+        """Handle queued db operations for each task proxy."""
+        for itask in self.get_all_tasks():
+            # (runahead pool tasks too, to get new state recorders).
+            for table_name, db_inserts in sorted(itask.db_inserts_map.items()):
+                while db_inserts:
+                    db_insert = db_inserts.pop(0)
+                    db_insert.update({
+                        "name": itask.tdef.name,
+                        "cycle": str(itask.point),
+                    })
+                    if "submit_num" not in db_insert:
+                        db_insert["submit_num"] = itask.submit_num
+                    self.pri_dao.add_insert_item(table_name, db_insert)
+                    self.pub_dao.add_insert_item(table_name, db_insert)
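+                    # Each queued insert is a plain column->value map, e.g.
+                    # {"name": "foo", "cycle": "20100101T00",
+                    #  "submit_num": 1, ...} (values illustrative), queued on
+                    # both the private and public DAOs.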
+
+            for table_name, db_updates in sorted(itask.db_updates_map.items()):
+                while db_updates:
+                    set_args = db_updates.pop(0)
+                    where_args = {
+                        "cycle": str(itask.point), "name": itask.tdef.name}
+                    if "submit_num" not in set_args:
+                        where_args["submit_num"] = itask.submit_num
+                    self.pri_dao.add_update_item(
+                        table_name, set_args, where_args)
+                    self.pub_dao.add_update_item(
+                        table_name, set_args, where_args)
 
         # record any broadcast settings to be dumped out
-        if self.wireless:
-            for db_oper in self.wireless.get_db_ops():
-                db_opers += [db_oper]
-
-        for db_oper in db_opers:
-            if self.db.c.is_alive():
-                self.db.run_db_op(db_oper)
-            elif self.db.c.exception:
-                self.view_db.close()
-                raise self.db.c.exception
-            else:
-                raise SchedulerError(
-                    'An unexpected error occurred while writing to the ' +
-                    'suite database')
-
-        # we should filter down to only recording the utility relevent
-        # entries in the viewable database following database refactoring
-        for db_oper in db_opers:
-            if self.view_db.c.is_alive():
-                self.view_db.run_db_op(db_oper)
-            elif self.view_db.c.exception:
-                self.db.close()
-                raise self.view_db.c.exception
-            else:
-                raise SchedulerError(
-                    'An unexpected error occurred while writing to the ' +
-                    'viewable database')
+        bcast = BroadcastServer.get_inst()
+        for table_name, db_inserts in sorted(bcast.db_inserts_map.items()):
+            while db_inserts:
+                db_insert = db_inserts.pop(0)
+                self.pri_dao.add_insert_item(table_name, db_insert)
+                self.pub_dao.add_insert_item(table_name, db_insert)
+        for table_name, db_deletes in sorted(bcast.db_deletes_map.items()):
+            while db_deletes:
+                where_args = db_deletes.pop(0)
+                self.pri_dao.add_delete_item(table_name, where_args)
+                self.pub_dao.add_delete_item(table_name, where_args)
+
+        # Previously, we used a separate thread for database writes. This has
+        # now been removed. For the private database, there is no real
+        # advantage in using a separate thread, because we want it to be like
+        # the state dump - always in sync with what is current. For the public
+        # database, which does not need to be fully in sync, there is some
+        # advantage of using a separate thread/process, if writing to it
+        # becomes a bottleneck. At the moment, there is no evidence that this
+        # is a bottleneck, so it is better to keep the logic simple.
+        self.pri_dao.execute_queued_items()
+        self.pub_dao.execute_queued_items()
 
     def force_spawn(self, itask):
         """Spawn successor of itask."""
@@ -811,8 +1220,8 @@ class TaskPool(object):
     def remove_suiciding_tasks(self):
         """Remove any tasks that have suicide-triggered."""
         for itask in self.get_tasks():
-            if itask.suicide_prerequisites.count() != 0:
-                if itask.suicide_prerequisites.all_satisfied():
+            if itask.suicide_prerequisites:
+                if itask.suicide_prerequisites_are_all_satisfied():
                     if itask.state.is_currently(
                             'ready', 'submitted', 'running'):
                         itask.log(WARNING, 'suiciding while active')
@@ -844,28 +1253,26 @@ class TaskPool(object):
         prerequisites.  Each task proxy knows its "cleanup cutoff" from the
         graph. For example:
           graph = 'foo[T-6]=>bar \n foo[T-12]=>baz'
-        implies foo's cutoff is T+12: if foo has succeeded and spawned,
-        it can be removed if no unsatisfied task proxy exists with
+        implies foo's cutoff is T+12: if foo has succeeded (or expired) and
+        spawned, it can be removed if no unsatisfied task proxy exists with
         T<=T+12. Note this only uses information about the cycle point of
         downstream dependents - if we used specific IDs instead spent
         tasks could be identified and removed even earlier.
 
         """
-
         # first find the cycle point of the earliest unsatisfied task
         cutoff = self._get_earliest_unsatisfied_point()
-
         if not cutoff:
             return
 
         # now check each succeeded task against the cutoff
         spent = []
         for itask in self.get_tasks():
-            if not itask.state.is_currently('succeeded') or \
-                    not itask.state.has_spawned() or \
-                    itask.cleanup_cutoff is None:
-                continue
-            if cutoff > itask.cleanup_cutoff:
+            if (itask.state.is_currently('succeeded', 'expired') and
+                    itask.state.has_spawned() and
+                    not itask.event_handler_try_states and
+                    itask.cleanup_cutoff is not None and
+                    cutoff > itask.cleanup_cutoff):
                 spent.append(itask)
         for itask in spent:
             self.remove(itask)
@@ -925,15 +1332,45 @@ class TaskPool(object):
                 if not itask.state.is_currently('queued'):
                     itask.reset_state_ready()
 
-    def dry_run_task(self, id):
+    def dry_run_task(self, id_):
+        """Create job file for "cylc trigger --edit"."""
+        bcast = BroadcastServer.get_inst()
         for itask in self.get_tasks():
-            if itask.identity == id:
-                itask.submit(overrides=self.wireless.get(itask.identity),
-                             dry_run=True)
+            if itask.identity == id_:
+                itask.prep_submit(
+                    overrides=bcast.get(itask.identity), dry_run=True)
 
     def check_task_timers(self):
+        """Check submission and execution timeout timers for current tasks.
+
+        Not called in simulation mode.
+
+        """
+        now = time()
+        poll_task_ids = set()
         for itask in self.get_tasks():
-            itask.check_timers()
+            if itask.state.is_currently('submitted'):
+                if (itask.submission_timer_timeout is not None and
+                        now > itask.submission_timer_timeout):
+                    itask.handle_submission_timeout()
+                    itask.submission_timer_timeout = None
+                    poll_task_ids.add(itask.identity)
+                if (itask.submission_poll_timer and
+                        itask.submission_poll_timer.get()):
+                    itask.submission_poll_timer.set_timer()
+                    poll_task_ids.add(itask.identity)
+            elif itask.state.is_currently('running'):
+                if (itask.execution_timer_timeout is not None and
+                        now > itask.execution_timer_timeout):
+                    itask.handle_execution_timeout()
+                    itask.execution_timer_timeout = None
+                    poll_task_ids.add(itask.identity)
+                if (itask.execution_poll_timer and
+                        itask.execution_poll_timer.get()):
+                    itask.execution_poll_timer.set_timer()
+                    poll_task_ids.add(itask.identity)
+        if poll_task_ids:
+            self.poll_task_jobs(poll_task_ids)
 
     def check_auto_shutdown(self):
         """Check if we should do a normal automatic shutdown."""
@@ -941,11 +1378,12 @@ class TaskPool(object):
         for itask in self.get_all_tasks():
             if self.stop_point is None:
                 # Don't if any unsucceeded task exists.
-                if not itask.state.is_currently('succeeded'):
+                if (not itask.state.is_currently('succeeded', 'expired') or
+                        itask.event_handler_try_states):
                     shutdown = False
                     break
             elif (itask.point <= self.stop_point and
-                    not itask.state.is_currently('succeeded')):
+                    not itask.state.is_currently('succeeded', 'expired')):
                 # Don't if any unsucceeded task exists < stop point...
                 if itask.identity not in self.held_future_tasks:
                     # ...unless it has a future trigger extending > stop point.
@@ -966,10 +1404,12 @@ class TaskPool(object):
     def shutdown(self):
         if not self.no_active_tasks():
             self.log.warning("some active tasks will be orphaned")
-        self.pyro.disconnect(self.wireless)
         for itask in self.get_tasks():
-            if itask.message_queue:
+            try:
                 self.pyro.disconnect(itask.message_queue)
+            except KeyError:
+                # Wasn't connected yet.
+                pass
 
     def waiting_tasks_ready(self):
         """Waiting tasks can become ready for internal reasons.
@@ -984,16 +1424,6 @@ class TaskPool(object):
                 break
         return result
 
-    def add_prereq_to_task(self, id_, msg):
-        for itask in self.get_tasks():
-            if itask.identity == id_:
-                prereq = plain_prerequisites(id_)
-                prereq.add(msg)
-                itask.prerequisites.add_requisites(prereq)
-                break
-        else:
-            raise TaskNotFoundError("Task not present in suite: " + id_)
-
     def task_succeeded(self, id_):
         res = False
         for itask in self.get_tasks():
@@ -1023,17 +1453,22 @@ class TaskPool(object):
                     return False, "task not running"
 
     def get_task_jobfile_path(self, id_):
+        """Return a task job log dir, sans submit number.
+
+        TODO - this method name (and same in scheduler.py) should be changed.
+
+        """
         found = False
-        running = False
         for itask in self.get_tasks():
             if itask.identity == id_:
                 found = True
-                jobfile_path = itask.job_conf['local job file path']
+                job_parent_dir = os.path.dirname(itask.get_job_log_dir(
+                    itask.tdef.name, itask.point, suite=self.suite_name))
                 break
         if not found:
             return False, "task not found"
         else:
-            return True, jobfile_path
+            return True, job_parent_dir
 
     def get_task_requisites(self, taskid):
         info = {}
@@ -1043,14 +1478,19 @@ class TaskPool(object):
             if id_ == taskid:
                 found = True
                 extra_info = {}
-                # extra info for clocktriggered tasks
                 if itask.tdef.clocktrigger_offset is not None:
                     extra_info['Clock trigger time reached'] = (
                         itask.start_time_reached())
                     extra_info['Triggers at'] = itask.delayed_start_str
+                for trig, satisfied in itask.external_triggers.items():
+                    if satisfied:
+                        state = 'satisfied'
+                    else:
+                        state = 'NOT satisfied'
+                    extra_info['External trigger "%s"' % trig] = state
 
                 info[id_] = [
-                    itask.prerequisites.dump(),
+                    itask.prerequisites_dump(),
                     itask.outputs.dump(),
                     extra_info,
                 ]
@@ -1058,107 +1498,45 @@ class TaskPool(object):
             self.log.warning('task state info request: task(s) not found')
         return info
 
-    def purge_tree(self, id_, stop):
-        """Remove an entire dependency tree.
-
-        Remove an entire dependency tree rooted on the target task,
-        through to the given stop time (inclusive). In general this
-        involves tasks that do not even exist yet within the pool.
-
-        Method: trigger the target task *virtually* (i.e. without
-        running the real task) by: setting it to the succeeded state,
-        setting all of its outputs completed, and forcing it to spawn.
-        (this is equivalent to instantaneous successful completion as
-        far as cylc is concerned). Then enter the normal dependency
-        negotation process to trace the downstream effects of this,
-        also triggering subsequent tasks virtually. Each time a task
-        triggers mark it as a dependency of the target task for later
-        deletion (but not immmediate deletion because other downstream
-        tasks may still trigger off its outputs).  Downstream tasks
-        (freshly spawned or not) are not triggered if they have passed
-        the stop time, and the process is stopped is soon as a
-        dependency negotation round results in no new tasks
-        triggering.
-
-        Finally, reset the prerequisites of all tasks spawned during
-        the purge to unsatisfied, since they may have been satisfied
-        by the purged tasks in the "virtual" dependency negotiations.
-
-        TODO - THINK ABOUT WHETHER THIS CAN APPLY TO TASKS THAT
-        ALREADY EXISTED PRE-PURGE, NOT ONLY THE JUST-SPAWNED ONES. If
-        so we should explicitly record the tasks that get satisfied
-        during the purge.
-
-        Purge is an infrequently used power tool, so print
-        comprehensive information on what it does to stdout.
-
-        """
-
-        print
-        print "PURGE ALGORITHM RESULTS:"
+    def match_ext_triggers(self):
+        """See if any queued external event messages can trigger tasks."""
+        ets = ExtTriggerServer.get_inst()
+        for itask in self.get_tasks():
+            if itask.external_triggers:
+                ets.retrieve(itask)
 
-        die = []
-        spawn = []
+    def _run_job_cmd(self, cmd_key, itasks, callback, **kwargs):
+        """Run job commands, e.g. poll, kill, etc.
 
-        print 'ROOT TASK:'
-        for itask in self.get_all_tasks():
-            # Find the target task
-            if itask.identity == id_:
-                # set it succeeded
-                print '  Setting', itask.identity, 'succeeded'
-                itask.reset_state_succeeded(manual=False)
-                # force it to spawn
-                print '  Spawning', itask.identity
-                spawned = self.force_spawn(itask)
-                if spawned:
-                    spawn.append(spawned)
-                # mark it for later removal
-                print '  Marking', itask.identity, 'for deletion'
-                die.append(itask)
-                break
+        Group itasks with their user at host.
+        Put a job command for each user at host to the multiprocess pool.
 
-        print 'VIRTUAL TRIGGERING STOPPING AT', stop
-        # trace out the tree of dependent tasks
-        something_triggered = True
-        while something_triggered:
-            self.match_dependencies()
-            something_triggered = False
-            for itask in sorted(
-                    self.get_all_tasks(), key=lambda t: t.identity):
-                if itask.point > stop:
-                    continue
-                if itask.ready_to_run():
-                    something_triggered = True
-                    print '  Triggering', itask.identity
-                    itask.reset_state_succeeded(manual=False)
-                    print '  Spawning', itask.identity
-                    spawned = self.force_spawn(itask)
-                    if spawned:
-                        spawn.append(spawned)
-                    print '  Marking', itask.identity, 'for deletion'
-                    # remove these later (their outputs may still be needed)
-                    die.append(itask)
-                elif itask.suicide_prerequisites.count() > 0:
-                    if itask.suicide_prerequisites.all_satisfied():
-                        print (
-                            '  Spawning virtually activated suicide task ' +
-                            itask.identity)
-                        self.force_spawn(itask)
-                        # remove these now (not setting succeeded; outputs not
-                        # needed)
-                        print '  Suiciding', itask.identity, 'now'
-                        self.remove(itask, 'purge')
-            self.release_runahead_tasks()
-        # reset any prerequisites "virtually" satisfied during the purge
-        print 'RESETTING spawned tasks to unsatisified:'
-        for itask in spawn:
-            print '  ', itask.identity
-            itask.prerequisites.set_all_unsatisfied()
-
-        # finally, purge all tasks marked as depending on the target
-        print 'REMOVING PURGED TASKS:'
-        for itask in die:
-            print '  ', itask.identity
-            self.remove(itask, 'purge')
-
-        print 'PURGE DONE'
+        """
+        if not itasks:
+            return
+        auth_itasks = {}
+        for itask in itasks:
+            if (itask.task_host, itask.task_owner) not in auth_itasks:
+                auth_itasks[(itask.task_host, itask.task_owner)] = []
+            auth_itasks[(itask.task_host, itask.task_owner)].append(itask)
+        for auth, itasks in sorted(auth_itasks.items()):
+            cmd = ["cylc", cmd_key]
+            if cylc.flags.debug:
+                cmd.append("--debug")
+            host, owner = auth
+            for key, value, test_func in [
+                    ('host', host, is_remote_host),
+                    ('user', owner, is_remote_user)]:
+                if test_func(value):
+                    cmd.append('--%s=%s' % (key, value))
+            cmd.append("--")
+            cmd.append(GLOBAL_CFG.get_derived_host_item(
+                self.suite_name, 'suite job log directory', host, owner))
+            job_log_dirs = []
+            for itask in sorted(itasks, key=lambda itask: itask.identity):
+                job_log_dirs.append(itask.get_job_log_dir(
+                    itask.tdef.name, itask.point, itask.submit_num))
+            cmd += job_log_dirs
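+            # For illustration (host and paths hypothetical), a poll of two
+            # jobs might invoke something like:
+            #   cylc jobs-poll --host=hpc1 -- \
+            #       ~/cylc-run/<suite>/log/job \
+            #       20100101T00Z/foo/01 20100101T00Z/bar/01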
+            kwargs["job_log_dirs"] = job_log_dirs
+            SuiteProcPool.get_inst().put_command(
+                SuiteProcContext(cmd_key, cmd, **kwargs), callback)
diff --git a/lib/cylc/task_proxy.py b/lib/cylc/task_proxy.py
index 47d71b8..2f5faac 100644
--- a/lib/cylc/task_proxy.py
+++ b/lib/cylc/task_proxy.py
@@ -15,29 +15,30 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""Task Proxy."""
+"""Provide a class to represent a task proxy in a running suite."""
 
-import Queue
+from collections import namedtuple
+from copy import copy
+from logging import getLogger, CRITICAL, ERROR, WARNING, INFO, DEBUG
 import os
+from pipes import quote
+import Queue
+from random import randrange
 import re
 import socket
-import time
-from copy import copy
-from random import randrange
-from collections import deque
-from logging import getLogger, CRITICAL, ERROR, WARNING, INFO, DEBUG
 import shlex
+from shutil import rmtree
+import time
 import traceback
+
 from isodatetime.timezone import get_local_time_zone
 
+from cylc.mkdir_p import mkdir_p
 from cylc.task_state import task_state
 from cylc.cfgspec.globalcfg import GLOBAL_CFG
 import cylc.cycling.iso8601
 from cylc.cycling.loader import get_interval_cls, get_point_relative
 from cylc.envvar import expandvars
-from cylc.owner import user
-from cylc.job_logs import CommandLogger
-import cylc.rundb
 import cylc.flags as flags
 from cylc.wallclock import (
     get_current_time_string,
@@ -45,28 +46,101 @@ from cylc.wallclock import (
     get_seconds_as_interval_string,
     RE_DATE_TIME_FORMAT_EXTENDED
 )
-from cylc.task_receiver import msgqueue
+from cylc.network.task_msgqueue import TaskMessageServer
 from cylc.host_select import get_task_host
 from cylc.job_file import JOB_FILE
 from cylc.job_host import RemoteJobHostManager
 from cylc.batch_sys_manager import BATCH_SYS_MANAGER
 from cylc.outputs import outputs
-from cylc.owner import is_remote_user
+from cylc.owner import is_remote_user, user
 from cylc.poll_timer import PollTimer
-from cylc.prerequisites.prerequisites import prerequisites
-from cylc.prerequisites.plain_prerequisites import plain_prerequisites
-from cylc.prerequisites.conditionals import conditional_prerequisites
-from cylc.suite_host import is_remote_host
+from cylc.prerequisite import Prerequisite
+from cylc.suite_host import is_remote_host, get_suite_host
 from parsec.util import pdeepcopy, poverride
-from cylc.mp_pool import (
-    SuiteProcPool,
-    CMD_TYPE_EVENT_HANDLER,
-    CMD_TYPE_JOB_POLL_KILL,
-    CMD_TYPE_JOB_SUBMISSION,
-    JOB_SKIPPED_FLAG
-)
+from parsec.OrderedDict import OrderedDictWithDefaults
+from cylc.mp_pool import SuiteProcPool, SuiteProcContext
+from cylc.rundb import CylcSuiteDAO
 from cylc.task_id import TaskID
+from cylc.task_message import TaskMessage
 from cylc.task_output_logs import logfiles
+from parsec.util import pdeepcopy, poverride
+from parsec.config import ItemNotFoundError
+
+
+CustomTaskEventHandlerContext = namedtuple(
+    "CustomTaskEventHandlerContext",
+    ["key", "ctx_type", "cmd"])
+
+
+TaskEventMailContext = namedtuple(
+    "TaskEventMailContext",
+    ["key", "ctx_type", "event", "mail_from", "mail_to", "mail_smtp"])
+
+
+TaskJobLogsRegisterContext = namedtuple(
+    "TaskJobLogsRegisterContext",
+    ["key", "ctx_type"])
+
+
+TaskJobLogsRetrieveContext = namedtuple(
+    "TaskJobLogsRetrieveContext",
+    ["key", "ctx_type", "user_at_host", "max_size"])
+
+
+class TryState(object):
+    """Represent the current state of a (re)try."""
+
+    def __init__(self, ctx=None, delays=None):
+        self.ctx = ctx
+        if delays:
+            self.delays = list(delays)
+        else:
+            self.delays = [0]
+        self.num = 0
+        self.delay = None
+        self.timeout = None
+        self.is_waiting = False
+
+    def delay_as_seconds(self):
+        """Return the delay as PTnS, where n is number of seconds."""
+        return get_seconds_as_interval_string(self.delay)
+
+    def is_delay_done(self, now=None):
+        """Is timeout done?"""
+        if self.timeout is None:
+            return False
+        if now is None:
+            now = time.time()
+        return now > self.timeout
+
+    def is_timeout_set(self):
+        """Return True if timeout is set."""
+        return self.timeout is not None
+
+    def next(self):
+        """Return the next retry delay if there is one, or None otherwise."""
+        try:
+            self.delay = self.delays[self.num]
+        except IndexError:
+            return None
+        else:
+            self.timeout = time.time() + self.delay
+            self.num += 1
+            return self.delay
+
+    def set_waiting(self):
+        """Set waiting flag, while waiting for action to complete."""
+        self.delay = None
+        self.is_waiting = True
+        self.timeout = None
+
+    def unset_waiting(self):
+        """Unset waiting flag after an action has completed."""
+        self.is_waiting = False
+
+    def timeout_as_str(self):
+        """Return the timeout as an ISO8601 date-time string."""
+        return get_time_string_from_unix_time(self.timeout)
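+
+# Illustrative TryState usage (hypothetical, mirroring how retries are driven
+# elsewhere in this change):
+#   try_state = TryState(delays=[0, 30, 180])
+#   if not try_state.is_timeout_set() and try_state.next() is None:
+#       ...  # no (more) delays configured: give up
+#   elif try_state.is_delay_done():
+#       try_state.set_waiting()    # launch the action...
+#       try_state.unset_waiting()  # ...then clear the flag once it completes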
 
 
 class TaskProxySequenceBoundsError(ValueError):
@@ -91,18 +165,64 @@ class TaskProxy(object):
     # if execution retries are configured; and is passed to task
     # environments to allow changed behaviour after previous failures.
 
-    POLL_SUFFIX_RE = re.compile(
+    # Format string for single line output
+    JOB_LOG_FMT_1 = "%(timestamp)s [%(cmd_key)s %(attr)s] %(mesg)s"
+    # Format string for multi-line output
+    JOB_LOG_FMT_M = "%(timestamp)s [%(cmd_key)s %(attr)s]\n\n%(mesg)s\n"
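+    # e.g. a single-line entry rendered with JOB_LOG_FMT_1 might read
+    # (illustrative values): "2015-12-12T13:53:29Z [job-submit ret_code] 0"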
+
+    CUSTOM_EVENT_HANDLER = "event-handler"
+    EVENT_MAIL = "event-mail"
+    JOB_KILL = "job-kill"
+    JOB_LOGS_REGISTER = "job-logs-register"
+    JOB_LOGS_RETRIEVE = "job-logs-retrieve"
+    JOB_POLL = "job-poll"
+    JOB_SUBMIT = SuiteProcPool.JOB_SUBMIT
+    MANAGE_JOB_LOGS_TRY_DELAYS = (0, 30, 180)  # PT0S, PT30S, PT3M
+    MESSAGE_SUFFIX_RE = re.compile(
         ' at (' + RE_DATE_TIME_FORMAT_EXTENDED + '|unknown-time)$')
 
+    LOGGING_LVL_OF = {
+        "INFO": INFO,
+        "NORMAL": INFO,
+        "WARNING": WARNING,
+        "ERROR": ERROR,
+        "CRITICAL": CRITICAL,
+        "DEBUG": DEBUG,
+    }
+
+    TABLE_TASK_JOBS = CylcSuiteDAO.TABLE_TASK_JOBS
+    TABLE_TASK_JOB_LOGS = CylcSuiteDAO.TABLE_TASK_JOB_LOGS
+    TABLE_TASK_EVENTS = CylcSuiteDAO.TABLE_TASK_EVENTS
+    TABLE_TASK_STATES = CylcSuiteDAO.TABLE_TASK_STATES
+
     event_handler_env = {}
     stop_sim_mode_job_submission = False
 
+    @classmethod
+    def get_job_log_dir(
+            cls, task_name, task_point, submit_num="NN", suite=None):
+        """Return the latest job log path on the suite host."""
+        try:
+            submit_num = "%02d" % submit_num
+        except TypeError:
+            pass
+        if suite:
+            return os.path.join(
+                GLOBAL_CFG.get_derived_host_item(
+                    suite, "suite job log directory"),
+                str(task_point), task_name, submit_num)
+        else:
+            return os.path.join(str(task_point), task_name, submit_num)
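+    # For example (suite name and run directory hypothetical):
+    #   TaskProxy.get_job_log_dir("foo", "20100101T00", 1, suite="my.suite")
+    # would yield something like
+    #   ~/cylc-run/my.suite/log/job/20100101T00/foo/01
+    # while omitting "suite" gives the relative "20100101T00/foo/01".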
+
     def __init__(
             self, tdef, start_point, initial_state, stop_point=None,
             is_startup=False, validate_mode=False, submit_num=0,
             is_reload=False):
         self.tdef = tdef
-        self.submit_num = submit_num
+        if submit_num is None:
+            self.submit_num = 0
+        else:
+            self.submit_num = submit_num
         self.validate_mode = validate_mode
 
         if is_startup:
@@ -126,9 +246,8 @@ class TaskProxy(object):
                 self.point, self.tdef.intercycle_offsets)
             self.identity = TaskID.get(self.tdef.name, self.point)
 
-        # prerequisites
-        self.prerequisites = prerequisites(self.tdef.start_point)
-        self.suicide_prerequisites = prerequisites(self.tdef.start_point)
+        self.prerequisites = []
+        self.suicide_prerequisites = []
         self._add_prerequisites(self.point)
         self.point_as_seconds = None
 
@@ -144,6 +263,11 @@ class TaskProxy(object):
                 self.outputs.add(msg)
         self.outputs.register()
 
+        self.external_triggers = {}
+        for ext in self.tdef.external_triggers:
+            # set unsatisfied
+            self.external_triggers[ext] = False
+
         # Manually inserted tasks may have a final cycle point set.
         self.stop_point = stop_point
 
@@ -152,6 +276,7 @@ class TaskProxy(object):
         self.state_before_held = None  # state before being held
         self.hold_on_retry = False
         self.manual_trigger = False
+        self.is_manual_submit = False
 
         self.submission_timer_timeout = None
         self.execution_timer_timeout = None
@@ -174,28 +299,30 @@ class TaskProxy(object):
             'label': str(self.point),
             'logfiles': self.logfiles.get_paths()
         }
-        self.retries_configured = False
-
-        self.try_number = 1
-        self.retry_delay = None
-        self.retry_delay_timer_timeout = None
-        self.retry_delays = None
         self.job_file_written = False
 
-        self.sub_try_number = 1
-        self.sub_retry = None
-        self.sub_retry_delay = None
-        self.sub_retry_delay_timer_timeout = None
-        self.sub_retry_delays_orig = None
-        self.sub_retry_delays = None
+        self.retries_configured = False
 
-        self.message_queue = msgqueue()
-        self.db_queue = []
+        self.run_try_state = TryState()
+        self.sub_try_state = TryState()
+        self.event_handler_try_states = {}
+
+        self.message_queue = TaskMessageServer()
+        self.db_inserts_map = {
+            self.TABLE_TASK_JOBS: [],
+            self.TABLE_TASK_JOB_LOGS: [],
+            self.TABLE_TASK_STATES: [],
+            self.TABLE_TASK_EVENTS: [],
+        }
+        self.db_updates_map = {
+            self.TABLE_TASK_JOBS: [],
+            self.TABLE_TASK_STATES: [],
+        }
 
         # TODO - should take suite name from config!
         self.suite_name = os.environ['CYLC_SUITE_NAME']
 
-        # In case task owner and host are needed by record_db_event()
+        # In case task owner and host are needed by _db_events_insert()
         # for pre-submission events, set their initial values as if
         # local (we can't know the correct host prior to this because
         # dynamic host selection could be used).
@@ -211,17 +338,20 @@ class TaskProxy(object):
         self.execution_poll_timer = None
 
         self.logger = getLogger("main")
-        self.command_logger = CommandLogger(
-            self.suite_name, self.tdef.name, self.point)
 
         # An initial db state entry is created at task proxy init. On reloading
         # or restarting the suite, the task proxies already have this db entry.
-        if not is_reload and self.submit_num == 0:
-            self.record_db_state()
-
-        if self.submit_num > 0:
-            self.record_db_update(
-                "task_states", status=self.state.get_status())
+        if not self.validate_mode and not is_reload and self.submit_num == 0:
+            self.db_inserts_map[self.TABLE_TASK_STATES].append({
+                "time_created": get_current_time_string(),
+                "time_updated": get_current_time_string(),
+                "try_num": self.run_try_state.num + 1,
+                "status": self.state.get_status()})
+
+        if not self.validate_mode and self.submit_num > 0:
+            self.db_updates_map[self.TABLE_TASK_STATES].append({
+                "time_updated": get_current_time_string(),
+                "status": self.state.get_status()})
 
         self.reconfigure_me = False
         self.event_hooks = None
@@ -229,26 +359,50 @@ class TaskProxy(object):
         self.set_from_rtconfig()
         self.delayed_start_str = None
         self.delayed_start = None
+        self.expire_time_str = None
+        self.expire_time = None
+
+        self.kill_failed = False
 
     def _add_prerequisites(self, point):
         """Add task prerequisites."""
-        # NOTE: Task objects hold all triggers defined for the task
-        # in all cycling graph sections in this data structure:
-        #     self.triggers[sequence] = [list of triggers for this
-        #     sequence]
-        # The list of triggers associated with sequenceX will only be
-        # used by a particular task if the task's cycle point is a
-        # valid member of sequenceX's sequence of cycle points.
-
-        # 1) non-conditional triggers
-        ppre = plain_prerequisites(self.identity, self.tdef.start_point)
-        spre = plain_prerequisites(self.identity, self.tdef.start_point)
+        # self.triggers[sequence] = [triggers for sequence]
+        # Triggers for sequence_i only used if my cycle point is a
+        # valid member of sequence_i's sequence of cycle points.
+
+        for sequence, exps in self.tdef.triggers.items():
+            for ctrig, exp in exps:
+                key = ctrig.keys()[0]
+                if not sequence.is_valid(self.point):
+                    # This trigger is not valid for the current cycle point
+                    # (see the comment at the top of this method).
+                    continue
+
+                cpre = Prerequisite(self.identity, self.tdef.start_point)
+                for label in ctrig:
+                    trig = ctrig[label]
+                    if trig.graph_offset_string is not None:
+                        prereq_offset_point = get_point_relative(
+                            trig.graph_offset_string, point)
+                        if prereq_offset_point > point:
+                            prereq_offset = prereq_offset_point - point
+                            if (self.tdef.max_future_prereq_offset is None or
+                                    (prereq_offset >
+                                     self.tdef.max_future_prereq_offset)):
+                                self.tdef.max_future_prereq_offset = (
+                                    prereq_offset)
+                        cpre.add(trig.get_prereq(point)[0], label,
+                                 prereq_offset_point < self.tdef.start_point)
+                    else:
+                        cpre.add(trig.get_prereq(point)[0], label)
+                cpre.set_condition(exp)
+                if ctrig[key].suicide:
+                    self.suicide_prerequisites.append(cpre)
+                else:
+                    self.prerequisites.append(cpre)
 
         if self.tdef.sequential:
-            # For tasks declared 'sequential' we automatically add a
-            # previous-instance inter-cycle trigger, and adjust the
-            # cleanup cutoff (determined by inter-cycle triggers)
-            # accordingly.
+            # Add a previous-instance prerequisite, adjust cleanup cutoff.
             p_next = None
             adjusted = []
             for seq in self.tdef.sequences:
@@ -261,144 +415,97 @@ class TaskProxy(object):
                 if (self.cleanup_cutoff is not None and
                         self.cleanup_cutoff < p_next):
                     self.cleanup_cutoff = p_next
-
             p_prev = None
             adjusted = []
             for seq in self.tdef.sequences:
                 prv = seq.get_nearest_prev_point(self.point)
                 if prv:
-                    # may be None if out of sequence bounds
+                    # None if out of sequence bounds.
                     adjusted.append(prv)
             if adjusted:
                 p_prev = max(adjusted)
-                ppre.add(TaskID.get(self.tdef.name, p_prev) + ' succeeded')
-
-        for sequence in self.tdef.triggers:
-            for trig in self.tdef.triggers[sequence]:
-                if not sequence.is_valid(self.point):
-                    # This trigger is not used in current cycle
-                    continue
-                if (trig.graph_offset_string is None or
-                        (get_point_relative(
-                            trig.graph_offset_string, point) >=
-                         self.tdef.start_point)):
-                    # i.c.t. can be None after a restart, if one
-                    # is not specified in the suite definition.
-
-                    message, prereq_point = trig.get(point)
-                    prereq_offset = prereq_point - point
-                    if (prereq_offset > get_interval_cls().get_null() and
-                            (self.tdef.max_future_prereq_offset is None or
-                             prereq_offset >
-                             self.tdef.max_future_prereq_offset)):
-                        self.tdef.max_future_prereq_offset = prereq_offset
-
-                    if trig.suicide:
-                        spre.add(message)
-                    else:
-                        ppre.add(message)
-
-        self.prerequisites.add_requisites(ppre)
-        self.suicide_prerequisites.add_requisites(spre)
+                cpre = Prerequisite(self.identity, self.tdef.start_point)
+                prereq = TaskID.get(self.tdef.name, p_prev) + ' succeeded'
+                label = self.tdef.name
+                cpre.add(prereq, label, p_prev < self.tdef.start_point)
+                cpre.set_condition(label)
+                self.prerequisites.append(cpre)
+
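
The loop above builds one Prerequisite object per trigger expression: each dependency message gets a short label, and the labels are combined by a condition string such as "(a & b) | c". Below is a minimal sketch of that idea using a hypothetical stand-in class (not cylc's actual Prerequisite, whose constructor also takes the task identity and the suite start point); the class, labels and messages here are invented for illustration.

    class DemoPrerequisite(object):
        """Labelled prerequisite messages combined by a condition expression."""

        def __init__(self):
            self.messages = {}     # label -> prerequisite message
            self.satisfied = {}    # label -> satisfied yet?
            self.condition = None  # e.g. "(a & b) | c"

        def add(self, message, label):
            self.messages[label] = message
            self.satisfied[label] = False

        def set_condition(self, expr):
            self.condition = expr

        def satisfy(self, message):
            for label, msg in self.messages.items():
                if msg == message:
                    self.satisfied[label] = True

        def is_satisfied(self):
            # Evaluate the condition with each label bound to its boolean state.
            return bool(eval(
                self.condition, {"__builtins__": {}}, dict(self.satisfied)))

    preq = DemoPrerequisite()
    preq.add("foo.20151212T00 succeeded", "a")
    preq.add("bar.20151212T00 succeeded", "b")
    preq.set_condition("a & b")
    preq.satisfy("foo.20151212T00 succeeded")
    print(preq.is_satisfied())  # False until "bar" also succeeds
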
+    def _get_events_conf(self, key, default=None):
+        """Return an events setting from suite then global configuration."""
+        for getter in (
+                self.tdef.rtconfig["events"],
+                self.event_hooks,
+                GLOBAL_CFG.get()["task events"]):
+            try:
+                value = getter.get(key)
+                if value is not None:
+                    return value
+            except (ItemNotFoundError, KeyError):
+                pass
+        return default
 
-        # 2) conditional triggers
-        for sequence in self.tdef.cond_triggers.keys():
-            for ctrig, exp in self.tdef.cond_triggers[sequence]:
-                key = ctrig.keys()[0]
-                if not sequence.is_valid(self.point):
-                    # This trigger is not valid for current cycle (see NOTE
-                    # just above)
-                    continue
-                cpre = conditional_prerequisites(
-                    self.identity, self.tdef.start_point)
-                for label in ctrig:
-                    trig = ctrig[label]
-                    if trig.graph_offset_string is not None:
-                        is_less_than_start = (
-                            get_point_relative(
-                                trig.graph_offset_string, point) <
-                            self.tdef.start_point
-                        )
-                        cpre.add(trig.get(point)[0], label, is_less_than_start)
-                    else:
-                        cpre.add(trig.get(point)[0], label)
-                cpre.set_condition(exp)
-                if ctrig[key].suicide:
-                    self.suicide_prerequisites.add_requisites(cpre)
-                else:
-                    self.prerequisites.add_requisites(cpre)
+    def _get_host_conf(self, key, default=None):
+        """Return a host setting from suite then global configuration."""
+        if self.tdef.rtconfig["remote"].get(key):
+            return self.tdef.rtconfig["remote"][key]
+        else:
+            try:
+                return GLOBAL_CFG.get_host_item(
+                    key, self.task_host, self.task_owner)
+            except ItemNotFoundError:
+                pass
+        return default
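
_get_events_conf() and _get_host_conf() implement a simple fallback chain: the task's own runtime settings are consulted first, then the global site/user configuration, and only then the supplied default. A generic sketch of that pattern follows; the dictionaries and key names are made up for illustration, and the real getters also take host/owner arguments and may raise ItemNotFoundError.

    def layered_get(key, layers, default=None):
        """Return the first non-None value for key found in layers."""
        for layer in layers:
            value = layer.get(key)
            if value is not None:
                return value
        return default

    suite_events = {"mail events": ["failed", "retry"]}
    global_events = {"mail events": [], "mail to": "oncall@example.com"}
    print(layered_get("mail events", [suite_events, global_events]))  # suite wins
    print(layered_get("mail to", [suite_events, global_events]))      # falls back
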
 
     def log(self, lvl=INFO, msg=""):
         """Log a message of this task proxy."""
         msg = "[%s] -%s" % (self.identity, msg)
         self.logger.log(lvl, msg)
 
-    def command_log(self, log_type, out=None, err=None):
-        """Log a command activity for a job of this task proxy."""
-        self.command_logger.append_to_log(self.submit_num, log_type, out, err)
-
-    def record_db_event(self, event="", message=""):
-        """Record an event to the DB."""
-        if self.validate_mode:
-            # Don't touch the db during validation.
-            return
-        self.db_queue.append(cylc.rundb.RecordEventObject(
-            self.tdef.name, str(self.point), self.submit_num, event, message,
-            self.user_at_host
-        ))
-
-    def record_db_update(self, table, **kwargs):
-        """Record an update to the DB."""
-        if self.validate_mode:
-            # Don't touch the db during validation.
-            return
-        self.db_queue.append(cylc.rundb.UpdateObject(
-            table, self.tdef.name, str(self.point), **kwargs))
-
-    def record_db_state(self):
-        """Record state to DB."""
-        if self.validate_mode:
-            # Don't touch the db during validation.
+    def command_log(self, ctx):
+        """Log an activity for a job of this task proxy."""
+        ctx_str = str(ctx)
+        if not ctx_str:
             return
-        self.db_queue.append(cylc.rundb.RecordStateObject(
-            self.tdef.name,
-            str(self.point),
-            time_created_string=get_current_time_string(),
-            time_updated_string=None,
-            submit_num=self.submit_num,
-            try_num=self.try_number,
-            host=None,
-            submit_method=None,
-            submit_method_id=None,
-            status=self.state.get_status()
-        ))
-
-    def get_db_ops(self):
-        """Return the next DB operation from DB queue."""
-        ops = self.db_queue
-        self.db_queue = []
-        return ops
+        submit_num = "NN"
+        if isinstance(ctx.cmd_key, tuple):  # An event handler
+            submit_num = ctx.cmd_key[-1]
+        job_log_dir = self.get_job_log_dir(
+            self.tdef.name, self.point, submit_num, self.suite_name)
+        job_activity_log = os.path.join(job_log_dir, "job-activity.log")
+        try:
+            with open(job_activity_log, "ab") as handle:
+                handle.write(ctx_str)
+        except IOError as exc:
+            self.log(WARNING, "%s: write failed\n%s" % (job_activity_log, exc))
+        if ctx.cmd and ctx.ret_code:
+            self.log(ERROR, ctx_str)
+        elif ctx.cmd:
+            self.log(DEBUG, ctx_str)
+
+    def _db_events_insert(self, event="", message=""):
+        """Record an event to the DB."""
+        self.db_inserts_map[self.TABLE_TASK_EVENTS].append({
+            "time": get_current_time_string(),
+            "event": event,
+            "message": message,
+            "misc": self.user_at_host})
 
     def retry_delay_done(self):
         """Is retry delay done? Can I retry now?"""
-        done = False
-        now_time = time.time()
-        if self.retry_delay_timer_timeout:
-            if now_time > self.retry_delay_timer_timeout:
-                done = True
-        elif self.sub_retry_delay_timer_timeout:
-            if now_time > self.sub_retry_delay_timer_timeout:
-                done = True
-        return done
+        now = time.time()
+        return (self.run_try_state.is_delay_done(now) or
+                self.sub_try_state.is_delay_done(now))
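
retry_delay_done() and the failure handlers further down rely on the run/submission "try state" objects, which carry a list of configured retry delays, the current delay, and a wall-clock timeout. The sketch below shows the interface those call sites appear to assume; it is a hypothetical stand-in, not the TryState class this release actually uses (which also formats timeouts and delays as strings).

    import time

    class DemoTryState(object):
        """Retry bookkeeping: configured delays, current delay, timeout."""

        def __init__(self, delays=None):
            self.delays = list(delays or [])
            self.num = 0          # number of tries used so far
            self.delay = None     # current retry delay in seconds
            self.timeout = None   # wall-clock time at which the delay expires

        def next(self):
            """Schedule the next retry; return None if delays are exhausted."""
            try:
                self.delay = self.delays[self.num]
            except IndexError:
                return None  # definitive failure, no retry left
            self.num += 1
            self.timeout = time.time() + self.delay
            return self.delay

        def is_delay_done(self, now=None):
            """Has the scheduled retry delay passed?"""
            if self.timeout is None:
                return False
            return (now if now is not None else time.time()) > self.timeout

    state = DemoTryState([10, 30])  # retry after 10s, then after 30s
    print(state.next())             # 10
    print(state.is_delay_done())    # False: the delay has only just started
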
 
     def ready_to_run(self):
         """Is this task ready to run?"""
-        return (
+        ready = (
             (
                 self.state.is_currently('queued') or
                 (
                     self.state.is_currently('waiting') and
-                    self.prerequisites.all_satisfied()
+                    self.prerequisites_are_all_satisfied() and
+                    all(self.external_triggers.values())
                 ) or
                 (
                     self.state.is_currently('submit-retrying', 'retrying') and
@@ -406,72 +513,140 @@ class TaskProxy(object):
                 )
             ) and self.start_time_reached()
         )
+        if ready and self.has_expired():
+            self.log(WARNING, 'Task expired (skipping job).')
+            self.setup_event_handlers(
+                'expired', 'Task expired (skipping job).')
+            self.reset_state_expired()
+            return False
+        return ready
 
-    def start_time_reached(self):
-        """Has this task reached its clock trigger time?"""
-        if self.tdef.clocktrigger_offset is None:
-            return True
+    def get_point_as_seconds(self):
+        """Compute and store my cycle point as seconds."""
         if self.point_as_seconds is None:
             iso_timepoint = cylc.cycling.iso8601.point_parse(str(self.point))
-            iso_clocktrigger_offset = cylc.cycling.iso8601.interval_parse(
-                str(self.tdef.clocktrigger_offset))
             self.point_as_seconds = int(iso_timepoint.get(
                 "seconds_since_unix_epoch"))
-            clocktrigger_offset_as_seconds = int(
-                iso_clocktrigger_offset.get_seconds())
             if iso_timepoint.time_zone.unknown:
                 utc_offset_hours, utc_offset_minutes = (
                     get_local_time_zone())
                 utc_offset_in_seconds = (
                     3600 * utc_offset_hours + 60 * utc_offset_minutes)
                 self.point_as_seconds += utc_offset_in_seconds
+        return self.point_as_seconds
+
+    def get_offset_as_seconds(self, offset):
+        """Return an ISO interval as seconds."""
+        iso_offset = cylc.cycling.iso8601.interval_parse(str(offset))
+        return int(iso_offset.get_seconds())
+
+    def start_time_reached(self):
+        """Has this task reached its clock trigger time?"""
+        if self.tdef.clocktrigger_offset is None:
+            return True
+        if self.delayed_start is None:
             self.delayed_start = (
-                self.point_as_seconds + clocktrigger_offset_as_seconds)
+                self.get_point_as_seconds() +
+                self.get_offset_as_seconds(self.tdef.clocktrigger_offset))
             self.delayed_start_str = get_time_string_from_unix_time(
                 self.delayed_start)
         return time.time() > self.delayed_start
 
+    def has_expired(self):
+        """Is this task past its use-by date?"""
+        if self.tdef.expiration_offset is None:
+            return False
+        if self.expire_time is None:
+            self.expire_time = (
+                self.get_point_as_seconds() +
+                self.get_offset_as_seconds(self.tdef.expiration_offset))
+            self.expire_time_str = get_time_string_from_unix_time(
+                self.expire_time)
+        return time.time() > self.expire_time
+
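
get_point_as_seconds(), get_offset_as_seconds(), start_time_reached() and has_expired() all reduce to the same comparison: the cycle point (as seconds since the Unix epoch) plus an offset, against the wall clock. A worked example with plain epoch arithmetic follows; the real code parses the ISO 8601 point and interval via cylc.cycling.iso8601 and also handles local time zones, and the offsets here are invented.

    import calendar
    import time

    point_as_seconds = calendar.timegm((2015, 12, 12, 0, 0, 0))  # 20151212T00Z
    clocktrigger_offset = 6 * 3600   # e.g. PT6H after the cycle point
    expiration_offset = 24 * 3600    # e.g. P1D after the cycle point

    delayed_start = point_as_seconds + clocktrigger_offset
    expire_time = point_as_seconds + expiration_offset

    now = time.time()
    print(now > delayed_start)  # clock trigger time reached?
    print(now > expire_time)    # task past its use-by date?
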
     def get_resolved_dependencies(self):
-        """report who I triggered off"""
-        # Used by the test-battery log comparator
-        dep = []
-        satby = self.prerequisites.get_satisfied_by()
-        for label in satby.keys():
-            dep.append(satby[label])
+        """Report who I triggered off."""
+        satby = {}
+        for req in self.prerequisites:
+            satby.update(req.satisfied_by)
+        dep = satby.values()
         # order does not matter here; sort to allow comparison with
         # reference run task with lots of near-simultaneous triggers.
         dep.sort()
         return dep
 
-    def unfail(self):
-        """Remove previous failed message.
-
-        If a task is manually reset remove any previous failed message or on
-        later success it will be seen as an incomplete output.
+    def unset_outputs(self):
+        """Remove special output messages.
 
+        These are added for use in triggering off special states:
+          failed, submit-failed, expired
+        If the task state is later reset, these must be removed or they will
+        be seen as incomplete outputs when the task finishes.
         """
         self.hold_on_retry = False
-        failed_msg = self.identity + " failed"
-        if self.outputs.exists(failed_msg):
-            self.outputs.remove(failed_msg)
-        failed_msg = self.identity + "submit-failed"
-        if self.outputs.exists(failed_msg):
-            self.outputs.remove(failed_msg)
+        self.kill_failed = False
+        for state in ["failed", "submit-failed", "expired"]:
+            msg = "%s %s" % (self.identity, state)
+            if self.outputs.exists(msg):
+                self.outputs.remove(msg)
 
     def turn_off_timeouts(self):
         """Turn off submission and execution timeouts."""
         self.submission_timer_timeout = None
         self.execution_timer_timeout = None
 
+    def prerequisites_get_target_points(self):
+        """Return a list of cycle points targetted by each prerequisite."""
+        points = []
+        for preq in self.prerequisites:
+            points += preq.get_target_points()
+        return points
+
+    def prerequisites_dump(self):
+        """Return a dump of my prerequisites."""
+        res = []
+        for preq in self.prerequisites:
+            res += preq.dump()
+        return res
+
+    def prerequisites_eval_all(self):
+        """Evaluate all prerequisite and suicide prerequisite expressions.
+
+        (Validation: will abort on illegal trigger expressions.)
+        """
+        for preqs in [self.prerequisites, self.suicide_prerequisites]:
+            for preq in preqs:
+                preq.is_satisfied()
+
+    def prerequisites_are_all_satisfied(self):
+        return all(preq.is_satisfied() for preq in self.prerequisites)
+
+    def suicide_prerequisites_are_all_satisfied(self):
+        return all(preq.is_satisfied() for preq in self.suicide_prerequisites)
+
+    def set_prerequisites_all_satisfied(self):
+        for prereq in self.prerequisites:
+            prereq.set_satisfied()
+
+    def set_prerequisites_not_satisfied(self):
+        for prereq in self.prerequisites:
+            prereq.set_not_satisfied()
+
     def reset_state_ready(self):
         """Reset state to "ready"."""
         self.set_status('waiting')
-        self.record_db_event(event="reset to ready")
-        self.prerequisites.set_all_satisfied()
-        self.unfail()
+        self._db_events_insert(event="reset to ready")
+        self.set_prerequisites_all_satisfied()
+        self.unset_outputs()
         self.turn_off_timeouts()
         self.outputs.set_all_incomplete()
 
+    def reset_state_expired(self):
+        """Reset state to "expired"."""
+        self.set_status('expired')
+        self._db_events_insert(event="reset to expired")
+        self.set_prerequisites_all_satisfied()
+        self.unset_outputs()
+        self.turn_off_timeouts()
+        self.outputs.set_all_incomplete()
+        self.outputs.add(self.identity + ' expired', completed=True)
+
     def reset_state_waiting(self):
         """Reset state to "waiting".
 
@@ -479,28 +654,24 @@ class TaskProxy(object):
 
         """
         self.set_status('waiting')
-        self.record_db_event(event="reset to waiting")
-        self.prerequisites.set_all_unsatisfied()
-        self.unfail()
+        self._db_events_insert(event="reset to waiting")
+        self.set_prerequisites_not_satisfied()
+        self.unset_outputs()
         self.turn_off_timeouts()
         self.outputs.set_all_incomplete()
 
-    def reset_state_succeeded(self, manual=True):
+    def reset_state_succeeded(self):
         """Reset state to succeeded.
 
         All prerequisites satisfied and all outputs complete.
 
         """
         self.set_status('succeeded')
-        if manual:
-            self.record_db_event(event="reset to succeeded")
-        else:
-            # Artificially set to succeeded but not by the user. E.g. by
-            # the purge algorithm and when reloading task definitions.
-            self.record_db_event(event="set to succeeded")
-        self.prerequisites.set_all_satisfied()
-        self.unfail()
+        self._db_events_insert(event="reset to succeeded")
+        self.set_prerequisites_all_satisfied()
+        self.unset_outputs()
         self.turn_off_timeouts()
+        # TODO - for message outputs this should be optional (see #1551):
         self.outputs.set_all_completed()
 
     def reset_state_failed(self):
@@ -510,8 +681,8 @@ class TaskProxy(object):
 
         """
         self.set_status('failed')
-        self.record_db_event(event="reset to failed")
-        self.prerequisites.set_all_satisfied()
+        self._db_events_insert(event="reset to failed")
+        self.set_prerequisites_all_satisfied()
         self.hold_on_retry = False
         self.outputs.set_all_incomplete()
         # set a new failed output just as if a failure message came in
@@ -525,9 +696,9 @@ class TaskProxy(object):
             self.state_before_held = task_state(self.state.get_status())
             self.set_status('held')
             self.turn_off_timeouts()
-            self.record_db_event(event="reset to held")
+            self._db_events_insert(event="reset to held")
             self.log(INFO, '%s => held' % self.state_before_held.get_status())
-        elif self.state.is_currently('submitted', 'running'):
+        elif self.is_active():
             self.hold_on_retry = True
 
     def reset_state_unheld(self, stop_point=None):
@@ -546,159 +717,342 @@ class TaskProxy(object):
         old_status = self.state_before_held.get_status()
         self.set_status(old_status)
         self.state_before_held = None
-        self.record_db_event(event="reset to %s" % (old_status))
+        self._db_events_insert(event="reset to %s" % (old_status))
         self.log(INFO, 'held => %s' % (old_status))
 
     def job_submission_callback(self, result):
         """Callback on job submission."""
-        out = ""
-        for line in result['OUT'].splitlines(True):
-            if line.startswith(BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID + "="):
-                self.submit_method_id = line.strip().replace(
-                    BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID + "=", "")
-            else:
-                out += line
-        self.command_log("SUBMIT", out, result['ERR'])
-        if result['EXIT'] != 0:
-            if result['EXIT'] == JOB_SKIPPED_FLAG:
-                pass
-            else:
-                self.job_submission_failed()
+        if result.out is not None:
+            out = ""
+            for line in result.out.splitlines(True):
+                if line.startswith(
+                        BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID + "="):
+                    self.submit_method_id = line.strip().replace(
+                        BATCH_SYS_MANAGER.CYLC_BATCH_SYS_JOB_ID + "=", "")
+                else:
+                    out += line
+            result.out = out
+        self.command_log(result)
+
+        if result.ret_code == SuiteProcPool.JOB_SKIPPED_FLAG:
             return
-        if self.submit_method_id:
-            self.log(INFO, 'submit_method_id=' + self.submit_method_id)
-            self.record_db_update(
-                "task_states", submit_method_id=self.submit_method_id)
-        self.job_submission_succeeded()
 
-    def job_poll_callback(self, result):
+        if self.submit_method_id and result.ret_code == 0:
+            self.job_submission_succeeded()
+        else:
+            self.job_submission_failed()
+
+    def job_poll_callback(self, line):
         """Callback on job poll."""
-        out = result['OUT']
-        err = result['ERR']
-        self.command_log("POLL", out, err)
-        if result['EXIT'] != 0:
+        ctx = SuiteProcContext(self.JOB_POLL, None)
+        ctx.out = line
+        ctx.ret_code = 0
+        self.command_log(ctx)
+
+        items = line.split("|")
+        # See cylc.batch_sys_manager.JobPollContext
+        try:
+            (
+                batch_sys_exit_polled, run_status, run_signal, _, time_run
+            ) = items[4:9]
+        except (IndexError, ValueError):
             self.summary['latest_message'] = 'poll failed'
-            self.log(WARNING, 'job(%02d) poll failed' % self.submit_num)
             flags.iflag = True
             return
-        if not self.state.is_currently('submitted', 'running'):
-            # Poll results can come in after a task finishes
-            msg = "Ignoring late poll result: task not active"
-            self.log(WARNING, msg)
-            self.command_log("POLL", err=msg)
+        if run_status == "1" and run_signal in ["ERR", "EXIT"]:
+            # Failed normally
+            self._process_poll_message(INFO, TaskMessage.FAILED)
+        elif run_status == "1" and batch_sys_exit_polled == "1":
+            # Failed by a signal, and no longer in batch system
+            self._process_poll_message(INFO, TaskMessage.FAILED)
+            self._process_poll_message(
+                INFO, TaskMessage.FAIL_MESSAGE_PREFIX + run_signal)
+        elif run_status == "1":
+            # The job has terminated, but is still managed by batch system.
+            # Some batch systems may restart a job in this state, so don't
+            # mark as failed yet.
+            self._process_poll_message(INFO, TaskMessage.STARTED)
+        elif run_status == "0":
+            # The job succeeded
+            self._process_poll_message(INFO, TaskMessage.SUCCEEDED)
+        elif time_run and batch_sys_exit_polled == "1":
+            # The job has terminated without executing the error trap
+            self._process_poll_message(INFO, TaskMessage.FAILED)
+        elif time_run:
+            # The job has started, and is still managed by batch system
+            self._process_poll_message(INFO, TaskMessage.STARTED)
+        elif batch_sys_exit_polled == "1":
+            # The job never ran, and no longer in batch system
+            self._process_poll_message(INFO, "submission failed")
+        else:
+            # The job never ran, and is in batch system
+            self._process_poll_message(INFO, "submitted")
+
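
job_poll_callback() above works on a "|"-delimited status summary: the slice items[4:9] picks out the batch-system and run status fields, and the chain of tests turns them into an emulated task message. A small illustrative parse follows; the example line is fabricated, and the authoritative field order is defined by cylc.batch_sys_manager.JobPollContext.

    line = "2015-12-12T13:53:29Z|poll|0|1234|1|1|EXIT||2015-12-12T13:00:00Z"
    items = line.split("|")
    try:
        batch_sys_exit_polled, run_status, run_signal, _, time_run = items[4:9]
    except ValueError:
        # A short or malformed line cannot be unpacked into five fields.
        print("poll failed")
    else:
        if run_status == "1" and run_signal in ["ERR", "EXIT"]:
            print("job failed normally")  # error trap ran and reported the signal
        elif run_status == "0":
            print("job succeeded")
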
+    def _process_poll_message(self, priority, message):
+        """Wraps self.process_incoming_message for poll messages."""
+        self.process_incoming_message(
+            (priority, "%s %s" % (self.identity, message)),
+            msg_was_polled=True)
+
+    def job_poll_message_callback(self, line):
+        """Callback on job poll message."""
+        ctx = SuiteProcContext(self.JOB_POLL, None)
+        ctx.out = line
+        try:
+            priority, message = line.split("|")[3:5]
+        except ValueError:
+            ctx.ret_code = 1
         else:
-            # poll results emulate task messages
-            for line in out.splitlines():
-                if line.startswith('polled %s' % (self.identity)):
-                    self.process_incoming_message(('NORMAL', line))
-                    break
+            ctx.ret_code = 0
+            self.process_incoming_message(
+                (priority, message), msg_was_polled=True)
+        self.command_log(ctx)
 
-    def job_kill_callback(self, result):
+    def job_kill_callback(self, line):
         """Callback on job kill."""
-        out = result['OUT']
-        err = result['ERR']
-        self.command_log("KILL", out, err)
-        if result['EXIT'] != 0:
-            self.summary['latest_message'] = 'kill failed'
-            self.log(WARNING, 'job(%02d) kill failed' % self.submit_num)
-            flags.iflag = True
-            return
-        if self.state.is_currently('submitted'):
-            self.log(INFO, 'job killed')
+        ctx = SuiteProcContext(self.JOB_KILL, None)
+        ctx.out = line
+        try:
+            ctx.timestamp, _, ctx.ret_code = line.split("|", 2)
+        except ValueError:
+            ctx.ret_code = 1
+        else:
+            ctx.ret_code = int(ctx.ret_code)
+        self.command_log(ctx)
+        log_lvl = INFO
+        log_msg = 'killed'
+        if ctx.ret_code:  # non-zero exit status
+            log_lvl = WARNING
+            log_msg = 'kill failed'
+            self.kill_failed = True
+        elif self.state.is_currently('submitted'):
             self.job_submission_failed()
         elif self.state.is_currently('running'):
-            self.log(INFO, 'job killed')
             self.job_execution_failed()
         else:
-            msg = ('ignoring job kill result, unexpected task state: %s'
-                   % self.state.get_status())
-            self.log(WARNING, msg)
-
-    def event_handler_callback(self, result):
-        """Callback when event handler is done."""
-        out = result['OUT']
-        err = result['ERR']
-        self.command_log("EVENT", out, err)
-        if result['EXIT'] != 0:
-            self.log(WARNING, 'event handler failed:\n  ' + result['CMD'])
+            log_lvl = WARNING
+            log_msg = (
+                'ignoring job kill result, unexpected task state: %s' %
+                self.state.get_status())
+        self.summary['latest_message'] = log_msg
+        self.log(log_lvl, "job(%02d) %s" % (self.submit_num, log_msg))
+        flags.iflag = True
+
+    def job_submit_callback(self, line):
+        """Callback on job submit."""
+        ctx = SuiteProcContext(self.JOB_SUBMIT, None)
+        ctx.out = line
+        items = line.split("|")
+        try:
+            ctx.timestamp, _, ctx.ret_code = items[0:3]
+        except ValueError:
+            ctx.ret_code = 1
+        else:
+            ctx.ret_code = int(ctx.ret_code)
+        self.command_log(ctx)
+
+        if ctx.ret_code == SuiteProcPool.JOB_SKIPPED_FLAG:
             return
 
-    def handle_event(
-            self, event, descr=None, db_update=True, db_event=None,
-            db_msg=None):
-        """Call event handler."""
+        try:
+            self.submit_method_id = items[3]
+        except IndexError:
+            self.submit_method_id = None
+        self.register_job_logs(self.submit_num)
+        if self.submit_method_id and ctx.ret_code == 0:
+            self.job_submission_succeeded()
+        else:
+            self.job_submission_failed()
+
+    def job_cmd_out_callback(self, line):
+        """Callback on job command STDOUT/STDERR."""
+        job_log_dir = self.get_job_log_dir(
+            self.tdef.name, self.point, "NN", self.suite_name)
+        job_activity_log = os.path.join(job_log_dir, "job-activity.log")
+        try:
+            with open(job_activity_log, "ab") as handle:
+                if not line.endswith("\n"):
+                    line += "\n"
+                handle.write(line)
+        except IOError as exc:
+            self.log(WARNING, "%s: write failed\n%s" % (job_activity_log, exc))
+
+    def setup_event_handlers(
+            self, event, message, db_update=True, db_event=None, db_msg=None):
+        """Set up event handlers."""
         # extra args for inconsistent use between events, logging, and db
         # updates
         db_event = db_event or event
         if db_update:
-            self.record_db_event(event=db_event, message=db_msg)
+            self._db_events_insert(event=db_event, message=db_msg)
 
         if self.tdef.run_mode != 'live':
             return
 
-        handlers = self.event_hooks[event + ' handler']
-        if handlers:
-            self.log(DEBUG, "Queueing " + event + " event handler(s)")
-            for handler in handlers:
-                self.log(DEBUG, "Queueing " + event + " event handler")
-                cmd = ""
-                env = None
-                if TaskProxy.event_handler_env:
-                    env = dict(os.environ)
-                    env.update(TaskProxy.event_handler_env)
+        self.setup_job_logs_retrieval(event, message)
+        self.setup_event_mail(event, message)
+        self.setup_custom_event_handlers(event, message)
+
+    def setup_job_logs_retrieval(self, event, _=None):
+        """Set up remote job logs retrieval."""
+        if event not in ["failed", "retry", "succeeded"]:
+            return
+        if (self.user_at_host in [user + '@localhost', 'localhost'] or
+                not self._get_host_conf("retrieve job logs")):
+            key2 = (self.JOB_LOGS_REGISTER, self.submit_num)
+            if key2 in self.event_handler_try_states:
+                return
+            self.event_handler_try_states[key2] = TryState(
+                TaskJobLogsRegisterContext(
+                    # key, ctx_type
+                    self.JOB_LOGS_REGISTER, self.JOB_LOGS_REGISTER,
+                ),
+                self._get_events_conf("register job logs retry delays", []))
+        else:
+            key2 = (self.JOB_LOGS_RETRIEVE, self.submit_num)
+            if key2 in self.event_handler_try_states:
+                return
+            self.event_handler_try_states[key2] = TryState(
+                TaskJobLogsRetrieveContext(
+                    # key
+                    self.JOB_LOGS_RETRIEVE,
+                    # ctx_type
+                    self.JOB_LOGS_RETRIEVE,
+                    self.user_at_host,
+                    # max_size
+                    self._get_host_conf("retrieve job logs max size"),
+                ),
+                self._get_host_conf("retrieve job logs retry delays", []))
+
+    def setup_event_mail(self, event, message):
+        """Event notification, by email."""
+        key1 = (self.EVENT_MAIL, event)
+        if ((key1, self.submit_num) in self.event_handler_try_states or
+                event not in self._get_events_conf("mail events", [])):
+            return
+
+        self.event_handler_try_states[(key1, self.submit_num)] = TryState(
+            TaskEventMailContext(
+                key1,
+                self.EVENT_MAIL,  # ctx_type
+                event,
+                self._get_events_conf(  # mail_from
+                    "mail from",
+                    "notifications@" + get_suite_host(),
+                ),
+                self._get_events_conf("mail to", user),  # mail_to
+                self._get_events_conf("mail smtp"),  # mail_smtp
+            ),
+            self._get_events_conf("mail retry delays", []))
+
+    def setup_custom_event_handlers(self, event, message, only_list=None):
+        """Call custom event handlers."""
+        handlers = []
+        if self.event_hooks[event + ' handler']:
+            handlers = self.event_hooks[event + ' handler']
+        elif (self._get_events_conf('handlers', []) and
+                event in self._get_events_conf('handler events', [])):
+            handlers = self._get_events_conf('handlers', [])
+        retry_delays = self._get_events_conf(
+            'handler retry delays',
+            self._get_host_conf("task event handler retry delays", []))
+        for i, handler in enumerate(handlers):
+            key1 = (
+                "%s-%02d" % (self.CUSTOM_EVENT_HANDLER, i),
+                event)
+            if (key1, self.submit_num) in self.event_handler_try_states or (
+                    only_list and i not in only_list):
+                continue
+            cmd = handler % {
+                "event": quote(event),
+                "suite": quote(self.suite_name),
+                "point": quote(str(self.point)),
+                "name": quote(self.tdef.name),
+                "submit_num": self.submit_num,
+                "id": quote(self.identity),
+                "message": quote(message),
+            }
+            if cmd == handler:
+                # Nothing substituted, assume classic interface
                 cmd = "%s '%s' '%s' '%s' '%s'" % (
-                    handler, event, self.suite_name, self.identity, descr)
-                SuiteProcPool.get_inst().put_command(
-                    CMD_TYPE_EVENT_HANDLER, cmd, self.event_handler_callback,
-                    env=env, shell=True)
+                    handler, event, self.suite_name, self.identity, message)
+            self.log(DEBUG, "Queueing %s handler: %s" % (event, cmd))
+            self.event_handler_try_states[(key1, self.submit_num)] = TryState(
+                CustomTaskEventHandlerContext(
+                    key1,
+                    self.CUSTOM_EVENT_HANDLER,
+                    cmd,
+                ),
+                retry_delays)
+
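
setup_custom_event_handlers() fills %-style named placeholders into each configured handler command; when the command comes out unchanged (no placeholders present), the classic positional interface "handler EVENT SUITE TASKID MESSAGE" is used instead. A short sketch of that substitution follows; the handler command and values are invented for illustration.

    from pipes import quote  # shlex.quote on Python 3

    handler = "notify-oncall --event=%(event)s --task=%(id)s --msg=%(message)s"
    subs = {
        "event": quote("failed"),
        "suite": quote("demo.suite"),
        "point": quote("20151212T00"),
        "name": quote("foo"),
        "submit_num": 1,
        "id": quote("foo.20151212T00"),
        "message": quote("job failed"),
    }
    cmd = handler % subs
    if cmd == handler:
        # Nothing substituted: fall back to the classic interface.
        cmd = "%s '%s' '%s' '%s' '%s'" % (
            handler, "failed", "demo.suite", "foo.20151212T00", "job failed")
    print(cmd)
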
+    def custom_event_handler_callback(self, result):
+        """Callback when a custom event handler is done."""
+        self.command_log(result)
+        try:
+            if result.ret_code == 0:
+                del self.event_handler_try_states[result.cmd_key]
+            else:
+                self.event_handler_try_states[result.cmd_key].unset_waiting()
+        except KeyError:
+            pass
 
     def job_submission_failed(self):
         """Handle job submission failure."""
         self.log(ERROR, 'submission failed')
+        self.db_updates_map[self.TABLE_TASK_JOBS].append({
+            "time_submit_exit": get_current_time_string(),
+            "submit_status": 1,
+        })
         self.submit_method_id = None
-        try:
-            sub_retry_delay = self.sub_retry_delays.popleft()
-        except IndexError:
+        if self.sub_try_state.next() is None:
             # No submission retry lined up: definitive failure.
             flags.pflag = True
             outp = self.identity + " submit-failed"  # hack: see github #476
             self.outputs.add(outp)
             self.outputs.set_completed(outp)
             self.set_status('submit-failed')
-            self.handle_event('submission failed', 'job submission failed')
+            self.setup_event_handlers(
+                'submission failed', 'job submission failed')
         else:
             # There is a submission retry lined up.
-            self.sub_retry_delay = sub_retry_delay
-            self.sub_retry_delay_timer_timeout = (
-                time.time() + sub_retry_delay)
-            timeout_str = get_time_string_from_unix_time(
-                self.sub_retry_delay_timer_timeout)
+            timeout_str = self.sub_try_state.timeout_as_str()
 
             delay_msg = "submit-retrying in %s" % (
-                get_seconds_as_interval_string(sub_retry_delay))
+                self.sub_try_state.delay_as_seconds())
             msg = "submission failed, %s (after %s)" % (delay_msg, timeout_str)
             self.log(INFO, "job(%02d) " % self.submit_num + msg)
             self.summary['latest_message'] = msg
+            self.summary['waiting for reload'] = self.reconfigure_me
 
-            self.sub_try_number += 1
             self.set_status('submit-retrying')
-            self.record_db_event(event="submission failed",
-                                 message=delay_msg)
-            self.prerequisites.set_all_satisfied()
+            self._db_events_insert(
+                event="submission failed", message=delay_msg)
+            self.set_prerequisites_all_satisfied()
             self.outputs.set_all_incomplete()
 
-            # TODO - is this record is redundant with that in handle_event?
-            self.record_db_event(
+            # TODO - is this record redundant with that in
+            # setup_event_handlers?
+            self._db_events_insert(
                 event="submission failed",
-                message="submit-retrying in " + str(sub_retry_delay))
-            self.handle_event(
+                message="submit-retrying in " + str(self.sub_try_state.delay))
+            self.setup_event_handlers(
                 "submission retry", "job submission failed, " + delay_msg)
             if self.hold_on_retry:
                 self.reset_state_held()
 
     def job_submission_succeeded(self):
-        """Handle job succeeded."""
+        """Handle job submission succeeded."""
+        if self.submit_method_id is not None:
+            self.log(INFO, 'submit_method_id=' + self.submit_method_id)
         self.log(INFO, 'submission succeeded')
+        now = get_current_time_string()
+        self.db_updates_map[self.TABLE_TASK_STATES].append({
+            "time_updated": now,
+            "submit_method_id": self.submit_method_id})
+        self.db_updates_map[self.TABLE_TASK_JOBS].append({
+            "time_submit_exit": now,
+            "submit_status": 0,
+            "batch_sys_job_id": self.submit_method_id})
         if self.tdef.run_mode == 'simulation':
             if self.__class__.stop_sim_mode_job_submission:
                 # Real jobs that are ready to run are queued to the proc pool
@@ -733,7 +1087,7 @@ class TaskProxy(object):
             get_time_string_from_unix_time(self.submitted_time))
         self.summary['submit_method_id'] = self.submit_method_id
         self.summary['latest_message'] = "submitted"
-        self.handle_event(
+        self.setup_event_handlers(
             'submitted', 'job submitted', db_event='submission succeeded')
 
         if self.state.is_currently('ready'):
@@ -742,7 +1096,7 @@ class TaskProxy(object):
             # server, and the server has started the job before the job submit
             # command returns.
             self.set_status('submitted')
-            submit_timeout = self.event_hooks['submission timeout']
+            submit_timeout = self._get_events_conf('submission timeout')
             if submit_timeout:
                 self.submission_timer_timeout = (
                     self.submitted_time + submit_timeout
@@ -753,14 +1107,16 @@ class TaskProxy(object):
 
     def job_execution_failed(self):
         """Handle a job failure."""
-        self.finished_time = time.time()
+        self.finished_time = time.time()  # TODO: use time from message
         self.summary['finished_time'] = self.finished_time
         self.summary['finished_time_string'] = (
             get_time_string_from_unix_time(self.finished_time))
+        self.db_updates_map[self.TABLE_TASK_JOBS].append({
+            "run_status": 1,
+            "time_run_exit": self.summary['finished_time_string'],
+        })
         self.execution_timer_timeout = None
-        try:
-            retry_delay = self.retry_delays.popleft()
-        except IndexError:
+        if self.run_try_state.next() is None:
             # No retry lined up: definitive failure.
             # Note the 'failed' output is only added if needed.
             flags.pflag = True
@@ -768,36 +1124,33 @@ class TaskProxy(object):
             self.outputs.add(msg)
             self.outputs.set_completed(msg)
             self.set_status('failed')
-            self.handle_event('failed', 'job failed')
+            self.setup_event_handlers('failed', 'job failed')
 
         else:
             # There is a retry lined up
-            self.retry_delay = retry_delay
-            self.retry_delay_timer_timeout = (time.time() + retry_delay)
-            timeout_str = get_time_string_from_unix_time(
-                self.retry_delay_timer_timeout)
-
+            timeout_str = self.run_try_state.timeout_as_str()
             delay_msg = "retrying in %s" % (
-                get_seconds_as_interval_string(retry_delay))
+                self.run_try_state.delay_as_seconds())
             msg = "failed, %s (after %s)" % (delay_msg, timeout_str)
             self.log(INFO, "job(%02d) " % self.submit_num + msg)
             self.summary['latest_message'] = msg
 
-            self.try_number += 1
             self.set_status('retrying')
-            self.prerequisites.set_all_satisfied()
+            self.set_prerequisites_all_satisfied()
             self.outputs.set_all_incomplete()
-            self.handle_event(
+            self.setup_event_handlers(
                 "retry", "job failed, " + delay_msg, db_msg=delay_msg)
             if self.hold_on_retry:
                 self.reset_state_held()
 
     def reset_manual_trigger(self):
         """This is called immediately after manual trigger flag used."""
-        self.manual_trigger = False
-        # unset any retry delay timers
-        self.retry_delay_timer_timeout = None
-        self.sub_retry_delay_timer_timeout = None
+        if self.manual_trigger:
+            self.manual_trigger = False
+            self.is_manual_submit = True
+            # unset any retry delay timers
+            self.run_try_state.timeout = None
+            self.sub_try_state.timeout = None
 
     def set_from_rtconfig(self, cfg=None):
         """Populate task proxy with runtime configuration.
@@ -825,16 +1178,9 @@ class TaskProxy(object):
                 # note that a *copy* of the retry delays list is needed
                 # so that all instances of the same task don't pop off
                 # the same deque (but copy of rtconfig above solves this).
-                self.retry_delays = deque(rtconfig['retry delays'])
-                self.sub_retry_delays_orig = deque(
+                self.run_try_state.delays = list(rtconfig['retry delays'])
+                self.sub_try_state.delays = list(
                     rtconfig['job submission']['retry delays'])
-            else:
-                self.retry_delays = deque()
-                self.sub_retry_delays_orig = deque()
-
-            # retain the original submission retry deque for re-use in
-            # case execution fails and submission tries start over.
-            self.sub_retry_delays = copy(self.sub_retry_delays_orig)
 
         rrange = rtconfig['simulation mode']['run time range']
         if len(rrange) != 2:
@@ -859,73 +1205,80 @@ class TaskProxy(object):
             copy(GLOBAL_CFG.get(['execution polling intervals'])),
             'execution', self.log)
 
-    def increment_submit_num(self):
-        """Increment and record the submit number."""
-        self.log(DEBUG, "incrementing submit number")
-        self.submit_num += 1
-        self.summary['submit_num'] = self.submit_num
-        self.record_db_event(event="incrementing submit number")
-        self.record_db_update("task_states", submit_num=self.submit_num)
+    def register_job_logs(self, submit_num):
+        """Register job logs in the runtime database.
 
-    def submit(self, dry_run=False, overrides=None):
-        """Submit a job for this task."""
+        Return a list containing the names of the job logs.
 
-        if self.tdef.run_mode == 'simulation':
-            self.job_submission_succeeded()
-            return
+        """
+        data = []
+        job_log_dir = self.get_job_log_dir(
+            self.tdef.name, self.point, submit_num, self.suite_name)
+        try:
+            for filename in os.listdir(job_log_dir):
+                try:
+                    stat = os.stat(os.path.join(job_log_dir, filename))
+                except OSError:
+                    continue
+                else:
+                    data.append((stat.st_mtime, stat.st_size, filename))
+        except OSError:
+            pass
 
-        if dry_run or not self.job_file_written:
-            # Prepare the job submit command and write the job script.
-            # In a dry_run, force a rewrite in case of a previous aborted
-            # edit-run that left the file write flag set.
-            try:
-                self._prepare_submit(overrides=overrides)
-                JOB_FILE.write(self.job_conf)
-                self.job_file_written = True
-            except Exception, exc:
-                # Could be a bad command template.
-                if flags.debug:
-                    traceback.print_exc()
-                self.log(ERROR, "Failed to construct job submission command")
-                self.command_log("SUBMIT", err=str(exc))
-                self.job_submission_failed()
-                return
-            if dry_run:
-                # Note this is used to bail out in the first stage of an
-                # edit-run (i.e. write the job file but don't submit it).
-                # In a suite daemon, this must be an edit run.
-                self.log(WARNING, "Job file written for an edit-run.")
-                return self.job_conf['local job file path']
+        rel_job_log_dir = self.get_job_log_dir(
+            self.tdef.name, self.point, submit_num)
+        for mtime, size, filename in data:
+            self.db_inserts_map[self.TABLE_TASK_JOB_LOGS].append({
+                "submit_num": submit_num,
+                "filename": filename,
+                "location": os.path.join(rel_job_log_dir, filename),
+                "mtime": mtime,
+                "size": size})
 
-        # The job file is now (about to be) used: reset the file write flag so
-        # that subsequent manual retrigger will generate a new job file.
-        self.job_file_written = False
-        self.set_status('ready')
-        # Send the job to the command pool.
-        return self._run_job_command(
-            CMD_TYPE_JOB_SUBMISSION,
-            "job-submit",
-            args=[self.job_conf['job file path']],
-            callback=self.job_submission_callback,
-            is_bg_submit=BATCH_SYS_MANAGER.is_bg_submit(self.batch_sys_name),
-            stdin_file_path=self.job_conf['local job file path'])
+        return [datum[2] for datum in data]
 
-    def _prepare_submit(self, overrides=None):
-        """Get the job submission command.
+    def prep_submit(self, dry_run=False, overrides=None):
+        """Prepare job submission.
 
-        Exceptions here are caught in the task pool module.
+        Return self on successful preparation.
 
         """
-        self.increment_submit_num()
+        if self.tdef.run_mode == 'simulation' or (
+                self.job_file_written and not dry_run):
+            return self
+
+        try:
+            self._prep_submit_impl(overrides=overrides)
+            JOB_FILE.write(self.job_conf)
+            self.job_file_written = True
+        except Exception, exc:
+            # Could be a bad command template.
+            if flags.debug:
+                traceback.print_exc()
+            self.command_log(SuiteProcContext(
+                self.JOB_SUBMIT, '(prepare job file)', err=exc,
+                ret_code=1))
+            self.job_submission_failed()
+            return
+
+        if dry_run:
+            # This will be shown next to submit num in gcylc:
+            self.summary['latest_message'] = 'job file written for edit-run'
+            self.log(WARNING, self.summary['latest_message'])
+
+        # Return value used by "cylc submit" and "cylc jobscript":
+        return self
+
+    def _prep_submit_impl(self, overrides=None):
+        """Helper for self.prep_submit."""
+        self.log(DEBUG, "incrementing submit number")
+        self.submit_num += 1
+        self.summary['submit_num'] = self.submit_num
+        self._db_events_insert(event="incrementing submit number")
         self.job_file_written = False
 
-        local_job_log_dir, common_job_log_path = (
-            CommandLogger.get_create_job_log_path(
-                self.suite_name,
-                self.tdef.name,
-                self.point,
-                self.submit_num,
-                new_mode=True))
+        local_job_log_dir, common_job_log_path = self._create_job_log_path(
+            new_mode=True)
         local_jobfile_path = os.path.join(
             local_job_log_dir, common_job_log_path)
 
@@ -977,6 +1330,10 @@ class TaskProxy(object):
                 comstr += (
                     " --run-dir=" +
                     str(rtconfig['suite state polling']['run-dir']))
+            if rtconfig['suite state polling']['template']:
+                comstr += (
+                    " --template=" +
+                    str(rtconfig['suite state polling']['template']))
             comstr += " " + self.tdef.suite_polling_cfg['suite']
             command = "echo " + comstr + "\n" + comstr
 
@@ -1001,21 +1358,44 @@ class TaskProxy(object):
         RemoteJobHostManager.get_inst().init_suite_run_dir(
             self.suite_name, self.user_at_host)
 
-        self.record_db_update(
-            "task_states",
-            submit_method=self.batch_sys_name,
-            host=self.user_at_host,
-        )
+        self.db_updates_map[self.TABLE_TASK_STATES].append({
+            "time_updated": get_current_time_string(),
+            "submit_method": self.batch_sys_name,
+            "host": self.user_at_host,
+            "submit_num": self.submit_num})
         self._populate_job_conf(
             rtconfig, local_jobfile_path, common_job_log_path)
-        self.job_conf.update({
-            'use manual completion': use_manual,
-            'pre-script': precommand,
-            'script': command,
-            'post-script': postcommand,
+        self.job_conf.update(
+            {
+                'use manual completion': use_manual,
+                'pre-script': precommand,
+                'script': command,
+                'post-script': postcommand,
+            }.items()
+        )
+        self.db_inserts_map[self.TABLE_TASK_JOBS].append({
+            "is_manual_submit": self.is_manual_submit,
+            "try_num": self.run_try_state.num + 1,
+            "time_submit": get_current_time_string(),
+            "user_at_host": self.user_at_host,
+            "batch_sys_name": self.batch_sys_name,
         })
+        self.is_manual_submit = False
 
-    def _prepare_manip(self):
+    def submit(self):
+        """Submit a job for this task."""
+        # The job file is now (about to be) used: reset the file write flag so
+        # that subsequent manual retrigger will generate a new job file.
+        self.job_file_written = False
+        self.set_status('ready')
+        # Send the job to the command pool.
+        return self._run_job_command(
+            self.JOB_SUBMIT,
+            args=[self.job_conf['job file path']],
+            callback=self.job_submission_callback,
+            stdin_file_paths=[self.job_conf['local job file path']])
+
+    def prep_manip(self):
         """A cut down version of prepare_submit().
 
         This provides access to job poll commands before the task is submitted,
@@ -1028,9 +1408,7 @@ class TaskProxy(object):
                     self.user_at_host.split('@', 1))
             else:
                 self.task_host = self.user_at_host
-        local_job_log_dir, common_job_log_path = (
-            CommandLogger.get_create_job_log_path(
-                self.suite_name, self.tdef.name, self.point, self.submit_num))
+        local_job_log_dir, common_job_log_path = self._create_job_log_path()
         local_jobfile_path = os.path.join(
             local_job_log_dir, common_job_log_path)
         rtconfig = pdeepcopy(self.tdef.rtconfig)
@@ -1041,7 +1419,7 @@ class TaskProxy(object):
             self, rtconfig, local_jobfile_path, common_job_log_path):
         """Populate the configuration for submitting or manipulating a job."""
         self.batch_sys_name = rtconfig['job submission']['method']
-        self.job_conf = {
+        self.job_conf = OrderedDictWithDefaults({
             'suite name': self.suite_name,
             'task id': self.identity,
             'batch system name': rtconfig['job submission']['method'],
@@ -1060,8 +1438,8 @@ class TaskProxy(object):
             'script': '',
             'post-script': '',
             'namespace hierarchy': self.tdef.namespace_hierarchy,
-            'submission try number': self.sub_try_number,
-            'try number': self.try_number,
+            'submission try number': self.sub_try_state.num + 1,
+            'try number': self.run_try_state.num + 1,
             'absolute submit number': self.submit_num,
             'is cold-start': self.tdef.is_coldstart,
             'owner': self.task_owner,
@@ -1070,7 +1448,7 @@ class TaskProxy(object):
             'common job log path': common_job_log_path,
             'local job file path': local_jobfile_path,
             'job file path': local_jobfile_path,
-        }
+        }.items())
 
         log_files = self.job_conf['log files']
         log_files.add_path(local_jobfile_path)
@@ -1110,65 +1488,26 @@ class TaskProxy(object):
             log_files.add_path(self.job_conf['job file path'] + '.out')
             log_files.add_path(self.job_conf['job file path'] + '.err')
 
-    def check_timers(self):
-        """Check submission and execution timeout timers.
-
-        Not called in simulation mode.
-
-        """
-        if self.state.is_currently('submitted'):
-            self.check_submission_timeout()
-            if self.submission_poll_timer:
-                if self.submission_poll_timer.get():
-                    self.poll()
-                    self.submission_poll_timer.set_timer()
-        elif self.state.is_currently('running'):
-            self.check_execution_timeout()
-            if self.execution_poll_timer:
-                if self.execution_poll_timer.get():
-                    self.poll()
-                    self.execution_poll_timer.set_timer()
-
-    def check_submission_timeout(self):
-        """Check submission timeout, only called if in "submitted" state."""
-        if self.submission_timer_timeout is None:
-            # (explicit None in case of a zero timeout!)
-            # no timer set
-            return
-
-        # if timed out, log warning, poll, queue event handler, and turn off
-        # the timer
-        if time.time() > self.submission_timer_timeout:
-            msg = 'job submitted %s ago, but has not started' % (
-                get_seconds_as_interval_string(
-                    self.event_hooks['submission timeout'])
-            )
-            self.log(WARNING, msg)
-            self.poll()
-            self.handle_event('submission timeout', msg)
-            self.submission_timer_timeout = None
-
-    def check_execution_timeout(self):
-        """Check execution timeout, only called if in "running" state."""
-        if self.execution_timer_timeout is None:
-            # (explicit None in case of a zero timeout!)
-            # no timer set
-            return
+    def handle_submission_timeout(self):
+        """Handle submission timeout, only called if in "submitted" state."""
+        msg = 'job submitted %s ago, but has not started' % (
+            get_seconds_as_interval_string(
+                self.event_hooks['submission timeout'])
+        )
+        self.log(WARNING, msg)
+        self.setup_event_handlers('submission timeout', msg)
 
-        # if timed out: log warning, poll, queue event handler, and turn off
-        # the timer
-        if time.time() > self.execution_timer_timeout:
-            if self.event_hooks['reset timer']:
-                # the timer is being re-started by put messages
-                msg = 'last message %s ago, but job not finished'
-            else:
-                msg = 'job started %s ago, but has not finished'
-            msg = msg % get_seconds_as_interval_string(
-                self.event_hooks['execution timeout'])
-            self.log(WARNING, msg)
-            self.poll()
-            self.handle_event('execution timeout', msg)
-            self.execution_timer_timeout = None
+    def handle_execution_timeout(self):
+        """Handle execution timeout, only called if in "running" state."""
+        if self.event_hooks['reset timer']:
+            # the timer is restarted by each incoming task message
+            msg = 'last message %s ago, but job not finished'
+        else:
+            msg = 'job started %s ago, but has not finished'
+        msg = msg % get_seconds_as_interval_string(
+            self.event_hooks['execution timeout'])
+        self.log(WARNING, msg)
+        self.setup_event_handlers('execution timeout', msg)
 
     def sim_time_check(self):
         """Check simulation time."""
@@ -1184,21 +1523,6 @@ class TaskProxy(object):
         else:
             return False
 
-    def set_all_internal_outputs_completed(self):
-        """Shortcut all the outputs.
-
-        As if the task has gone through all the messages to "succeeded".
-
-        """
-        if self.reject_if_failed('set_all_internal_outputs_completed'):
-            return
-        self.log(DEBUG, 'setting all internal outputs completed')
-        for message in self.outputs.completed:
-            if (message != self.identity + ' started' and
-                    message != self.identity + ' succeeded' and
-                    message != self.identity + ' completed'):
-                self.message_queue.put('NORMAL', message)
-
     def reject_if_failed(self, message):
         """Reject a message if in the failed state.
 
@@ -1233,7 +1557,8 @@ class TaskProxy(object):
                 break
             queue.task_done()
 
-    def process_incoming_message(self, (priority, message)):
+    def process_incoming_message(
+            self, (priority, message), msg_was_polled=False):
         """Parse an incoming task message and update task state.
 
         Correctly handle late (out of order) message which would otherwise set
@@ -1244,38 +1569,21 @@ class TaskProxy(object):
 
         # Log incoming messages with '>' to distinguish non-message log entries
         self.log(
-            CommandLogger.LOGGING_PRIORITY[priority],
-            '(current:' + self.state.get_status() + ')> ' + message
-        )
+            self.LOGGING_LVL_OF.get(priority, INFO),
+            '(current:' + self.state.get_status() + ')> ' + message)
         # always update the suite state summary for latest message
         self.summary['latest_message'] = message.replace(
             self.identity, "", 1).strip()
+        if msg_was_polled:
+            self.summary['latest_message'] += " (polled)"
         flags.iflag = True
 
         if self.reject_if_failed(message):
             # Failed tasks do not send messages unless declared resurrectable
             return
 
-        msg_was_polled = False
-        if message.startswith('polled '):
-            if not self.state.is_currently('submitted', 'running'):
-                # Polling can take a few seconds or more, so it is
-                # possible for a poll result to come in after a task
-                # finishes normally (success or failure) - in which case
-                # we should ignore the poll result.
-                self.log(
-                    WARNING,
-                    "Ignoring late poll result: task is not active")
-                return
-            # remove polling prefix and treat as a normal task message
-            msg_was_polled = True
-            message = message[7:]
-
-        # remove the remote event time (or "unknown-time" from polling) from
-        # the end:
-        message = self.POLL_SUFFIX_RE.sub('', message)
-
         # Remove the prepended task ID.
+        message = self.MESSAGE_SUFFIX_RE.sub('', message)
         content = message.replace(self.identity + ' ', '')
 
         # If the message matches a registered output, record it as completed.
@@ -1283,10 +1591,8 @@ class TaskProxy(object):
             if not self.outputs.is_completed(message):
                 flags.pflag = True
                 self.outputs.set_completed(message)
-                self.record_db_event(event="output completed", message=content)
-            elif content == 'started' and self.job_vacated:
-                self.job_vacated = False
-                self.log(WARNING, "Vacated job restarted: " + message)
+                self._db_events_insert(
+                    event="output completed", message=content)
             elif not msg_was_polled:
                 # This output has already been reported complete. Not an error
                 # condition - maybe the network was down for a bit. Ok for
@@ -1295,28 +1601,43 @@ class TaskProxy(object):
                     WARNING,
                     "Unexpected output (already completed):\n  " + message)
 
-        if priority == 'WARNING':
-            self.handle_event('warning', content, db_update=False)
+        if msg_was_polled and not self.is_active():
+            # Polling can take a few seconds or more, so it is
+            # possible for a poll result to come in after a task
+            # finishes normally (success or failure) - in which case
+            # we should ignore the poll result.
+            self.log(
+                WARNING,
+                "Ignoring late poll result: task is not active")
+            return
 
-        if self.event_hooks['reset timer']:
+        if priority == TaskMessage.WARNING:
+            self.setup_event_handlers('warning', content, db_update=False)
+
+        if self._get_events_conf('reset timer'):
             # Reset execution timer on incoming messages
-            execution_timeout = self.event_hooks['execution timeout']
+            execution_timeout = self._get_events_conf('execution timeout')
             if execution_timeout:
                 self.execution_timer_timeout = (
                     time.time() + execution_timeout
                 )
 
-        elif (content == 'started' and
+        elif (content == TaskMessage.STARTED and
                 self.state.is_currently(
                     'ready', 'submitted', 'submit-failed')):
+            if self.job_vacated:
+                self.job_vacated = False
+                self.log(WARNING, "Vacated job restarted: " + message)
             # Received a 'task started' message
             flags.pflag = True
             self.set_status('running')
-            self.started_time = time.time()
+            self.started_time = time.time()  # TODO: use time from message
             self.summary['started_time'] = self.started_time
             self.summary['started_time_string'] = (
                 get_time_string_from_unix_time(self.started_time))
-            execution_timeout = self.event_hooks['execution timeout']
+            self.db_updates_map[self.TABLE_TASK_JOBS].append({
+                "time_run": self.summary['started_time_string']})
+            execution_timeout = self._get_events_conf('execution timeout')
             if execution_timeout:
                 self.execution_timer_timeout = (
                     self.started_time + execution_timeout
@@ -1325,12 +1646,11 @@ class TaskProxy(object):
                 self.execution_timer_timeout = None
 
             # submission was successful so reset submission try number
-            self.sub_try_number = 1
-            self.sub_retry_delays = copy(self.sub_retry_delays_orig)
-            self.handle_event('started', 'job started')
+            self.sub_try_state.num = 0
+            self.setup_event_handlers('started', 'job started')
             self.execution_poll_timer.set_timer()
 
-        elif (content == 'succeeded' and
+        elif (content == TaskMessage.SUCCEEDED and
                 self.state.is_currently(
                     'ready', 'submitted', 'submit-failed', 'running',
                     'failed')):
@@ -1343,39 +1663,57 @@ class TaskProxy(object):
             self.summary['finished_time'] = self.finished_time
             self.summary['finished_time_string'] = (
                 get_time_string_from_unix_time(self.finished_time))
+            self.db_updates_map[self.TABLE_TASK_JOBS].append({
+                "run_status": 0,
+                "time_run_exit": self.summary['finished_time_string'],
+            })
             # Update mean elapsed time only on task succeeded.
             self.tdef.update_mean_total_elapsed_time(
                 self.started_time, self.finished_time)
             self.set_status('succeeded')
-            self.handle_event("succeeded", "job succeeded")
+            self.setup_event_handlers("succeeded", "job succeeded")
             if not self.outputs.all_completed():
-                # In case start or succeed before submitted message.
-                msg = "Assuming non-reported outputs were completed:"
+                msg = "Succeeded with unreported outputs:"
                 for key in self.outputs.not_completed:
-                    msg += "\n" + key
-                self.log(INFO, msg)
-                self.outputs.set_all_completed()
-
-        elif (content == 'failed' and
+                    msg += "\n  " + key
+                self.log(WARNING, msg)
+                if msg_was_polled:
+                    # Assume all outputs complete (e.g. poll at restart).
+                    # TODO - just poll for outputs in the job status file.
+                    self.log(WARNING, "Assuming ALL outputs completed.")
+                    self.outputs.set_all_completed()
+                else:
+                    # A succeeded task MUST have submitted and started.
+                    # TODO - just poll for outputs in the job status file?
+                    for output in [self.identity + ' submitted',
+                                   self.identity + ' started']:
+                        if not self.outputs.is_completed(output):
+                            msg = "Assuming output completed:  \n %s" % output
+                            self.log(WARNING, msg)
+                            self.outputs.set_completed(output)
+
+        elif (content == TaskMessage.FAILED and
                 self.state.is_currently(
                     'ready', 'submitted', 'submit-failed', 'running')):
             # (submit- states in case of very fast submission and execution).
             self.job_execution_failed()
 
-        elif content.startswith("Task job script received signal"):
+        elif content.startswith(TaskMessage.FAIL_MESSAGE_PREFIX):
             # capture and record signals sent to task proxy
-            self.record_db_event(event="signaled", message=content)
+            self._db_events_insert(event="signaled", message=content)
+            signal = content.replace(TaskMessage.FAIL_MESSAGE_PREFIX, "")
+            self.db_updates_map[self.TABLE_TASK_JOBS].append(
+                {"run_signal": signal})
 
-        elif content.startswith("Task job script vacated by signal"):
+        elif content.startswith(TaskMessage.VACATION_MESSAGE_PREFIX):
             flags.pflag = True
             self.set_status('submitted')
-            self.record_db_event(event="vacated", message=content)
+            self._db_events_insert(event="vacated", message=content)
             self.execution_timer_timeout = None
             # TODO - check summary item value compat with GUI:
             self.summary['started_time'] = None
             self.summary['started_time_string'] = None
-            self.sub_try_number = 0
-            self.sub_retry_delays = copy(self.sub_retry_delays_orig)
+            self.sub_try_state.num = 0
             self.job_vacated = True
 
         elif content == "submission failed":
@@ -1400,12 +1738,12 @@ class TaskProxy(object):
             flags.iflag = True
             self.log(DEBUG, '(setting:' + status + ')')
             self.state.set_status(status)
-            self.record_db_update(
-                "task_states",
-                submit_num=self.submit_num,
-                try_num=self.try_number,
-                status=status
-            )
+            self.db_updates_map[self.TABLE_TASK_STATES].append({
+                "time_updated": get_current_time_string(),
+                "submit_num": self.submit_num,
+                "try_num": self.run_try_state.num + 1,
+                "status": status
+            })
 
     def dump_state(self, handle):
         """Write state information to the state dump file."""
@@ -1435,13 +1773,18 @@ class TaskProxy(object):
         if self.tdef.is_coldstart:
             self.state.set_spawned()
         return not self.state.has_spawned() and self.state.is_currently(
-            'submitted', 'running', 'succeeded', 'failed', 'retrying')
+            'expired', 'submitted', 'running', 'succeeded', 'failed',
+            'retrying')
 
     def done(self):
         """Return True if task has succeeded and spawned."""
         return (
             self.state.is_currently('succeeded') and self.state.has_spawned())
 
+    def is_active(self):
+        """Return True if task is in "submitted" or "running" state."""
+        return self.state.is_currently('submitted', 'running')
+
     def get_state_summary(self):
         """Return a dict containing the state summary of this task proxy."""
         self.summary['state'] = self.state.get_status()
@@ -1452,14 +1795,14 @@ class TaskProxy(object):
 
     def not_fully_satisfied(self):
         """Return True if prerequisites are not fully satisfied."""
-        return (not self.prerequisites.all_satisfied() or
-                not self.suicide_prerequisites.all_satisfied())
+        return (not self.prerequisites_are_all_satisfied() or
+                not self.suicide_prerequisites_are_all_satisfied())
 
     def satisfy_me(self, task_outputs):
-        """Attempt to to satify the prerequisites of this task proxy."""
-        self.prerequisites.satisfy_me(task_outputs)
-        if self.suicide_prerequisites.count() > 0:
-            self.suicide_prerequisites.satisfy_me(task_outputs)
+        """Attempt to get my prerequisites satisfied."""
+        for preqs in [self.prerequisites, self.suicide_prerequisites]:
+            for preq in preqs:
+                preq.satisfy_me(task_outputs)
 
     def next_point(self):
         """Return the next cycle point."""
@@ -1474,82 +1817,71 @@ class TaskProxy(object):
             p_next = min(adjusted)
         return p_next
 
-    def poll(self):
-        """Poll my live task job and update status accordingly."""
-        return self._manip_job_status("job-poll", self.job_poll_callback)
+    def _create_job_log_path(self, new_mode=False):
+        """Return a new job log path on the suite host, in two parts.
 
-    def kill(self):
-        """Kill current job of this task."""
-        self.reset_state_held()
-        return self._manip_job_status(
-            "job-kill", self.job_kill_callback, ['running', 'submitted'])
+        /part1/part2
 
-    def _manip_job_status(self, cmd_key, callback, ok_states=None):
-        """Manipulate the job status, e.g. poll or kill."""
-        # No real jobs in simulation mode.
-        if self.tdef.run_mode == 'simulation':
-            if cmd_key == 'job-kill':
-                self.reset_state_failed()
-            return
-        # Check that task states are compatible with the manipulation
-        if ok_states and not self.state.is_currently(*ok_states):
-            self.log(
-                WARNING,
-                'Can only do %s when in %s states' % (cmd_key, str(ok_states)))
-            return
-        # No submit method ID: should not happen
-        if not self.submit_method_id:
-            self.log(CRITICAL, 'No submit method ID')
-            return
-        # Detached tasks
-        if self.tdef.rtconfig['manual completion']:
-            self.log(
-                WARNING,
-                "Cannot %s detaching tasks (job ID unknown)" % (cmd_key))
-            return
+        * part1: the top level job log directory on the suite host.
+        * part2: the rest, which is also used on remote task hosts.
 
-        # Ensure settings are ready for manipulation on suite restart, etc
-        if self.job_conf is None:
-            self._prepare_manip()
+        The full local job log directory is created if necessary, and the
+        "NN" symlink in its parent is re-pointed at it (NN = submit number).
 
-        # Invoke the manipulation
-        return self._run_job_command(
-            CMD_TYPE_JOB_POLL_KILL,
-            cmd_key,
-            args=[self.job_conf["job file path"] + ".status"],
-            callback=callback)
+        """
+
+        suite_job_log_dir = GLOBAL_CFG.get_derived_host_item(
+            self.suite_name, "suite job log directory")
 
-    def _run_job_command(
-            self, cmd_type, cmd_key, args, callback, is_bg_submit=None,
-            stdin_file_path=None):
-        """Run a job command, e.g. submit, poll, kill, etc.
+        the_rest_dir = os.path.join(
+            str(self.point), self.tdef.name, "%02d" % int(self.submit_num))
+        the_rest = os.path.join(the_rest_dir, "job")
+
+        local_log_dir = os.path.join(suite_job_log_dir, the_rest_dir)
+
+        if new_mode:
+            try:
+                rmtree(local_log_dir)
+            except OSError:
+                pass
+
+        mkdir_p(local_log_dir)
+        target = os.path.join(os.path.dirname(local_log_dir), "NN")
+        try:
+            os.unlink(target)
+        except OSError:
+            pass
+        try:
+            os.symlink(os.path.basename(local_log_dir), target)
+        except OSError as exc:
+            if not exc.filename:
+                exc.filename = target
+            raise exc
+        return suite_job_log_dir, the_rest
+
+    def _run_job_command(self, cmd_key, args, callback, stdin_file_paths=None):
+        """Help for self.submit.
 
         Run a job command with the multiprocess pool.
 
         """
-        if self.user_at_host in [user + '@localhost', 'localhost']:
-            cmd = ["cylc", cmd_key] + list(args)
-        else:  # if it is a remote job
-            ssh_tmpl = GLOBAL_CFG.get_host_item(
-                'remote shell template',
-                self.task_host,
-                self.task_owner).replace(" %s", "")
-            r_cylc = GLOBAL_CFG.get_host_item(
-                'cylc executable', self.task_host, self.task_owner)
-            sh_tmpl = "CYLC_VERSION='%s' "
-            if GLOBAL_CFG.get_host_item(
-                    'use login shell', self.task_host, self.task_owner):
-                sh_tmpl += "bash -lc 'exec \"$0\" \"$@\"' \"%s\" '%s'"
-            else:
-                sh_tmpl += "\"%s\" '%s'"
-            sh_cmd = sh_tmpl % (os.environ['CYLC_VERSION'], r_cylc, cmd_key)
-            if stdin_file_path:
-                sh_cmd += " --remote-mode"
-            for arg in args:
-                sh_cmd += ' "%s"' % (arg)
-            cmd = shlex.split(ssh_tmpl) + [str(self.user_at_host), sh_cmd]
+        cmd = ["cylc", cmd_key]
+        if cylc.flags.debug:
+            cmd.append("--debug")
+        remote_mode = False
+        for key, value, test_func in [
+                ('host', self.task_host, is_remote_host),
+                ('user', self.task_owner, is_remote_user)]:
+            if test_func(value):
+                cmd.append('--%s=%s' % (key, value))
+                remote_mode = True
+        if remote_mode:
+            cmd.append('--remote-mode')
+        cmd.append("--")
+        cmd += list(args)
 
         # Queue the command for execution
         self.log(INFO, "job(%02d) initiate %s" % (self.submit_num, cmd_key))
-        return SuiteProcPool.get_inst().put_command(
-            cmd_type, cmd, callback, is_bg_submit, stdin_file_path)
+        ctx = SuiteProcContext(
+            cmd_key, cmd, stdin_file_paths=stdin_file_paths)
+        return SuiteProcPool.get_inst().put_command(ctx, callback)
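
The rewritten _create_job_log_path above keeps the latest job submission
reachable through an "NN" symlink that sits beside the numbered submit
directories. A minimal standalone sketch of that layout follows; the helper
name and example paths are hypothetical, and the real method resolves the
top-level directory via GLOBAL_CFG and uses cylc's mkdir_p rather than
os.makedirs.

    import errno
    import os

    def link_latest_submit(suite_job_log_dir, point, name, submit_num):
        """Create the numbered submit dir and repoint "NN" at it (sketch)."""
        the_rest_dir = os.path.join(str(point), name, "%02d" % int(submit_num))
        local_log_dir = os.path.join(suite_job_log_dir, the_rest_dir)
        try:
            os.makedirs(local_log_dir)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
        target = os.path.join(os.path.dirname(local_log_dir), "NN")
        try:
            os.unlink(target)
        except OSError:
            pass  # first submit: no "NN" link to replace yet
        # Relative link, as in the method above, so the tree stays relocatable.
        os.symlink(os.path.basename(local_log_dir), target)
        return suite_job_log_dir, os.path.join(the_rest_dir, "job")

    # e.g. link_latest_submit("/tmp/suite/log/job", "20000101T0000Z", "foo", 2)
    # leaves .../20000101T0000Z/foo/NN -> 02
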
diff --git a/lib/cylc/task_receiver.py b/lib/cylc/task_receiver.py
deleted file mode 100644
index 5a2ee90..0000000
--- a/lib/cylc/task_receiver.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-import Pyro.core
-from Queue import Queue
-
-class msgqueue( Pyro.core.ObjBase ):
-    """Pyro-connected class to queue incoming task messages"""
-
-    def __init__( self ):
-        Pyro.core.ObjBase.__init__(self)
-        self.queue = Queue()
-
-    def put( self, priority, message ):
-        res = ( True, 'Message queued' )
-        # TODO - check for legal logging priority?
-        # queue incoming messages for this task
-        self.queue.put( (priority, message) )
-        return res
-
-    def get_queue( self ):
-        return self.queue
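
The deleted task_receiver module simply exposed a standard Queue.Queue over
Pyro; incoming task messages are (priority, message) tuples and are drained
without blocking by the task proxy, as in this illustrative sketch (the
drain helper is hypothetical, not cylc's exact processing loop):

    from Queue import Empty, Queue

    def drain(queue, handler):
        """Hand queued (priority, message) tuples to handler until empty."""
        while True:
            try:
                priority, message = queue.get_nowait()
            except Empty:
                break
            handler(priority, message)
            queue.task_done()

    q = Queue()
    q.put(('NORMAL', 'foo.20000101T0000Z started'))
    drain(q, lambda priority, message: None)
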
diff --git a/lib/cylc/task_state.py b/lib/cylc/task_state.py
index 8bb87a1..1f8a3ef 100644
--- a/lib/cylc/task_state.py
+++ b/lib/cylc/task_state.py
@@ -27,12 +27,15 @@ monitoring. It indicates a task in the runahead pool - which is technically
 'waiting' but inactive.
 """
 
-class TaskStateError( Exception ):
-    def __init__( self, msg ):
+
+class TaskStateError(Exception):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
+
+    def __str__(self):
         return repr(self.msg)
 
+
 class task_state(object):
 
     legal = [
@@ -40,6 +43,7 @@ class task_state(object):
         'held',
         'queued',
         'ready',
+        'expired',
         'submitted',
         'submit-failed',
         'submit-retrying',
@@ -60,21 +64,23 @@ class task_state(object):
     ]
 
     legal_for_trigger = {
-        'submit' : 'submitted',
-        'submit-fail' : 'submit-failed',
-        'start' : 'started',
-        'succeed' : 'succeeded',
-        'fail' : 'failed'
+        'expire': 'expired',
+        'submit': 'submitted',
+        'submit-fail': 'submit-failed',
+        'start': 'started',
+        'succeed': 'succeeded',
+        'fail': 'failed'
     }
 
     legal_for_restricted_monitoring = [
+        'expired',
         'submitted',
         'submit-failed',
         'submit-retrying',
         'running',
         'failed',
         'retrying'
-    ] 
+    ]
 
     @classmethod
     def is_legal(cls, state):
@@ -84,135 +90,142 @@ class task_state(object):
     def get_legal_trigger_state(cls, str):
         if str in cls.legal_for_trigger.values():
             return str
-        elif str in cls.legal_for_trigger.keys(): 
+        elif str in cls.legal_for_trigger.keys():
             return cls.legal_for_trigger[str]
         else:
             raise TaskStateError("Illegal trigger state: %s" % str)
 
     # GUI button labels
     labels = {
-            'waiting'    : '_waiting',
-            'queued'     : '_queued',
-            'ready'      : 'rea_dy',
-            'submitted'  : 'sub_mitted',
-            'submit-failed' : 'submit-f_ailed',
-            'submit-retrying' : 'submit-retryin_g',
-            'running'    : '_running',
-            'succeeded'  : '_succeeded',
-            'failed'     : '_failed',
-            'retrying'   : 'retr_ying',
-            'held'       : '_held',
-            'runahead'   : 'r_unahead',
-            }
+        'waiting': '_waiting',
+        'queued': '_queued',
+        'ready': 'rea_dy',
+        'expired': 'e_xpired',
+        'submitted': 'sub_mitted',
+        'submit-failed': 'submit-f_ailed',
+        'submit-retrying': 'submit-retryin_g',
+        'running': '_running',
+        'succeeded': '_succeeded',
+        'failed': '_failed',
+        'retrying': 'retr_ying',
+        'held': '_held',
+        'runahead': 'r_unahead',
+    }
     # terminal monitor color control codes
     ctrl = {
-            'waiting'    : "\033[1;36m",
-            'queued'     : "\033[1;38;44m",
-            'ready'      : "\033[1;32m",
-            'submitted'  : "\033[1;33m",
-            'submit-failed' : "\033[1;34m",
-            'submit-retrying'   : "\033[1;31m",
-            'running'    : "\033[1;37;42m",
-            'succeeded'  : "\033[0m",
-            'failed'     : "\033[1;37;41m",
-            'retrying'   : "\033[1;35m",
-            'held'       : "\033[1;37;43m",
-            'runahead'   : "\033[1;37;44m",
-            }
+        'waiting': "\033[1;36m",
+        'queued': "\033[1;38;44m",
+        'ready': "\033[1;32m",
+        'expired': "\033[1;37;40m",
+        'submitted': "\033[1;33m",
+        'submit-failed': "\033[1;34m",
+        'submit-retrying': "\033[1;31m",
+        'running': "\033[1;37;42m",
+        'succeeded': "\033[0m",
+        'failed': "\033[1;37;41m",
+        'retrying': "\033[1;35m",
+        'held': "\033[1;37;43m",
+        'runahead': "\033[1;37;44m",
+    }
 
     ctrl_end = "\033[0m"
 
     # Internal to this class spawned state is a string
-    allowed_bool = [ 'true', 'false' ]
+    allowed_bool = ['true', 'false']
 
-    def __init__( self, initial_state ):
+    def __init__(self, initial_state):
 
         self.state = {}
 
         if not initial_state:
             # defaults
-            self.state[ 'status' ] = 'waiting'
-            self.state[ 'spawned' ] = 'false'
+            self.state['status'] = 'waiting'
+            self.state['spawned'] = 'false'
         else:
             # could be a state dump file entry
             # or a raw string ('waiting' etc.)
-            self.state = self.parse( initial_state )
+            self.state = self.parse(initial_state)
             self.check()
 
-    def set_status( self, state ):
-        if self.__class__.is_legal( state ):
-            self.state[ 'status' ] = state
+    def set_status(self, state):
+        if self.__class__.is_legal(state):
+            self.state['status'] = state
 
-    def get_status( self ):
-        return self.state[ 'status' ]
+    def get_status(self):
+        return self.state['status']
 
-    def set_spawned( self ):
-        self.state[ 'spawned' ] = 'true'
+    def set_spawned(self):
+        self.state['spawned'] = 'true'
 
-    def set_unspawned( self ):
-        self.state[ 'spawned' ] = 'false'
+    def set_unspawned(self):
+        self.state['spawned'] = 'false'
 
-    def has_spawned( self ):
-        return self.state[ 'spawned' ] == 'true'
+    def has_spawned(self):
+        return self.state['spawned'] == 'true'
 
-    def is_currently( self, *states ):
+    def is_currently(self, *states):
         """Return true if current state matches any state in states."""
-        return self.state[ 'status' ] in states
+        return self.state['status'] in states
 
     # generic set for special dumpable state required by some tasks.
-    def set( self, item, value ):
-        self.state[ item ] = value
+    def set(self, item, value):
+        self.state[item] = value
 
     # generic get for special dumpable state required by some tasks.
-    def get( self, item ):
-        return self.state[ item ]
+    def get(self, item):
+        return self.state[item]
 
-    def check( self ):
+    def check(self):
         # check compulsory items have been defined correctly
         if 'status' not in self.state:
-            raise TaskStateError, 'ERROR, run status not defined'
-        if not self.__class__.is_legal( self.state[ 'status' ] ):
-            raise TaskStateError, 'ERROR, illegal run status: ' + str( self.state[ 'status' ])
+            raise TaskStateError('ERROR, run status not defined')
+        if not self.__class__.is_legal(self.state['status']):
+            raise TaskStateError(
+                'ERROR, illegal run status: ' + str(self.state['status']))
 
         if 'spawned' not in self.state:
-            raise TaskStateError, 'ERROR, task spawned status not defined'
-        if self.state[ 'spawned' ] not in [ 'true', 'false' ]:
-            raise TaskStateError, 'ERROR, illegal task spawned status: ' + str( self.state[ 'spawned' ])
-            sys.exit(1)
+            raise TaskStateError('ERROR, task spawned status not defined')
+        if self.state['spawned'] not in ['true', 'false']:
+            raise TaskStateError(
+                'ERROR, illegal task spawned status: ' +
+                str(self.state['spawned']))
 
-    def dump( self ):
+    def dump(self):
         # format: 'item1=value1, item2=value2, ...'
         result = ''
         for key in self.state:
-            result += key + '=' + str( self.state[ key ] ) + ', '
-        result = result.rstrip( ', ' )
+            result += key + '=' + str(self.state[key]) + ', '
+        result = result.rstrip(', ')
         return result
 
-    def parse( self, input ):
+    def parse(self, input):
         state = {}
 
         if self.__class__.is_legal(input):
-            state[ 'status' ] = input
+            state['status'] = input
             # ASSUME THAT ONLY succeeded TASKS, AT STARTUP, HAVE spawned
             # (in fact this will only be used to start tasks in 'waiting')
             if input == 'succeeded':
-                state[ 'spawned' ] = 'true'
+                state['spawned'] = 'true'
             else:
-                state[ 'spawned' ] = 'false'
+                state['spawned'] = 'false'
 
         else:
             # reconstruct state from a dumped state string
-            pairs = input.split( ', ' )
+            pairs = input.split(', ')
             for pair in pairs:
-                [ item, value ] = pair.split( '=' )
-                if item not in [ 'status', 'spawned' ]:
-                    raise TaskStateError, 'ERROR, illegal task status key: ' + item
-                if item == 'status' :
-                    if not self.__class__.is_legal( value ):
-                        raise TaskStateError, 'ERROR, illegal task state: ' + value
-                elif item == 'spawned' :
-                    if value not in [ 'true', 'false' ]:
-                        raise TaskStateError, 'ERROR, illegal task spawned status: ' + value
-                state[ item ] = value
+                [item, value] = pair.split('=')
+                if item not in ['status', 'spawned']:
+                    raise TaskStateError(
+                        'ERROR, illegal task status key: ' + item)
+                if item == 'status':
+                    if not self.__class__.is_legal(value):
+                        raise TaskStateError(
+                            'ERROR, illegal task state: ' + value)
+                elif item == 'spawned':
+                    if value not in ['true', 'false']:
+                        raise TaskStateError(
+                            'ERROR, illegal task spawned status: ' + value)
+                state[item] = value
 
         return state
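
task_state.dump() and task_state.parse() round-trip a task's state through
the 'item1=value1, item2=value2' string written to the state dump file. A
standalone illustration of just that format (the real methods additionally
validate values against the class's legal state list):

    def dump_state(state):
        # 'item1=value1, item2=value2, ...' as written by dump()
        return ', '.join('%s=%s' % (key, val) for key, val in state.items())

    def parse_state(text):
        # inverse of dump_state(), as read back by parse()
        state = {}
        for pair in text.split(', '):
            item, value = pair.split('=')
            state[item] = value
        return state

    line = dump_state({'status': 'succeeded', 'spawned': 'true'})
    assert parse_state(line) == {'status': 'succeeded', 'spawned': 'true'}
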
diff --git a/lib/cylc/taskdef.py b/lib/cylc/taskdef.py
index d2a8015..3abe9e5 100644
--- a/lib/cylc/taskdef.py
+++ b/lib/cylc/taskdef.py
@@ -15,15 +15,8 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""Task definition.
 
-NOTE on conditional and non-conditional triggers: all plain triggers
-(for a single task) are held in a single prerequisite object; but one
-such object is held for each conditional trigger. This has
-implications for global detection of duplicated prerequisites
-(detection is currently disabled).
-
-"""
+"""Task definition."""
 
 from cylc.cycling.loader import get_point_relative, get_interval
 from cylc.task_id import TaskID
@@ -37,7 +30,7 @@ class TaskDefError(Exception):
         self.msg = msg
 
     def __str__(self):
-        return "ERROR: %s" % self.msg 
+        return "ERROR: %s" % self.msg
 
 
 class TaskDef(object):
@@ -63,30 +56,22 @@ class TaskDef(object):
         self.suite_polling_cfg = {}
 
         self.clocktrigger_offset = None
+        self.expiration_offset = None
         self.namespace_hierarchy = []
-        # triggers[0,6] = [ A, B:1, C(T-6), ... ]
         self.triggers = {}
-        # cond[6,18] = [ '(A & B)|C', 'C | D | E', ... ]
-        self.cond_triggers = {}
-        # list of explicit internal outputs; change to dict if need to vary per
-        # cycle.
         self.outputs = []
 
+        self.external_triggers = []
+
         self.name = name
         self.elapsed_times = []
         self.mean_total_elapsed_time = None
 
-    def add_trigger(self, trigger, sequence):
-        """Add trigger to a named sequence."""
+    def add_trigger(self, triggers, expression, sequence):
+        """Add triggers to a named sequence."""
         if sequence not in self.triggers:
             self.triggers[sequence] = []
-        self.triggers[sequence].append(trigger)
-
-    def add_conditional_trigger(self, triggers, exp, sequence):
-        """Add conditional trigger to a named sequence."""
-        if sequence not in self.cond_triggers:
-            self.cond_triggers[sequence] = []
-        self.cond_triggers[sequence].append([triggers, exp])
+        self.triggers[sequence].append([triggers, expression])
 
     def add_sequence(self, sequence, is_implicit=False):
         """Add a sequence."""
@@ -108,7 +93,8 @@ class TaskDef(object):
         Must be called after all graph sequences added.
         """
         if len(self.sequences) == 0 and self.used_in_offset_trigger:
-            raise TaskDefError("No cycling sequences defined for %s" % self.name)
+            raise TaskDefError(
+                "No cycling sequences defined for %s" % self.name)
 
     @classmethod
     def get_cleanup_cutoff_point(cls, my_point, offset_sequence_tuples):
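
With this change plain and conditional triggers are stored together:
TaskDef.triggers[sequence] is now a list of [triggers, expression] pairs
instead of separate triggers/cond_triggers mappings. A sketch of the
resulting structure; the sequence key is a placeholder string here, whereas
cylc uses cycling sequence objects, and the entries would be trigger objects
rather than plain strings:

    triggers = {}

    def add_trigger(store, trigger_list, expression, sequence):
        """Append a [triggers, expression] pair to the named sequence."""
        if sequence not in store:
            store[sequence] = []
        store[sequence].append([trigger_list, expression])

    add_trigger(triggers, ['A:succeed'], 'A:succeed', 'P1D')
    add_trigger(triggers, ['B:succeed', 'C:fail'], 'B:succeed | C:fail', 'P1D')
    # triggers == {'P1D': [[['A:succeed'], 'A:succeed'],
    #                      [['B:succeed', 'C:fail'], 'B:succeed | C:fail']]}
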
diff --git a/lib/cylc/time_parser.py b/lib/cylc/time_parser.py
index 3a66677..b8a6792 100644
--- a/lib/cylc/time_parser.py
+++ b/lib/cylc/time_parser.py
@@ -73,8 +73,7 @@ class CylcTimeParser(object):
 
     POINT_INVALID_FOR_CYLC_REGEXES = [
         (r"^\d\d$", ("2 digit centuries not allowed. " +
-                     "Did you mean T-digit-digit e.g. 'T00'?")
-        )
+                     "Did you mean T-digit-digit e.g. 'T00'?"))
     ]
 
     RECURRENCE_FORMAT_REGEXES = [
@@ -121,9 +120,9 @@ class CylcTimeParser(object):
                 dump_format = u"+XCCYYMMDDThhmmZ"
             else:
                 dump_format = "CCYYMMDDThhmmZ"
-            
+
         self.timepoint_parser = isodatetime.parsers.TimePointParser(
-            allow_only_basic=False, # TODO - Ben: why was this set True
+            allow_only_basic=False,  # TODO - Ben: why was this set True
             allow_truncated=True,
             num_expanded_year_digits=num_expanded_year_digits,
             dump_format=dump_format,
@@ -148,7 +147,7 @@ class CylcTimeParser(object):
         self.context_end_point = context_end_point
         self.duration_parser = isodatetime.parsers.DurationParser()
         self.recurrence_parser = isodatetime.parsers.TimeRecurrenceParser(
-                        timepoint_parser=self.timepoint_parser)
+            timepoint_parser=self.timepoint_parser)
 
     def parse_interval(self, expr):
         """Parse an interval (duration) in full ISO date/time format."""
@@ -169,8 +168,7 @@ class CylcTimeParser(object):
         if context_point is None:
             context_point = self.context_start_point
         point, offset = self._get_point_from_expression(
-                                                  expr, context_point,
-                                                  allow_truncated=True)
+            expr, context_point, allow_truncated=True)
         if point is not None:
             if point.truncated:
                 point += context_point
@@ -178,8 +176,8 @@ class CylcTimeParser(object):
                 point += offset
             return point
         raise CylcTimeSyntaxError(
-                    ("'%s': not a valid cylc-shorthand or full " % expr) +
-                     "ISO 8601 date representation")
+            ("'%s': not a valid cylc-shorthand or full " % expr) +
+            "ISO 8601 date representation")
 
     def parse_recurrence(self, expression,
                          context_start_point=None,
@@ -223,7 +221,7 @@ class CylcTimeParser(object):
             if end_point is not None and end_point.truncated:
                 intv_context_truncated_point = end_point
             interval = self._get_interval_from_expression(
-                             intv, context=intv_context_truncated_point)
+                intv, context=intv_context_truncated_point)
             if format_num == 1:
                 interval = None
             if repetitions == 1:
@@ -241,8 +239,8 @@ class CylcTimeParser(object):
                     end_point += end_offset
 
             if (start_point is None and repetitions is None and
-                   interval is not None and
-                   context_start_point is not None):
+                    interval is not None and
+                    context_start_point is not None):
                 # isodatetime only reverses bounded end-point recurrences.
                 # This is unbounded, and will come back in reverse order.
                 # We need to reverse it.
@@ -258,8 +256,8 @@ class CylcTimeParser(object):
                 start_point=start_point,
                 duration=interval,
                 end_point=end_point
-            )         
-            
+            )
+
         raise CylcTimeSyntaxError("Could not parse %s" % expression)
 
     def _get_interval_from_expression(self, expr, context=None):
@@ -295,8 +293,7 @@ class CylcTimeParser(object):
         min_entry = ""
         for point in points:
             cpoint, offset = self._get_point_from_expression(
-                                              point, context,
-                                              allow_truncated=True)
+                point, context, allow_truncated=True)
             if cpoint is not None:
                 if cpoint.truncated:
                     cpoint += context
@@ -327,7 +324,7 @@ class CylcTimeParser(object):
             chain_expr = re.findall(self.CHAIN_REGEX, expr)
             expr = ""
             for item in chain_expr:
-                if not "P" in item:
+                if "P" not in item:
                     expr += item
                     continue
                 split_expr = self._offset_rec.split(item)
@@ -335,8 +332,7 @@ class CylcTimeParser(object):
                 if split_expr[1] == "+":
                     split_expr.pop(1)
                 expr_offset_item = "".join(split_expr[1:])
-                expr_offset_item = self.duration_parser.parse(
-                                                item[1:])
+                expr_offset_item = self.duration_parser.parse(item[1:])
                 if item[0] == "-":
                     expr_offset_item *= -1
                 if not expr_offset:
@@ -371,8 +367,8 @@ class CylcTimeParser(object):
                             continue
                         return expr_point, expr_offset
         raise CylcTimeSyntaxError(
-                  ("'%s': not a valid cylc-shorthand or full " % expr) +
-                  "ISO 8601 date representation")
+            ("'%s': not a valid cylc-shorthand or full " % expr) +
+            "ISO 8601 date representation")
 
 
 class TestRecurrenceSuite(unittest.TestCase):
@@ -382,7 +378,7 @@ class TestRecurrenceSuite(unittest.TestCase):
     def setUp(self):
         self._start_point = "19991226T0930Z"
         # Note: the following timezone will be Z-ified *after* truncation
-        # or offsets are applied. 
+        # or offsets are applied.
         self._end_point = "20010506T1200+0200"
         self._parsers = {
             0: CylcTimeParser(
@@ -525,8 +521,7 @@ class TestRecurrenceSuite(unittest.TestCase):
 
     def test_inter_cycle_timepoints(self):
         """Test the inter-cycle point parsing."""
-        task_cycle_time = self._parsers[0].parse_timepoint(
-                                "20000101T00Z")
+        task_cycle_time = self._parsers[0].parse_timepoint("20000101T00Z")
         tests = [("T06", "20000101T0600Z", 0),
                  ("-PT6H", "19991231T1800Z", 0),
                  ("+P5Y2M", "20050301T0000Z", 0),
@@ -539,8 +534,8 @@ class TestRecurrenceSuite(unittest.TestCase):
         for expression, ctrl_data, num_expanded_year_digits in tests:
             parser = self._parsers[num_expanded_year_digits]
             test_data = str(parser.parse_timepoint(
-                                  expression,
-                                  context_point=task_cycle_time))
+                expression,
+                context_point=task_cycle_time))
             self.assertEqual(test_data, ctrl_data)
 
     def test_interval(self):
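
The inter-cycle point tests above pair shorthand offsets with their expected
results; plain datetime arithmetic reproduces the simple hour-offset cases
(a cross-check only, not cylc's isodatetime-based parsing):

    from datetime import datetime, timedelta

    cycle = datetime(2000, 1, 1, 0, 0)
    print(cycle - timedelta(hours=6))  # 1999-12-31 18:00:00, cf. "-PT6H"
    print(cycle.replace(hour=6))       # 2000-01-01 06:00:00, cf. "T06"
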
diff --git a/lib/cylc/trigger.py b/lib/cylc/trigger.py
index 2caceaf..3ec66f4 100644
--- a/lib/cylc/trigger.py
+++ b/lib/cylc/trigger.py
@@ -29,11 +29,13 @@ BACK_COMPAT_MSG_RE = re.compile('^(.*)\[\s*T\s*(([+-])\s*(\d+))?\s*\](.*)$')
 MSG_RE = re.compile('^(.*)\[\s*(([+-])?\s*(.*))?\s*\](.*)$')
 
 
-class TriggerError( Exception ):
-    def __init__( self, msg ):
+class TriggerError(Exception):
+    def __init__(self, msg):
         self.msg = msg
-    def __str__( self ):
-        return repr( self.msg )
+
+    def __str__(self):
+        return repr(self.msg)
+
 
 class trigger(object):
     """
@@ -79,10 +81,9 @@ Task triggers, used to generate prerequisite messages.
         try:
             msg = outputs[qualifier]
         except KeyError:
-            raise TriggerError, (
-                    "ERROR: undefined trigger qualifier: %s:%s" % (
-                        task_name, qualifier)
-            )
+            raise TriggerError(
+                "ERROR: undefined trigger qualifier: %s:%s" % (
+                    task_name, qualifier))
         else:
             # Back compat for [T+n] in message string.
             m = re.match(BACK_COMPAT_MSG_RE, msg)
@@ -90,7 +91,8 @@ Task triggers, used to generate prerequisite messages.
             if m:
                 prefix, signed_offset, sign, offset, suffix = m.groups()
                 if offset:
-                    msg_offset = base_interval.get_inferred_child(signed_offset)
+                    msg_offset = base_interval.get_inferred_child(
+                        signed_offset)
                 else:
                     msg_offset = get_interval_cls().get_null()
             else:
@@ -102,17 +104,16 @@ Task triggers, used to generate prerequisite messages.
                     else:
                         msg_offset = get_interval_cls().get_null()
                 else:
-                    raise TriggerError, (
-                            "ERROR: undefined trigger qualifier: %s:%s" % (
-                                task_name, qualifier)
-                    )
+                    raise TriggerError(
+                        "ERROR: undefined trigger qualifier: %s:%s" % (
+                            task_name, qualifier))
             self.message = msg
             self.message_offset = msg_offset
 
     def is_standard(self):
         return self.builtin is not None
 
-    def get(self, point):
+    def get_prereq(self, point):
         """Return a prerequisite string and the relevant point."""
         if self.message:
             # Message trigger
@@ -123,9 +124,8 @@ Task triggers, used to generate prerequisite messages.
                 if self.message_offset:
                     point += self.message_offset
                 if self.graph_offset_string:
-                    point = get_point_relative(
-                            self.graph_offset_string, point)
-            preq = re.sub( '\[.*\]', str(point), preq )
+                    point = get_point_relative(self.graph_offset_string, point)
+            preq = re.sub('\[.*\]', str(point), preq)
         else:
             # Built-in trigger
             if self.cycle_point:
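
get_prereq() (renamed from get()) fills the computed cycle point into the
'[...]' placeholder of a message trigger to build the prerequisite string;
the substitution step is just the re.sub shown above, e.g. with a made-up
message template:

    import re

    def substitute_point(prereq_template, point):
        # the placeholder-substitution step of trigger.get_prereq()
        return re.sub(r'\[.*\]', str(point), prereq_template)

    print(substitute_point('obs data ready for [T-6]', '19991231T1800Z'))
    # -> obs data ready for 19991231T1800Z
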
diff --git a/lib/cylc/which.py b/lib/cylc/which.py
index 209bea0..3971e6a 100644
--- a/lib/cylc/which.py
+++ b/lib/cylc/which.py
@@ -20,6 +20,7 @@
 
 import os
 
+
 def which(program):
     def is_exe(fpath):
         return os.path.exists(fpath) and os.access(fpath, os.X_OK)
diff --git a/lib/parsec/Jinja2Support.py b/lib/parsec/Jinja2Support.py
index a8a5bb5..07572e1 100644
--- a/lib/parsec/Jinja2Support.py
+++ b/lib/parsec/Jinja2Support.py
@@ -21,10 +21,7 @@ import glob
 from jinja2 import (
         Environment,
         FileSystemLoader,
-        TemplateSyntaxError,
         TemplateError,
-        TemplateNotFound,
-        UndefinedError,
         StrictUndefined)
 import cylc.flags
 
@@ -44,7 +41,7 @@ def load_template_vars( pairs, pairs_file ):
                 if re.match( '^\s*$', line ):
                     # skip blank lines:
                     continue
-                var, val = line.split('=')
+                var, val = line.split('=', 1)
                 var = var.strip()
                 val = val.strip()
                 res[var] = val
@@ -52,7 +49,7 @@ def load_template_vars( pairs, pairs_file ):
         else:
             raise TemplateError, "ERROR: template vars file not found: " + pairs_file
     for i in pairs:
-        var, val = i.split('=')
+        var, val = i.split('=', 1)
         var = var.strip()
         val = val.strip()
         res[var] = val
@@ -82,7 +79,7 @@ def Jinja2Process( flines, dir, inputs=[], inputs_file=None ):
     for filterdir in usedfdirs:
         sys.path.append( os.path.abspath( filterdir ))
         for f in glob.glob( os.path.join( filterdir, '*.py' )):
-            fname = os.path.basename( f ).rstrip( '.py' )
+            fname = os.path.splitext(os.path.basename(f))[0]
             # TODO - EXCEPTION HANDLING FOR LOADING CUSTOM FILTERS
             m = __import__( fname )
             env.filters[ fname ] = getattr( m, fname )
@@ -96,14 +93,16 @@ def Jinja2Process( flines, dir, inputs=[], inputs_file=None ):
     # CALLERS SHOULD HANDLE JINJA2 TEMPLATESYNTAXERROR AND TEMPLATEERROR
     # try:
     template = env.from_string( '\n'.join(flines[1:]) )
-    # except Exception, x:
+    # except Exception as exc:
     #     # This happens if we use an unknown Jinja2 filter, for example.
     ##     # TODO - THIS IS CAUGHT BY VALIDATE BUT NOT BY VIEW COMMAND...
-    #     raise TemplateError( x )
+    #     raise TemplateError(exc)
     try:
         template_vars = load_template_vars( inputs, inputs_file )
-    except Exception, x:
-        raise TemplateError( x )
+    except Exception as exc:
+        if isinstance(exc, TemplateError):
+            raise
+        raise TemplateError(exc)
 
     # CALLERS SHOULD HANDLE JINJA2 TEMPLATESYNTAXERROR AND TEMPLATEERROR
     # AND TYPEERROR (e.g. for not using "|int" filter on number inputs.
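
Two small but significant fixes here: template variables are now split on
the first '=' only, so values may themselves contain '=', and custom filter
module names are derived with os.path.splitext instead of str.rstrip (which
strips a character set, not a suffix). Both are easy to demonstrate:

    import os

    var, val = 'GRID=nx=360,ny=180'.split('=', 1)
    print(val)                              # nx=360,ny=180 (kept intact)

    print('happy.py'.rstrip('.py'))         # 'ha'    - rstrip removes chars
    print(os.path.splitext('happy.py')[0])  # 'happy' - splitext removes suffix
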
diff --git a/lib/parsec/OrderedDict.py b/lib/parsec/OrderedDict.py
index d959d35..2e5ea5a 100644
--- a/lib/parsec/OrderedDict.py
+++ b/lib/parsec/OrderedDict.py
@@ -18,6 +18,7 @@
 
 """Ordered Dictionary data structure used extensively in cylc."""
 
+
 try:
     # first try the fast ordereddict C implementation.
     # DOWNLOAD: http://anthon.home.xs4all.nl/Python/ordereddict/
@@ -32,3 +33,88 @@ except ImportError:
         # then try the pre-2.7 backport from ActiveState
         # (packaged with cylc)
         from OrderedDictCompat import OrderedDict
+
+
+class OrderedDictWithDefaults(OrderedDict):
+
+    """Subclass to provide defaults fetching capability.
+
+    Note that defining a '__missing__' method would work for foo[key],
+    but doesn't for foo.get(key).
+
+    """
+
+    def __init__(self, *args, **kwargs):
+        """Allow a defaults argument."""
+        self._allow_contains_default = True
+        super(OrderedDictWithDefaults, self).__init__(*args, **kwargs)
+
+    def __getitem__(self, key):
+        """Override to look in our special .defaults attribute, if it exists."""
+        try:
+            return OrderedDict.__getitem__(self, key)
+        except KeyError:
+            if hasattr(self, 'defaults_'):
+                return self.defaults_[key]
+            raise
+
+    def __setitem__(self, *args, **kwargs):
+        """Make sure that we don't set the default value!"""
+        self._allow_contains_default = False
+        return_value = OrderedDict.__setitem__(self, *args, **kwargs)
+        self._allow_contains_default = True
+        return return_value
+
+    def keys(self):
+        """Include the default keys, after the list of actually-set ones."""
+        keys = list(self)
+        for key in getattr(self, 'defaults_', []):
+            if key not in keys:
+                keys.append(key)
+        return keys
+
+    def values(self):
+        """Return a list of values, including default ones."""
+        return [self[key] for key in self.keys()]
+
+    def items(self):
+        """Return key-value pairs, including default ones."""
+        return [(key, self[key]) for key in self.keys()]
+
+    def iterkeys(self):
+        """Include default keys - no memory saving over .keys()."""
+        for k in self.keys():
+            yield k
+
+    def itervalues(self):
+        """Include default values - no memory saving over .values()."""
+        for k in self.keys():
+            yield self[k]
+
+    def iteritems(self):
+        """Include default key-value pairs - no memory saving over .items()"""
+        for k in self.keys():
+            yield (k, self[k])
+
+    def __contains__(self, key):
+        if self._allow_contains_default:
+            if key in getattr(self, "defaults_", {}):
+                return True
+        return OrderedDict.__contains__(self, key)
+
+    def __nonzero__(self):
+        """Include any default keys in the nonzero calculation."""
+        return bool(self.keys())
+
+    def __repr__(self):
+        """User-friendly-ish representation of defaults and others."""
+        non_default_items = []
+        non_default_keys = list(self)
+        for key in non_default_keys:
+            non_default_items.append((key, self[key]))
+        default_items = []
+        for key in getattr(self, 'defaults_', []):
+            if key not in non_default_keys:
+                default_items.append((key, self[key]))
+        repr_map = {"": non_default_items, "defaults_": default_items}
+        return "<" + type(self).__name__ + "(" + repr(repr_map) + ")>\n"
diff --git a/lib/parsec/__init__.py b/lib/parsec/__init__.py
index e69de29..bc1783e 100644
--- a/lib/parsec/__init__.py
+++ b/lib/parsec/__init__.py
@@ -0,0 +1,6 @@
+
+class ParsecError(Exception):
+    def __init__(self, msg):
+        self.msg = msg
+    def __str__(self):
+        return str(self.msg)
diff --git a/lib/parsec/config.py b/lib/parsec/config.py
index f155fa8..b4ee44a 100755
--- a/lib/parsec/config.py
+++ b/lib/parsec/config.py
@@ -17,27 +17,25 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os, sys, re
+from parsec import ParsecError
 from parsec.fileparse import parse, FileNotFoundError
 from parsec.util import printcfg
 from parsec.validate import validate, check_compulsory, expand, validator
-from parsec.OrderedDict import OrderedDict
+from parsec.OrderedDict import OrderedDictWithDefaults
 from parsec.util import replicate, itemstr
 from parsec.upgrade import UpgradeError
 import cylc.flags
 
-class ParsecError( Exception ):
-    def __init__( self, msg ):
-        self.msg = msg
-    def __str__( self ):
-        return repr(self.msg)
 
-class ItemNotFoundError( ParsecError ):
-    def __init__( self, msg ):
-        self.msg = 'ERROR: item not found: ' + msg
+class ItemNotFoundError(ParsecError):
+    def __init__(self, msg):
+        self.msg = 'ERROR: item not found: %s' % msg
+
+
+class NotSingleItemError(ParsecError):
+    def __init__(self, msg):
+        self.msg = 'ERROR: not a singular item: %s' % msg
 
-class NotSingleItemError( ParsecError ):
-    def __init__( self, msg ):
-        self.msg = 'ERROR: not a singular item: ' + msg
 
 class config( object ):
     "Object wrapper for parsec functions"
@@ -45,8 +43,8 @@ class config( object ):
     def __init__( self, spec, upgrader=None, write_proc=False,
             tvars=[], tvars_file=None ):
 
-        self.sparse = OrderedDict()
-        self.dense = OrderedDict()
+        self.sparse = OrderedDictWithDefaults()
+        self.dense = OrderedDictWithDefaults()
         self.upgrader = upgrader
         self.tvars = tvars
         self.tvars_file = tvars_file
@@ -62,48 +60,31 @@ class config( object ):
                 self.checkspec( value, pars )
             else:
                 if not isinstance( value, validator ):
-                    raise ParsecError( "Illegal file spec item: " + itemstr( pars, repr(value)) )
+                    raise ParsecError(
+                        "Illegal file spec item: %s" % itemstr(
+                            pars, repr(value)))
 
 
-    def loadcfg( self, rcfile, title="", strict=False, silent=False ):
+    def loadcfg(self, rcfile, title=""):
         """Parse a config file, upgrade or deprecate items if necessary,
         validate it against the spec, and if this is not the first load,
         combine/override with the existing loaded config."""
-        try:
-            sparse = parse( rcfile, write_proc=self.write_proc,
-                template_vars=self.tvars, template_vars_file=self.tvars_file )
-        except Exception, x:
-            if strict:
-                raise
-            if not silent or cylc.flags.verbose:
-                # no global.rc file, for instance, is not really an error.
-                print >> sys.stderr, x
-                print >> sys.stderr, "WARNING: " + title + " parsing failed (continuing)"
-        else:
-            # upgrade deprecated items if necessary
-            # (before validation, else validation will fail)
-            if self.upgrader is not None:
-                try:
-                    self.upgrader( sparse, title )
-                except UpgradeError, x:
-                    print >> sys.stderr, x
-                    print >> sys.stderr, "WARNING: " + title + " upgrade error, validation may fail"
 
-            try:
-                self.validate( sparse )
-            except Exception, x:
-                if strict:
-                    raise
-                if cylc.flags.verbose:
-                    print >> sys.stderr, x
-                    print >> sys.stderr, "WARNING: " + title + " validation failed"
+        sparse = parse(
+                rcfile, write_proc=self.write_proc,
+                template_vars=self.tvars,
+                template_vars_file=self.tvars_file)
 
-            else:
-                if not self.sparse:
-                    self.sparse = sparse
-                else:
-                    # already loaded, this must be an override
-                    replicate( self.sparse, sparse )
+        if self.upgrader is not None:
+            self.upgrader(sparse, title)
+
+        self.validate(sparse)
+
+        if not self.sparse:
+            self.sparse = sparse
+        else:
+            # Already loaded, override with new items.
+            replicate(self.sparse, sparse)
 
     def validate( self, sparse ):
         "Validate sparse config against the file spec."
@@ -134,7 +115,7 @@ class config( object ):
             try:
                 cfg = cfg[key]
             except KeyError, x:
-                raise ItemNotFoundError( itemstr(parents,key) )
+                raise ItemNotFoundError(itemstr(parents,key))
             else:
                 parents.append(key)
 
@@ -163,7 +144,7 @@ class config( object ):
             for keys in mkeys:
                 item = self.get( keys, sparse )
                 if isinstance( item, list ) or isinstance( item, dict ):
-                    raise NotSingleItemError( itemstr(keys) )
+                    raise NotSingleItemError(itemstr(keys))
                 if not item:
                     item = none_str or "None"
                 items.append(str(item))
diff --git a/lib/parsec/fileparse.py b/lib/parsec/fileparse.py
index a1c3874..13bb3dc 100644
--- a/lib/parsec/fileparse.py
+++ b/lib/parsec/fileparse.py
@@ -21,7 +21,8 @@ import sys
 import re
 import traceback
 
-from parsec.OrderedDict import OrderedDict
+from parsec import ParsecError
+from parsec.OrderedDict import OrderedDictWithDefaults
 from parsec.include import inline, IncludeFileNotFoundError
 from parsec.util import itemstr
 import cylc.flags
@@ -44,17 +45,13 @@ parsec config file parsing:
 """
 
 try:
-    from Jinja2Support import (
-            Jinja2Process,
-            TemplateError,
-            TemplateNotFound,
-            UndefinedError,
-            TemplateSyntaxError)
+    from Jinja2Support import Jinja2Process, TemplateError
 except ImportError:
     jinja2_disabled = True
 else:
     jinja2_disabled = False
 
+
 # heading/sections can contain commas (namespace name lists) and any
 # regex pattern characters (this was for pre cylc-6 satellite tasks).
 # Proper task names are checked later in config.py.
@@ -97,22 +94,29 @@ _TRIPLE_QUOTE = {
 }
 
 
-class ParseError( Exception ):
-    def __init__(self, reason, index=None, line=None, prefix="ParseError: "):
-        self.msg = prefix + reason
+class FileParseError(ParsecError):
+
+    """An error raised when attempting to read in the config file(s)."""
+
+    def __init__(self, reason, index=None, line=None, lines=None,
+                 error_name="FileParseError"):
+        self.msg = error_name + ":\n" + reason
         if index:
             self.msg += " (line " + str(index+1) + ")"
         if line:
             self.msg += ":\n   " + line.strip()
+        if lines:
+            self.msg += "\nContext lines:\n" + "\n".join(lines)
+            self.msg += "\t<-- " + error_name
         if index:
             # TODO - make 'view' function independent of cylc:
             self.msg += "\n(line numbers match 'cylc view -p')"
-    def __str__( self ):
-        return self.msg
 
-class FileNotFoundError( ParseError ):
+
+class FileNotFoundError(FileParseError):
     pass
 
+
 def _concatenate( lines ):
     """concatenate continuation lines"""
     index = 0
@@ -142,7 +146,7 @@ def addsect( cfig, sname, parents ):
         if cylc.flags.verbose:
             print 'Section already encountered: ' + itemstr( parents + [sname] )
     else:
-        cfig[sname] = OrderedDict()
+        cfig[sname] = OrderedDictWithDefaults()
 
 def addict( cfig, key, val, parents, index ):
     """Add a new [parents...]key=value pair to a nested dict."""
@@ -153,7 +157,7 @@ def addict( cfig, key, val, parents, index ):
     if not isinstance( cfig, dict ):
         # an item of this name has already been encountered at this level
         print >> sys.stderr, itemstr( parents, key, val )
-        raise ParseError( 'ERROR line ' + str(index) + ': already encountered ' + itemstr( parents ))
+        raise FileParseError( 'ERROR line ' + str(index) + ': already encountered ' + itemstr( parents ))
 
     if key in cfig:
         # this item already exists
@@ -193,7 +197,7 @@ def multiline( flines, value, index, maxline ):
     elif newvalue.find(quot) != -1:
         # TODO - this should be handled by validation?:
         # e.g. non-comment follows single-line triple-quoted string
-        raise ParseError( 'Invalid line', o_index, flines[index] )
+        raise FileParseError( 'Invalid line', o_index, flines[index] )
 
     while index < maxline:
         index += 1
@@ -205,12 +209,12 @@ def multiline( flines, value, index, maxline ):
             # end of multiline, process it
             break
     else:
-        raise ParseError( 'Multiline string not closed', o_index, flines[o_index] )
+        raise FileParseError( 'Multiline string not closed', o_index, flines[o_index] )
 
     mat = multi_line.match(line)
     if not mat:
         # e.g. end multi-line string followed by a non-comment
-        raise ParseError( 'Invalid line', o_index, line )
+        raise FileParseError( 'Invalid line', o_index, line )
 
     #value, comment = mat.groups()
     return quot + newvalue + line, index
@@ -250,24 +254,19 @@ def read_and_proc( fpath, template_vars=[], template_vars_file=None, viewcfg=Non
         try:
             flines = inline( flines, fdir, fpath, False, viewcfg=viewcfg, for_edit=asedit )
         except IncludeFileNotFoundError, x:
-            raise ParseError( str(x) )
+            raise FileParseError( str(x) )
 
     # process with Jinja2
     if do_jinja2:
         if flines and re.match( '^#![jJ]inja2\s*', flines[0] ):
             if jinja2_disabled:
-                raise ParseError( 'Jinja2 is not installed' )
+                raise FileParseError( 'Jinja2 is not installed' )
             if cylc.flags.verbose:
                 print "Processing with Jinja2"
             try:
                 flines = Jinja2Process(
                         flines, fdir, template_vars, template_vars_file)
-            except (
-                    TemplateSyntaxError,
-                    TemplateNotFound,
-                    TemplateError,
-                    TypeError, 
-                    UndefinedError) as exc:
+            except (TemplateError, TypeError) as exc:
                 # Extract diagnostic info from the end of the Jinja2 traceback.
                 exc_lines = traceback.format_exc().splitlines()
                 suffix = []
@@ -276,7 +275,21 @@ def read_and_proc( fpath, template_vars=[], template_vars_file=None, viewcfg=Non
                     if re.match("\s*File", line):
                         break
                 msg = '\n'.join(reversed(suffix))
-                raise ParseError(msg, prefix="Jinja2 Error:\n")
+                lines = None
+                if (hasattr(exc, 'lineno') and
+                        getattr(exc, 'filename', None) is None):
+                    # Jinja2 omits the line if it isn't from an external file.
+                    line_index = exc.lineno - 1
+                    if getattr(exc, 'source', None) is None:
+                        # Jinja2Support strips the shebang line.
+                        lines = flines[1:]
+                    elif isinstance(exc.source, basestring):
+                        lines = exc.source.splitlines()
+                    if lines:
+                        min_line_index = max(line_index - 3, 0)
+                        lines = lines[min_line_index: line_index + 1]
+                raise FileParseError(
+                    msg, lines=lines, error_name="Jinja2Error")
 
     # concatenate continuation lines
     if do_contin:
@@ -298,11 +311,11 @@ def parse( fpath, write_proc=False,
         if cylc.flags.verbose:
             print "Writing file " + fpath_processed
         f = open( fpath_processed, 'w' )
-        f.write('\n'.join(flines))
+        f.write('\n'.join(flines) + '\n')
         f.close()
 
     nesting_level = 0
-    config = OrderedDict()
+    config = OrderedDictWithDefaults()
     sect_name = None
     parents = []
 
@@ -328,7 +341,7 @@ def parse( fpath, write_proc=False,
             nb = len(s_open)
 
             if nb != len(s_close):
-                raise ParseError('bracket mismatch', index, line )
+                raise FileParseError('bracket mismatch', index, line )
             elif nb == nesting_level:
                 # sibling section
                 parents = parents[:-1] + [sect_name]
@@ -340,7 +353,7 @@ def parse( fpath, write_proc=False,
                 ndif = nesting_level -nb
                 parents = parents[:-ndif-1] + [sect_name]
             else:
-                raise ParseError( 'Error line ' + str(index+1) + ': ' + line )
+                raise FileParseError( 'Error line ' + str(index+1) + ': ' + line )
             nesting_level = nb
             addsect( config, sect_name, parents[:-1] )
 
@@ -355,6 +368,6 @@ def parse( fpath, write_proc=False,
                 addict( config, key, val, parents, index )
             else:
                 # no match
-                raise ParseError( 'Invalid line ' + str(index+1) + ': ' + line )
+                raise FileParseError( 'Invalid line ' + str(index+1) + ': ' + line )
 
     return config
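
    For reference, a sketch of how the new FileParseError constructor above renders
    a Jinja2 failure with context lines. The reason text and template lines are made
    up, and it assumes the ParsecError base class supplies __str__ (the per-class
    __str__ methods are removed by this commit):

        # Illustrative only -- not part of the commit.
        from parsec.fileparse import FileParseError

        err = FileParseError(
            "UndefinedError: 'FOO' is undefined",
            lines=["{% set N = 3 %}", "{{ FOO }}"],
            error_name="Jinja2Error")
        print(err)
        # Jinja2Error:
        # UndefinedError: 'FOO' is undefined
        # Context lines:
        # {% set N = 3 %}
        # {{ FOO }}    <-- Jinja2Error   (tab-separated marker on the last line)
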
diff --git a/lib/parsec/include.py b/lib/parsec/include.py
index 4fc0a85..117e6bc 100644
--- a/lib/parsec/include.py
+++ b/lib/parsec/include.py
@@ -20,17 +20,25 @@ import re, os, sys
 import datetime
 from shutil import copy as shcopy
 from copy import copy
+from parsec import ParsecError
 
-class IncludeFileNotFoundError( Exception ):
 
-    def __init__( self, flist ):
+class IncludeFileNotFoundError(ParsecError):
+
+    def __init__(self, flist):
+        """Missing include file error.
+
+        E.g. for [DIR/top.rc, DIR/inc/sub.rc, DIR/inc/gone.rc]
+        "Include-file not found: inc/gone.rc via inc/sub.rc from DIR/top.rc"
+        """
         rflist = copy(flist)
+        top_file = rflist[0]
+        top_dir = os.path.dirname(top_file) + '/'
         rflist.reverse()
-        self.msg = "File not found: " + rflist[0]
-        for rf in rflist[1:]:
-            self.msg += "\n   via " + rf
-    def __str__( self ):
-        return self.msg
+        self.msg = "Include-file not found: %s" % rflist[0].replace(top_dir, '')
+        for f in rflist[1:-1]:
+            self.msg += ' via %s' % f.replace(top_dir, '')
+        self.msg += ' from %s' % top_file
 
 done = []
 modtimes = {}
diff --git a/lib/parsec/tests/nullcfg/bin/missing.py b/lib/parsec/tests/nullcfg/bin/missing.py
deleted file mode 100755
index fbfa8f8..0000000
--- a/lib/parsec/tests/nullcfg/bin/missing.py
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/usr/bin/env python
-
-import os, sys
-
-fpath = os.path.dirname(os.path.abspath(__file__))
-# parsec
-sys.path.append( fpath + '/../../..' )
-
-"""
-A missing config file should successfully yield an empty sparse config dict.
-""" 
-
-from parsec.config import config
-from parsec.validate import validator as vdr
-from parsec.OrderedDict import OrderedDict
-
-SPEC = { 'title' : vdr( vtype="string" ) }
-cfg = config( SPEC )
-cfg.loadcfg( "missing.rc" )
-
-if cfg.get(sparse=True) != OrderedDict():
-    sys.exit(1)
diff --git a/lib/parsec/tests/synonyms/bin/synonyms.py b/lib/parsec/tests/synonyms/bin/synonyms.py
index e36682e..ea34b38 100755
--- a/lib/parsec/tests/synonyms/bin/synonyms.py
+++ b/lib/parsec/tests/synonyms/bin/synonyms.py
@@ -17,7 +17,7 @@ rcfile = rcname + '.rc'
 
 cfg = config( SPEC )
 
-cfg.loadcfg( rcfile, strict=True )
+cfg.loadcfg( rcfile )
 
 res = cfg.get( sparse=True)
 
diff --git a/lib/parsec/upgrade.py b/lib/parsec/upgrade.py
index b2ac0f5..8a27a83 100755
--- a/lib/parsec/upgrade.py
+++ b/lib/parsec/upgrade.py
@@ -22,16 +22,14 @@ if __name__ == '__main__':
     here = os.path.dirname( __file__ )
     sys.path.append( here + '/..' )
 
+from parsec import ParsecError
 from parsec.OrderedDict import OrderedDict
 import cylc.flags
 
 """Support automatic deprecation and obsoletion of parsec config items."""
 
-class UpgradeError( Exception ):
-    def __init__( self, msg ):
-        self.msg = msg
-    def __str__( self ):
-        return repr(self.msg)
+class UpgradeError(ParsecError):
+    pass
 
 class converter( object ):
     """Create custom config value converters."""
@@ -109,7 +107,7 @@ class upgrader( object ):
             return [upg]
         if upg['old'].count( '__MANY__' ) > 1:
             print >> sys.stderr, upg['old']
-            raise UpgradeError( "Multiple simultaneous __MANY__ not supported" )
+            raise UpgradeError("Multiple simultaneous __MANY__ not supported")
         exp_upgs = []
         pre = []
         post = []
diff --git a/lib/parsec/util.py b/lib/parsec/util.py
index 30d12b3..ebe4a29 100755
--- a/lib/parsec/util.py
+++ b/lib/parsec/util.py
@@ -19,7 +19,7 @@
 import os
 import sys
 from copy import copy
-from parsec.OrderedDict import OrderedDict
+from parsec.OrderedDict import OrderedDictWithDefaults
 
 """
 Utility functions for printing and manipulating PARSEC NESTED DICTS.
@@ -94,28 +94,32 @@ def replicate( target, source ):
     otherwise adds elements to it.
     """
     if not source:
-        target = OrderedDict()
+        target = OrderedDictWithDefaults()
         return
+    if hasattr(source, "defaults_"):
+        target.defaults_ = pdeepcopy(source.defaults_)
     for key,val in source.items():
         if isinstance( val, dict ):
             if key not in target:
-                target[key] = OrderedDict()
+                target[key] = OrderedDictWithDefaults()
+            if hasattr(val, 'defaults_'):
+                target[key].defaults_ = pdeepcopy(val.defaults_)
             replicate( target[key], val )
         elif isinstance( val, list ):
             target[key] = val[:]
         else:
             target[key] = val
 
-def pdeepcopy( source):
+def pdeepcopy(source):
     """Make a deep copy of a pdict source"""
-    target = OrderedDict()
+    target = OrderedDictWithDefaults()
     replicate( target, source )
     return target
 
 def poverride( target, sparse ):
     """Override items in a target pdict, target sub-dicts must already exist."""
     if not sparse:
-        target = OrderedDict()
+        target = OrderedDictWithDefaults()
         return
     for key,val in sparse.items():
         if isinstance( val, dict ):
@@ -129,32 +133,58 @@ def m_override( target, sparse ):
     """Override items in a target pdict. Target keys must already exist
     unless there is a "__MANY__" placeholder in the right position."""
     if not sparse:
-        target = OrderedDict()
+        target = OrderedDictWithDefaults()
         return
-    for key,val in sparse.items():
-        if isinstance( val, dict ):
-            if key not in target:
-                if '__MANY__' in target:
-                    target[key] = OrderedDict()
-                    replicate( target[key], target['__MANY__'] )
+    stack = [(sparse, target, [], OrderedDictWithDefaults())]
+    defaults_list = []
+    while stack:
+        source, dest, keylist, many_defaults = stack.pop(0)
+        if many_defaults:
+            defaults_list.append((dest, many_defaults))
+        for key, val in source.items():
+            if isinstance( val, dict ):
+                if key in many_defaults:
+                    child_many_defaults = many_defaults[key]
                 else:
-                    # TODO - validation prevents this, but handle properly for completeness.
-                    raise Exception( "parsec dict override: no __MANY__ placeholder" )
-            m_override( target[key], val )
-        else:
-            if key not in target:
-                if '__MANY__' in target:
-                    if isinstance( val, list ):
-                        target[key] = val[:]
+                    child_many_defaults = OrderedDictWithDefaults()
+                if key not in dest:
+                    if '__MANY__' in dest:
+                        dest[key] = OrderedDictWithDefaults()
+                        child_many_defaults = dest['__MANY__']
+                    elif '__MANY__' in many_defaults:
+                        # A 'sub-many' dict - would it ever exist in real life?
+                        dest[key] = OrderedDictWithDefaults()
+                        child_many_defaults = many_defaults['__MANY__']
+                    elif key in many_defaults:
+                        dest[key] = OrderedDictWithDefaults()
                     else:
-                        target[key] = val
-                else:
-                    # TODO - validation prevents this, but handle properly for completeness.
-                    raise Exception( "parsec dict override: no __MANY__ placeholder" )
-            if isinstance( val, list ):
-                target[key] = val[:]
+                        # TODO - validation prevents this, but handle properly for completeness.
+                        raise Exception(
+                            "parsec dict override: no __MANY__ placeholder" +
+                            "%s" % (keylist + [key])
+                        )
+                stack.append((val, dest[key], keylist + [key], child_many_defaults))
             else:
-                target[key] = val
+                if key not in dest:
+                    if '__MANY__' in dest or key in many_defaults or '__MANY__' in many_defaults:
+                        if isinstance( val, list ):
+                            dest[key] = val[:]
+                        else:
+                            dest[key] = val
+
+                    else:
+                        # TODO - validation prevents this, but handle properly for completeness.
+                        raise Exception(
+                            "parsec dict override: no __MANY__ placeholder" +
+                            "%s" % (keylist + [key])
+                        )
+                if isinstance( val, list ):
+                    dest[key] = val[:]
+                else:
+                    dest[key] = val
+    for dest_dict, defaults in defaults_list:
+        dest_dict.defaults_ = defaults
+
 
 def un_many( cfig ):
     """Remove any '__MANY__' items from a nested dict, in-place."""
@@ -162,7 +192,13 @@ def un_many( cfig ):
         return
     for key,val in cfig.items():
         if key == '__MANY__':
-            del cfig[key]
+            try:
+                del cfig[key]
+            except KeyError:
+                if hasattr(cfig, 'defaults_') and key in cfig.defaults_:
+                    del cfig.defaults_[key]
+                else:
+                    raise
         elif isinstance( val, dict ):
             un_many( cfig[key] )
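
    The iterative m_override above keeps the semantics of the old recursive version
    -- keys are only created on the fly beneath a __MANY__ placeholder -- while also
    recording the placeholder template on each new sub-dict as defaults_. A rough
    illustration with made-up keys (not part of the commit):

        # Illustrative only -- not part of the commit.
        from parsec.OrderedDict import OrderedDictWithDefaults
        from parsec.util import m_override

        target = OrderedDictWithDefaults()
        target['runtime'] = OrderedDictWithDefaults()
        target['runtime']['__MANY__'] = OrderedDictWithDefaults()
        target['runtime']['__MANY__']['script'] = 'true'

        sparse = {'runtime': {'foo': {'script': 'false'}}}
        m_override(target, sparse)
        # target['runtime']['foo']['script'] == 'false', and the __MANY__
        # template is attached as target['runtime']['foo'].defaults_.
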
 
diff --git a/lib/parsec/validate.py b/lib/parsec/validate.py
index 52eeee2..47e3908 100644
--- a/lib/parsec/validate.py
+++ b/lib/parsec/validate.py
@@ -17,7 +17,8 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import sys, re
-from parsec.OrderedDict import OrderedDict
+from parsec import ParsecError
+from parsec.OrderedDict import OrderedDictWithDefaults
 from parsec.util import m_override, un_many, itemstr
 from copy import copy
 
@@ -44,21 +45,20 @@ _UQLP = re.compile(r"""(['"]?)(.*?)\1(,|$)""")
 _SQV = re.compile( "((?:^[^']*(?:'[^']*')*[^']*)*)(#.*)$" )
 _DQV = re.compile( '((?:^[^"]*(?:"[^"]*")*[^"]*)*)(#.*)$' )
 
-class ValidationError( Exception ):
-    def __init__( self, msg ):
-        self.msg = msg
-    def __str__( self ):
-        return repr(self.msg)
 
-class IllegalValueError( ValidationError ):
-    def __init__( self, vtype, keys, value ):
-        msg = 'Illegal ' + vtype + ' value: ' + itemstr( keys, value=value )
-        ValidationError.__init__( self, msg )
+class ValidationError(ParsecError):
+    pass
+
+
+class IllegalValueError(ValidationError):
+    def __init__(self, vtype, keys, value):
+        self.msg = 'Illegal %s value: %s' % (vtype, itemstr(keys, value=value))
+
+
+class IllegalItemError(ValidationError):
+    def __init__(self, keys, key):
+        self.msg = 'Illegal item: %s' % itemstr(keys, key)
 
-class IllegalItemError( ValidationError ):
-    def __init__( self, keys, key ):
-        msg = 'Illegal item: ' + itemstr( keys, key )
-        ValidationError.__init__( self, msg )
 
 def validate( cfig, spec, keys=[] ):
     """Validate and coerce a nested dict against a parsec spec."""
@@ -111,14 +111,14 @@ def _populate_spec_defaults( defs, spec ):
     for key,val in spec.items():
         if isinstance( val, dict ):
             if key not in defs:
-                defs[key] = OrderedDict()
+                defs[key] = OrderedDictWithDefaults()
             _populate_spec_defaults( defs[key], spec[key] )
         else:
             defs[key] = spec[key].args['default']
 
 def get_defaults( spec ):
     """Return a nested dict of default values from a parsec spec."""
-    defs = OrderedDict()
+    defs = OrderedDictWithDefaults()
     _populate_spec_defaults( defs, spec )
     return defs
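
    get_defaults now returns an OrderedDictWithDefaults rather than a plain
    OrderedDict. A small sketch using the validator API from the deleted nullcfg
    test earlier in this diff (the spec key is illustrative):

        # Illustrative only -- not part of the commit.
        from parsec.validate import get_defaults, validator as vdr

        SPEC = {'title': vdr(vtype="string")}
        defaults = get_defaults(SPEC)
        # 'defaults' is an OrderedDictWithDefaults mapping 'title' to the
        # validator's default value.
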
 
diff --git a/lib/xdot.py b/lib/xdot.py
index 0b4a4df..0a759d3 100644
--- a/lib/xdot.py
+++ b/lib/xdot.py
@@ -524,6 +524,7 @@ UNDERLINE = 4
 SUPERSCRIPT = 8
 SUBSCRIPT = 16
 STRIKE_THROUGH = 32
+OVERLINE = 64
 
 
 class XDotAttrParser:
@@ -1162,7 +1163,7 @@ class DotParser(Parser):
 
 class XDotParser(DotParser):
 
-    XDOTVERSION = '1.6'
+    XDOTVERSION = '1.7'
 
     def __init__(self, xdotcode):
         lexer = DotLexer(buf = xdotcode)
@@ -1834,12 +1835,13 @@ class DotWidget(gtk.DrawingArea):
 
             if event.button == 1 or event.button == 3:
                 url = self.get_url(x, y)
-                if url is not None:
-                    self.emit('clicked', unicode(url.url), event)
-                elif event.button == 1:
+                if event.button == 1:
                     jump = self.get_jump(x, y)
                     if jump is not None:
                         self.animate_to(jump.x, jump.y)
+                elif url is not None:
+                    self.emit('clicked', unicode(url.url), event)
+
 
                 return True
 
diff --git a/tests/authentication/00-identity.t b/tests/authentication/00-identity.t
new file mode 100644
index 0000000..965d06a
--- /dev/null
+++ b/tests/authentication/00-identity.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Test authentication - privilege 'identity'.
+
+. $(dirname $0)/test_header
+set_test_number 6
+
+install_suite "${TEST_NAME_BASE}" basic
+
+TEST_NAME="${TEST_NAME_BASE}-validate"
+run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
+
+# Run the suite.
+cat > global.rc << __END__
+[authentication]
+    public = identity
+__END__
+CYLC_CONF_PATH="${PWD}" cylc run "${SUITE_NAME}"
+
+# Wait for first task 'foo' to fail.
+cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --cycle=1 \
+    --interval=1 --max-polls=10 || exit 1
+
+# Disable the suite passphrase (to leave us with public access privilege).
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS"
+
+# Check scan output.
+PORT=$(cylc ping -v "${SUITE_NAME}" | awk '{print $4}')
+cylc scan -fb -n "${SUITE_NAME}" 'localhost' >'scan.out' 2>'/dev/null'
+cmp_ok scan.out << __END__
+${SUITE_NAME} ${USER}@localhost:${PORT}
+   (description and state totals withheld)
+__END__
+
+# "cylc show" should be denied.
+TEST_NAME="${TEST_NAME_BASE}-show"
+run_fail "${TEST_NAME}" cylc show "${SUITE_NAME}"
+cylc log "${SUITE_NAME}" > suite.log1
+grep_ok "\[client-connect] DENIED (privilege 'identity' < 'description') ${USER}@.*:cylc-show" suite.log1
+
+# Commands should be denied.
+TEST_NAME="${TEST_NAME_BASE}-stop"
+run_fail "${TEST_NAME}" cylc stop "${SUITE_NAME}"
+cylc log "${SUITE_NAME}" > suite.log2
+grep_ok "\[client-connect] DENIED (privilege 'identity' < 'shutdown') ${USER}@.*:cylc-stop" suite.log2
+
+# Restore the passphrase.
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase"
+
+# Stop and purge the suite.
+cylc stop --max-polls=10 --interval=1 "${SUITE_NAME}"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/authentication/01-description.t b/tests/authentication/01-description.t
new file mode 100644
index 0000000..448a402
--- /dev/null
+++ b/tests/authentication/01-description.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Test authentication - privilege 'description'.
+
+. $(dirname $0)/test_header
+set_test_number 7
+
+install_suite "${TEST_NAME_BASE}" basic
+
+TEST_NAME="${TEST_NAME_BASE}-validate"
+run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
+
+# Run the suite.
+cat > global.rc << __END__
+[authentication]
+    public = description
+__END__
+CYLC_CONF_PATH="${PWD}" cylc run "${SUITE_NAME}"
+
+# Wait for first task 'foo' to fail.
+cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --cycle=1 \
+    --interval=1 --max-polls=10 || exit 1
+
+# Disable the suite passphrase (to leave us with public access privilege).
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS"
+
+# Check scan output.
+PORT=$(cylc ping -v "${SUITE_NAME}" | awk '{print $4}')
+cylc scan -fb -n "${SUITE_NAME}" 'localhost' >'scan.out' 2>'/dev/null'
+cmp_ok scan.out << __END__
+${SUITE_NAME} ${USER}@localhost:${PORT}
+   Title:
+      "Authentication test suite."
+   Description:
+      "Stalls when the first task fails."
+   (state totals withheld)
+__END__
+
+# "cylc show" (suite info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show1"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}"
+
+# "cylc show" (task info) should be denied.
+TEST_NAME="${TEST_NAME_BASE}-show2"
+run_fail "${TEST_NAME}" cylc show "${SUITE_NAME}" foo.1
+cylc log "${SUITE_NAME}" > suite.log1
+grep_ok "\[client-connect] DENIED (privilege 'description' < 'full-read') ${USER}@.*:cylc-show" suite.log1
+
+# Commands should be denied.
+TEST_NAME="${TEST_NAME_BASE}-stop"
+run_fail "${TEST_NAME}" cylc stop "${SUITE_NAME}"
+cylc log "${SUITE_NAME}" > suite.log2
+grep_ok "\[client-connect] DENIED (privilege 'description' < 'shutdown') ${USER}@.*:cylc-stop" suite.log2
+
+# Restore the passphrase.
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase"
+
+# Stop and purge the suite.
+cylc stop --max-polls=10 --interval=1 "${SUITE_NAME}"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/authentication/02-state-totals.t b/tests/authentication/02-state-totals.t
new file mode 100644
index 0000000..55c34b4
--- /dev/null
+++ b/tests/authentication/02-state-totals.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Test authentication - privilege 'state-totals'.
+
+. $(dirname $0)/test_header
+set_test_number 7
+
+install_suite "${TEST_NAME_BASE}" basic
+
+TEST_NAME="${TEST_NAME_BASE}-validate"
+run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
+
+# Run the suite.
+cat > global.rc << __END__
+[authentication]
+    public = state-totals
+__END__
+CYLC_CONF_PATH="${PWD}" cylc run "${SUITE_NAME}"
+
+# Wait for first task 'foo' to fail.
+cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --cycle=1 \
+    --interval=1 --max-polls=10 || exit 1
+
+# Disable the suite passphrase (to leave us with public access privilege).
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS"
+
+# Check scan output.
+PORT=$(cylc ping -v "${SUITE_NAME}" | awk '{print $4}')
+cylc scan -fb -n "${SUITE_NAME}" 'localhost' >'scan.out' 2>'/dev/null'
+cmp_ok scan.out << __END__
+${SUITE_NAME} ${USER}@localhost:${PORT}
+   Title:
+      "Authentication test suite."
+   Description:
+      "Stalls when the first task fails."
+   Task state totals:
+      failed:1 waiting:1
+      1 failed:1 waiting:1
+__END__
+
+# "cylc show" (suite info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show1"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}"
+
+# "cylc show" (task info) should be denied.
+TEST_NAME="${TEST_NAME_BASE}-show2"
+run_fail "${TEST_NAME}" cylc show "${SUITE_NAME}" foo.1
+cylc log "${SUITE_NAME}" > suite.log1
+grep_ok "\[client-connect] DENIED (privilege 'state-totals' < 'full-read') ${USER}@.*:cylc-show" suite.log1
+
+# Commands should be denied.
+TEST_NAME="${TEST_NAME_BASE}-stop"
+run_fail "${TEST_NAME}" cylc stop "${SUITE_NAME}"
+cylc log "${SUITE_NAME}" > suite.log2
+grep_ok "\[client-connect] DENIED (privilege 'state-totals' < 'shutdown') ${USER}@.*:cylc-stop" suite.log2
+
+# Restore the passphrase.
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase"
+
+# Stop and purge the suite.
+cylc stop --max-polls=10 --interval=1 "${SUITE_NAME}"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/authentication/03-full-read.t b/tests/authentication/03-full-read.t
new file mode 100644
index 0000000..d1c62f8
--- /dev/null
+++ b/tests/authentication/03-full-read.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Test authentication - privilege 'full-read'.
+
+. $(dirname $0)/test_header
+set_test_number 8
+
+install_suite "${TEST_NAME_BASE}" basic
+
+TEST_NAME="${TEST_NAME_BASE}-validate"
+run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
+
+# Run the suite.
+cat > global.rc << __END__
+[authentication]
+    public = full-read
+__END__
+CYLC_CONF_PATH="${PWD}" cylc run "${SUITE_NAME}"
+
+# Wait for first task 'foo' to fail.
+cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --cycle=1 \
+    --interval=1 --max-polls=10 || exit 1
+
+# Disable the suite passphrase (to leave us with public access privilege).
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS"
+
+# Check scan output.
+PORT=$(cylc ping -v "${SUITE_NAME}" | awk '{print $4}')
+cylc scan -fb -n "${SUITE_NAME}" 'localhost' >'scan.out' 2>'/dev/null'
+cmp_ok scan.out << __END__
+${SUITE_NAME} ${USER}@localhost:${PORT}
+   Title:
+      "Authentication test suite."
+   Description:
+      "Stalls when the first task fails."
+   Task state totals:
+      failed:1 waiting:1
+      1 failed:1 waiting:1
+__END__
+
+# "cylc show" (suite info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show1"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}"
+cylc log "${SUITE_NAME}" > suite.log1
+grep_ok "\[client-command] get_suite_info ${USER}@.*:cylc-show" suite.log1
+
+# "cylc show" (task info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show2"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}" foo.1
+cylc log "${SUITE_NAME}" > suite.log2
+grep_ok "\[client-command] get_task_info ${USER}@.*:cylc-show" suite.log2
+
+# Commands should be denied.
+TEST_NAME="${TEST_NAME_BASE}-stop"
+run_fail "${TEST_NAME}" cylc stop "${SUITE_NAME}"
+cylc log "${SUITE_NAME}" > suite.log3
+grep_ok "\[client-connect] DENIED (privilege 'full-read' < 'shutdown') ${USER}@.*:cylc-stop" suite.log3
+
+# Restore the passphrase.
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase"
+
+# Stop and purge the suite.
+cylc stop --max-polls=10 --interval=1 "${SUITE_NAME}"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/authentication/04-shutdown.t b/tests/authentication/04-shutdown.t
new file mode 100644
index 0000000..6738b42
--- /dev/null
+++ b/tests/authentication/04-shutdown.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Test authentication - privilege 'shutdown'.
+
+. $(dirname $0)/test_header
+set_test_number 9
+
+install_suite "${TEST_NAME_BASE}" basic
+
+TEST_NAME="${TEST_NAME_BASE}-validate"
+run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
+
+cat > global.rc << __END__
+[authentication]
+    public = shutdown
+__END__
+CYLC_CONF_PATH="${PWD}" cylc run "${SUITE_NAME}"
+
+# Wait for first task 'foo' to fail.
+cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --cycle=1 \
+    --interval=1 --max-polls=10 || exit 1
+
+# Disable the suite passphrase (to leave us with public access privilege).
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS"
+
+PORT=$(cylc ping -v "${SUITE_NAME}" | awk '{print $4}')
+cylc scan -fb -n "${SUITE_NAME}" 'localhost' >'scan.out' 2>'/dev/null'
+cmp_ok scan.out << __END__
+${SUITE_NAME} ${USER}@localhost:${PORT}
+   Title:
+      "Authentication test suite."
+   Description:
+      "Stalls when the first task fails."
+   Task state totals:
+      failed:1 waiting:1
+      1 failed:1 waiting:1
+__END__
+
+# "cylc show" (suite info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show1"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}"
+cylc log "${SUITE_NAME}" > suite.log1
+grep_ok "\[client-command] get_suite_info ${USER}@.*:cylc-show" suite.log1
+
+# "cylc show" (task info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show2"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}" foo.1
+cylc log "${SUITE_NAME}" > suite.log2
+grep_ok "\[client-command] get_task_info ${USER}@.*:cylc-show" suite.log2
+
+# Commands (other than shutdown) should be denied.
+TEST_NAME="${TEST_NAME_BASE}-trigger"
+run_fail "${TEST_NAME}" cylc trigger "${SUITE_NAME}" foo 1
+cylc log "${SUITE_NAME}" > suite.log3
+grep_ok "\[client-connect] DENIED (privilege 'shutdown' < 'full-control') ${USER}@.*:cylc-trigger" suite.log3
+
+# Stop OK (without the passphrase!).
+TEST_NAME="${TEST_NAME_BASE}-stop"
+run_ok "${TEST_NAME}" cylc stop --max-polls=10 --interval=1 "${SUITE_NAME}"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/authentication/05-full-control.t b/tests/authentication/05-full-control.t
new file mode 100644
index 0000000..6665e39
--- /dev/null
+++ b/tests/authentication/05-full-control.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Test authentication - privilege 'full-control' (with passphrase).
+
+. $(dirname $0)/test_header
+set_test_number 9
+
+install_suite "${TEST_NAME_BASE}" basic
+
+TEST_NAME="${TEST_NAME_BASE}-validate"
+run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
+
+# Run the suite.
+# Set public auth low to test that passphrase gives full control
+cat > global.rc << __END__
+[authentication]
+    public = identity
+__END__
+CYLC_CONF_PATH="${PWD}" cylc run "${SUITE_NAME}"
+
+# Wait for first task 'foo' to fail.
+cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --cycle=1 \
+    --interval=1 --max-polls=10 || exit 1
+
+# Check scan output.
+PORT=$(cylc ping -v "${SUITE_NAME}" | awk '{print $4}')
+cylc scan -fb -n "${SUITE_NAME}" 'localhost' >'scan.out' 2>'/dev/null'
+cmp_ok scan.out << __END__
+${SUITE_NAME} ${USER}@localhost:${PORT}
+   Title:
+      "Authentication test suite."
+   Description:
+      "Stalls when the first task fails."
+   Task state totals:
+      failed:1 waiting:1
+      1 failed:1 waiting:1
+__END__
+
+# "cylc show" (suite info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show1"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}"
+cylc log "${SUITE_NAME}" > suite.log1
+grep_ok "\[client-command] get_suite_info ${USER}@.*:cylc-show" suite.log1
+
+# "cylc show" (task info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show2"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}" foo.1
+cylc log "${SUITE_NAME}" > suite.log2
+grep_ok "\[client-command] get_task_info ${USER}@.*:cylc-show" suite.log2
+
+# Commands OK.
+# (Reset to same state).
+TEST_NAME="${TEST_NAME_BASE}-trigger"
+run_ok "${TEST_NAME}" cylc reset "${SUITE_NAME}" -s failed foo 1
+cylc log "${SUITE_NAME}" > suite.log3
+grep_ok "\[client-command] reset_task_state ${USER}@.*:cylc-reset" suite.log3
+
+# Shutdown and purge.
+TEST_NAME="${TEST_NAME_BASE}-stop"
+run_ok "${TEST_NAME}" cylc stop --max-polls=10 --interval=1 "${SUITE_NAME}"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/authentication/06-suite-override.t b/tests/authentication/06-suite-override.t
new file mode 100644
index 0000000..82277af
--- /dev/null
+++ b/tests/authentication/06-suite-override.t
@@ -0,0 +1,79 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Test authentication - privilege 'shutdown'.
+# (Suite overrides global privilege 'identity'.)
+
+. $(dirname $0)/test_header
+set_test_number 9
+
+install_suite "${TEST_NAME_BASE}" override
+
+TEST_NAME="${TEST_NAME_BASE}-validate"
+run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
+
+cat > global.rc << __END__
+[authentication]
+    public = identity
+__END__
+CYLC_CONF_PATH="${PWD}" cylc run "${SUITE_NAME}"
+
+# Wait for first task 'foo' to fail.
+cylc suite-state "${SUITE_NAME}" --task=foo --status=failed --cycle=1 \
+    --interval=1 --max-polls=10 || exit 1
+
+# Disable the suite passphrase (to leave us with public access privilege).
+mv "${TEST_DIR}/${SUITE_NAME}/passphrase" \
+    "${TEST_DIR}/${SUITE_NAME}/passphrase.DIS"
+
+PORT=$(cylc ping -v "${SUITE_NAME}" | awk '{print $4}')
+cylc scan -fb -n "${SUITE_NAME}" 'localhost' >'scan.out' 2>'/dev/null'
+cmp_ok scan.out << __END__
+${SUITE_NAME} ${USER}@localhost:${PORT}
+   Title:
+      "Authentication test suite."
+   Description:
+      "Stalls when the first task fails.
+       Suite overrides global authentication settings."
+   Task state totals:
+      failed:1 waiting:1
+      1 failed:1 waiting:1
+__END__
+
+# "cylc show" (suite info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show1"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}"
+cylc log "${SUITE_NAME}" > suite.log1
+grep_ok "\[client-command] get_suite_info ${USER}@.*:cylc-show" suite.log1
+
+# "cylc show" (task info) OK.
+TEST_NAME="${TEST_NAME_BASE}-show2"
+run_ok "${TEST_NAME}" cylc show "${SUITE_NAME}" foo.1
+cylc log "${SUITE_NAME}" > suite.log2
+grep_ok "\[client-command] get_task_info ${USER}@.*:cylc-show" suite.log2
+
+# Commands (other than shutdown) should be denied.
+TEST_NAME="${TEST_NAME_BASE}-trigger"
+run_fail "${TEST_NAME}" cylc trigger "${SUITE_NAME}" foo 1
+cylc log "${SUITE_NAME}" > suite.log3
+grep_ok "\[client-connect] DENIED (privilege 'shutdown' < 'full-control') ${USER}@.*:cylc-trigger" suite.log3
+
+# Stop OK (without the passphrase!).
+TEST_NAME="${TEST_NAME_BASE}-stop"
+run_ok "${TEST_NAME}" cylc stop --max-polls=10 --interval=1 "${SUITE_NAME}"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/authentication/07-back-compat.t b/tests/authentication/07-back-compat.t
new file mode 100644
index 0000000..7a14da4
--- /dev/null
+++ b/tests/authentication/07-back-compat.t
@@ -0,0 +1,106 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+# Test authentication - ignore old client denials, report bad new clients.
+
+. $(dirname $0)/test_header
+set_test_number 13
+
+# Set things up and run the suite.
+install_suite "${TEST_NAME_BASE}" basic
+TEST_NAME="${TEST_NAME_BASE}-validate"
+run_ok "${TEST_NAME}" cylc validate "${SUITE_NAME}"
+cylc run "${SUITE_NAME}"
+
+# Scan to grab the suite's port.
+sleep 5  # Wait for the suite to initialize.
+TEST_NAME="${TEST_NAME_BASE}-new-scan"
+PORT=$(cylc scan -b -n $SUITE_NAME 'localhost' 2>'/dev/null' \
+    | sed -e 's/.*@localhost://')
+
+# Simulate an old client with the wrong passphrase.
+ERR_PATH="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/log/suite/err"
+TEST_NAME="${TEST_NAME_BASE}-old-client-snapshot-err"
+run_ok "${TEST_NAME}" cp "${ERR_PATH}" err-before-scan
+TEST_NAME="${TEST_NAME_BASE}-old-client-simulate"
+run_fail "${TEST_NAME}" python -c "
+import sys
+import Pyro.core
+uri = 'PYROLOC://localhost:' + sys.argv[1] + '/cylcid'
+proxy = Pyro.core.getProxyForURI(uri)
+proxy._setIdentification('0123456789abcdef')
+name, owner = proxy.id()" "${PORT}"
+grep_ok "ConnectionDeniedError" "${TEST_NAME}.stderr"
+
+# Check that the old client connection is not logged.
+# Get any new lines added to the error file.
+comm -13 err-before-scan "${ERR_PATH}" >"${TEST_NAME_BASE}-old-client-err-diff"
+TEST_NAME="${TEST_NAME_BASE}-log-old-client"
+run_fail "${TEST_NAME}" grep "WARNING - \[client-connect\] DENIED" \
+    "${TEST_NAME_BASE}-old-client-err-diff"
+
+# Simulate an old client with the right passphrase.
+ERR_PATH="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/log/suite/err"
+TEST_NAME="${TEST_NAME_BASE}-old-client-snapshot-ok"
+run_ok "${TEST_NAME}" cp "${ERR_PATH}" err-before-scan
+TEST_NAME="${TEST_NAME_BASE}-old-client-simulate-ok"
+PASSPHRASE=$(cat $(cylc get-dir $SUITE_NAME)/passphrase)
+run_ok "${TEST_NAME}" python -c "
+import sys
+import Pyro.core
+uri = 'PYROLOC://localhost:' + sys.argv[1] + '"/${USER}.${SUITE_NAME}.suite-info"'
+print >> sys.stderr, uri
+proxy = Pyro.core.getProxyForURI(uri)
+proxy._setIdentification('"$PASSPHRASE"')
+info = proxy.get('get_suite_info')" "${PORT}"
+grep_ok "\[client-command\] get_suite_info (user)@(host):(OLD_CLIENT) (uuid)" "$(cylc cat-log -l $SUITE_NAME)"
+
+# Simulate a new, suspicious client.
+TEST_NAME="${TEST_NAME_BASE}-new-bad-client-snapshot-err"
+run_ok "${TEST_NAME}" cp "${ERR_PATH}" err-before-scan
+TEST_NAME="${TEST_NAME_BASE}-new-bad-client-simulate"
+run_fail "${TEST_NAME}" python -c '
+import sys
+import Pyro.core, Pyro.protocol
+
+class MyConnValidator(Pyro.protocol.DefaultConnValidator):
+
+    """Create an incorrect but plausible auth token."""
+
+    def createAuthToken(self, authid, challenge, peeraddr, URI, daemon):
+        return "colonel_mustard:drawing_room:dagger:mystery:57abbed"
+
+uri = "PYROLOC://localhost:" + sys.argv[1] + "/cylcid"
+proxy = Pyro.core.getProxyForURI(uri)
+proxy._setNewConnectionValidator(MyConnValidator())
+proxy._setIdentification("0123456789abcdef")
+proxy.identify()' "${PORT}"
+grep_ok "ConnectionDeniedError" "${TEST_NAME}.stderr"
+
+# Check that the new client connection failure is logged (it is suspicious).
+TEST_NAME="${TEST_NAME_BASE}-log-new-client"
+# Get any new lines added to the error file.
+comm -13 err-before-scan "${ERR_PATH}" >"${TEST_NAME_BASE}-new-client-err-diff"
+# Check the new lines for a connection denied report.
+grep_ok "WARNING - \[client-connect\] DENIED colonel_mustard at drawing_room:mystery dagger$" \
+    "${TEST_NAME_BASE}-new-client-err-diff"
+
+# Shutdown and purge.
+TEST_NAME="${TEST_NAME_BASE}-stop"
+run_ok "${TEST_NAME}" cylc stop --max-polls=10 --interval=1 "${SUITE_NAME}"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/authentication/basic/suite.rc b/tests/authentication/basic/suite.rc
new file mode 100644
index 0000000..dbdc51d
--- /dev/null
+++ b/tests/authentication/basic/suite.rc
@@ -0,0 +1,14 @@
+title = Authentication test suite.
+description = Stalls when the first task fails.
+[cylc]
+    [[event hooks]]
+        timeout = PT30S
+        abort on timeout = True
+[scheduling]
+    [[dependencies]]
+        graph = foo => bar
+[runtime]
+    [[foo]]
+        script = /bin/false
+    [[bar]]
+        script = /bin/true
diff --git a/tests/authentication/override/suite.rc b/tests/authentication/override/suite.rc
new file mode 100644
index 0000000..3869f85
--- /dev/null
+++ b/tests/authentication/override/suite.rc
@@ -0,0 +1,17 @@
+title = Authentication test suite.
+description = """Stalls when the first task fails.
+Suite overrides global authentication settings."""
+[cylc]
+    [[event hooks]]
+        timeout = PT30S
+        abort on timeout = True
+    [[authentication]]
+        public = shutdown
+[scheduling]
+    [[dependencies]]
+        graph = foo => bar
+[runtime]
+    [[foo]]
+        script = /bin/false
+    [[bar]]
+        script = /bin/true
diff --git a/tests/purge/test_header b/tests/authentication/test_header
similarity index 100%
copy from tests/purge/test_header
copy to tests/authentication/test_header
diff --git a/tests/broadcast/00-simple.t b/tests/broadcast/00-simple.t
index 63d8b12..ced8e62 100644
--- a/tests/broadcast/00-simple.t
+++ b/tests/broadcast/00-simple.t
@@ -16,16 +16,52 @@
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
 # Test broadcasts
-. $(dirname $0)/test_header
-#-------------------------------------------------------------------------------
-set_test_number 2
-#-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE $TEST_NAME_BASE
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
-#-------------------------------------------------------------------------------
-purge_suite $SUITE_NAME
+. "$(dirname "$0")/test_header"
+set_test_number 4
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+
+run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --debug --reference-test "${SUITE_NAME}"
+
+DB_FILE="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}/cylc-suite.db"
+NAME='select-broadcast-events.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT change, point, namespace, key, value FROM broadcast_events
+     ORDER BY time, change, point, namespace, key' >"${NAME}"
+cmp_ok "${NAME}" <<'__SELECT__'
++|*|root|[environment]BCAST|ROOT
++|2010080800|foo|[environment]BCAST|FOO
++|*|bar|[environment]BCAST|BAR
++|2010080900|baz|[environment]BCAST|BAZ
++|2010080900|qux|[environment]BCAST|QUX
+-|2010080900|qux|[environment]BCAST|QUX
++|*|wibble|[environment]BCAST|WIBBLE
+-|*|wibble|[environment]BCAST|WIBBLE
++|*|ENS|[environment]BCAST|ENS
++|*|ENS1|[environment]BCAST|ENS1
++|2010080900|m2|[environment]BCAST|M2
++|*|m7|[environment]BCAST|M7
++|*|m8|[environment]BCAST|M8
++|*|m9|[environment]BCAST|M9
+-|2010080800|foo|[environment]BCAST|FOO
+__SELECT__
+
+NAME='select-broadcast-states.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT point, namespace, key, value FROM broadcast_states
+     ORDER BY point, namespace, key' >"${NAME}"
+cmp_ok "${NAME}" <<'__SELECT__'
+*|ENS|[environment]BCAST|ENS
+*|ENS1|[environment]BCAST|ENS1
+*|bar|[environment]BCAST|BAR
+*|m7|[environment]BCAST|M7
+*|m8|[environment]BCAST|M8
+*|m9|[environment]BCAST|M9
+*|root|[environment]BCAST|ROOT
+2010080900|baz|[environment]BCAST|BAZ
+2010080900|m2|[environment]BCAST|M2
+__SELECT__
+
+purge_suite "${SUITE_NAME}"
+exit
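
    The new broadcast test above checks the broadcast_events and broadcast_states
    tables with the sqlite3 command line tool; the same query can be run from
    Python against the suite's cylc-suite.db (the relative database path below is
    an assumption -- the test resolves it under the suite run directory):

        # Illustrative only -- not part of the test.
        import sqlite3

        conn = sqlite3.connect('cylc-suite.db')  # under the suite run directory
        for row in conn.execute(
                'SELECT change, point, namespace, key, value'
                ' FROM broadcast_events'
                ' ORDER BY time, change, point, namespace, key'):
            print('|'.join(str(col) for col in row))
        conn.close()
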
diff --git a/tests/broadcast/01-dependencies/reference.log b/tests/broadcast/01-dependencies/reference.log
deleted file mode 100644
index f220ce1..0000000
--- a/tests/broadcast/01-dependencies/reference.log
+++ /dev/null
@@ -1,105 +0,0 @@
-2013/10/17 10:29:08 INFO - Thread-2 start (Event Handler Submission)
-2013/10/17 10:29:08 INFO - Thread-3 start (Poll and Kill Command Submission)
-2013/10/17 10:29:08 INFO - port:7766
-2013/10/17 10:29:08 INFO - Suite starting at 2013-10-17 10:29:08.030736
-2013/10/17 10:29:08 INFO - Log event clock: real time
-2013/10/17 10:29:08 INFO - Run mode: live
-2013/10/17 10:29:08 INFO - Initial point: 1
-2013/10/17 10:29:08 INFO - Final point: 1
-2013/10/17 10:29:08 INFO - Thread-4 start (Job Submission)
-2013/10/17 10:29:08 INFO - Thread-5 start (Request Handling)
-2013/10/17 10:29:08 DEBUG - [a.1] -task proxy added to the pool
-2013/10/17 10:29:08 DEBUG - [c.1] -task proxy added to the pool
-2013/10/17 10:29:08 DEBUG - [b.1] -task proxy added to the pool
-2013/10/17 10:29:08 DEBUG - [d.1] -task proxy added to the pool
-2013/10/17 10:29:08 DEBUG - BEGIN TASK PROCESSING
-2013/10/17 10:29:08 INFO - [a.1] -(setting:queued)
-2013/10/17 10:29:08 INFO - [a.1] -(setting:submitting)
-2013/10/17 10:29:08 INFO - [a.1] -triggered off []
-2013/10/17 10:29:08 DEBUG - END TASK PROCESSING (took 0.019732 sec)
-2013/10/17 10:29:08 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:09 DEBUG - Job Submission batch 1/1 (1 members):
-2013/10/17 10:29:09 INFO - [a.1] -(current:submitting)> a.1 submitting now
-2013/10/17 10:29:09 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:09 DEBUG - Job Submission: batch completed
-  Time taken: 0:00:00.230629
-  All 1 items succeeded
-2013/10/17 10:29:10 INFO - [a.1] -(current:submitting)> a.1 submission succeeded
-2013/10/17 10:29:10 INFO - [a.1] -(setting:submitted)
-2013/10/17 10:29:10 INFO - [a.1] -(current:submitted)> a.1 submit_method_id=21873
-2013/10/17 10:29:10 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:11 DEBUG - BEGIN TASK PROCESSING
-2013/10/17 10:29:11 DEBUG - END TASK PROCESSING (took 0.016003 sec)
-2013/10/17 10:29:11 INFO - [a.1] -(current:submitted)> a.1 started at 2013-10-17T10:29:10
-2013/10/17 10:29:11 INFO - [a.1] -(setting:running)
-2013/10/17 10:29:11 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:12 DEBUG - BEGIN TASK PROCESSING
-2013/10/17 10:29:12 DEBUG - END TASK PROCESSING (took 0.013639 sec)
-2013/10/17 10:29:12 INFO - Command succeeded: add prerequisite(d.1,b.1 succeeded)
-2013/10/17 10:29:12 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:22 INFO - [a.1] -(current:running)> a.1 succeeded at 2013-10-17T10:29:22
-2013/10/17 10:29:22 INFO - [a.1] -(setting:succeeded)
-2013/10/17 10:29:22 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:23 DEBUG - BEGIN TASK PROCESSING
-2013/10/17 10:29:23 INFO - [c.1] -(setting:queued)
-2013/10/17 10:29:23 INFO - [b.1] -(setting:queued)
-2013/10/17 10:29:23 INFO - [c.1] -(setting:submitting)
-2013/10/17 10:29:23 INFO - [b.1] -(setting:submitting)
-2013/10/17 10:29:23 INFO - [c.1] -triggered off ['a.1']
-2013/10/17 10:29:23 INFO - [b.1] -triggered off ['a.1']
-2013/10/17 10:29:23 DEBUG - END TASK PROCESSING (took 0.017143 sec)
-2013/10/17 10:29:23 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:23 DEBUG - Job Submission batch 1/1 (2 members):
-2013/10/17 10:29:24 INFO - [c.1] -(current:submitting)> c.1 submitting now
-2013/10/17 10:29:24 INFO - [c.1] -(current:submitting)> c.1 submission succeeded
-2013/10/17 10:29:24 INFO - [c.1] -(setting:submitted)
-2013/10/17 10:29:24 INFO - [c.1] -(current:submitted)> c.1 submit_method_id=21989
-2013/10/17 10:29:24 INFO - [b.1] -(current:submitting)> b.1 submitting now
-2013/10/17 10:29:24 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:24 DEBUG - Job Submission: batch completed
-  Time taken: 0:00:01.365507
-  All 2 items succeeded
-2013/10/17 10:29:25 DEBUG - BEGIN TASK PROCESSING
-2013/10/17 10:29:25 DEBUG - END TASK PROCESSING (took 0.012907 sec)
-2013/10/17 10:29:25 INFO - [c.1] -(current:submitted)> c.1 started at 2013-10-17T10:29:24
-2013/10/17 10:29:25 INFO - [c.1] -(setting:running)
-2013/10/17 10:29:25 INFO - [b.1] -(current:submitting)> b.1 submission succeeded
-2013/10/17 10:29:25 INFO - [b.1] -(setting:submitted)
-2013/10/17 10:29:25 INFO - [b.1] -(current:submitted)> b.1 submit_method_id=22056
-2013/10/17 10:29:25 INFO - [b.1] -(current:submitted)> b.1 started at 2013-10-17T10:29:24
-2013/10/17 10:29:25 INFO - [b.1] -(setting:running)
-2013/10/17 10:29:25 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:26 DEBUG - BEGIN TASK PROCESSING
-2013/10/17 10:29:26 DEBUG - END TASK PROCESSING (took 0.012717 sec)
-2013/10/17 10:29:26 INFO - [c.1] -(current:running)> c.1 succeeded at 2013-10-17T10:29:25
-2013/10/17 10:29:26 INFO - [c.1] -(setting:succeeded)
-2013/10/17 10:29:26 INFO - [b.1] -(current:running)> b.1 succeeded at 2013-10-17T10:29:25
-2013/10/17 10:29:26 INFO - [b.1] -(setting:succeeded)
-2013/10/17 10:29:26 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:27 DEBUG - BEGIN TASK PROCESSING
-2013/10/17 10:29:27 INFO - [d.1] -(setting:queued)
-2013/10/17 10:29:27 INFO - [d.1] -(setting:submitting)
-2013/10/17 10:29:27 INFO - [d.1] -triggered off ['b.1']
-2013/10/17 10:29:27 DEBUG - END TASK PROCESSING (took 0.014147 sec)
-2013/10/17 10:29:27 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:27 DEBUG - Job Submission batch 1/1 (1 members):
-2013/10/17 10:29:27 DEBUG - Job Submission: batch completed
-  Time taken: 0:00:00.219651
-  All 1 items succeeded
-2013/10/17 10:29:28 INFO - [d.1] -(current:submitting)> d.1 submitting now
-2013/10/17 10:29:28 INFO - [d.1] -(current:submitting)> d.1 submission succeeded
-2013/10/17 10:29:28 INFO - [d.1] -(setting:submitted)
-2013/10/17 10:29:28 INFO - [d.1] -(current:submitted)> d.1 submit_method_id=22201
-2013/10/17 10:29:28 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:29 DEBUG - BEGIN TASK PROCESSING
-2013/10/17 10:29:29 DEBUG - END TASK PROCESSING (took 0.013136 sec)
-2013/10/17 10:29:29 INFO - [d.1] -(current:submitted)> d.1 started at 2013-10-17T10:29:28
-2013/10/17 10:29:29 INFO - [d.1] -(setting:running)
-2013/10/17 10:29:29 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:30 DEBUG - BEGIN TASK PROCESSING
-2013/10/17 10:29:30 DEBUG - END TASK PROCESSING (took 0.012368 sec)
-2013/10/17 10:29:30 INFO - [d.1] -(current:running)> d.1 succeeded at 2013-10-17T10:29:29
-2013/10/17 10:29:30 INFO - [d.1] -(setting:succeeded)
-2013/10/17 10:29:30 DEBUG - UPDATING STATE SUMMARY
-2013/10/17 10:29:30 INFO - All non-cycling tasks have succeeded
-2013/10/17 10:29:30 INFO - Suite shutting down at 2013-10-17 10:29:30.267063
diff --git a/tests/broadcast/01-dependencies/suite.rc b/tests/broadcast/01-dependencies/suite.rc
deleted file mode 100644
index 3037043..0000000
--- a/tests/broadcast/01-dependencies/suite.rc
+++ /dev/null
@@ -1,14 +0,0 @@
-[cylc]
-   [[reference test]]
-       required run mode = live
-       live mode suite timeout = 0.5 # minutes
-[scheduling]
-    [[dependencies]]
-        graph = """a => b
-                   a => c
-                   c => d"""
-[runtime]
-    [[a]]
-        script = "cylc depend $CYLC_SUITE_NAME d.1 b.1; sleep 10"
-    [[b,c,d]]
-        script = true
diff --git a/tests/events/00-suite.t b/tests/broadcast/08-space.t
old mode 100644
new mode 100755
similarity index 76%
copy from tests/events/00-suite.t
copy to tests/broadcast/08-space.t
index d9dcf9b..56558ff
--- a/tests/events/00-suite.t
+++ b/tests/broadcast/08-space.t
@@ -15,16 +15,14 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Test broadcast -s '[foo]  bar=baz' syntax. cylc/cylc#1680
 . "$(dirname "$0")/test_header"
 set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}" \
+    cylc run --debug --reference-test "${SUITE_NAME}"
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/broadcast/08-space/reference.log b/tests/broadcast/08-space/reference.log
new file mode 100644
index 0000000..2ded416
--- /dev/null
+++ b/tests/broadcast/08-space/reference.log
@@ -0,0 +1,6 @@
+2015-05-01T07:41:43Z INFO - Run mode: live
+2015-05-01T07:41:43Z INFO - Initial point: 20200202T0000Z
+2015-05-01T07:41:43Z INFO - Final point: 20200202T0000Z
+2015-05-01T07:41:43Z INFO - Cold Start 20200202T0000Z
+2015-05-01T07:41:43Z INFO - [broadcast.20200202T0000Z] -triggered off []
+2015-05-01T07:41:43Z INFO - [test-env.20200202T0000Z] -triggered off ['broadcast.20200202T0000Z']
diff --git a/tests/broadcast/08-space/suite.rc b/tests/broadcast/08-space/suite.rc
new file mode 100644
index 0000000..028b210
--- /dev/null
+++ b/tests/broadcast/08-space/suite.rc
@@ -0,0 +1,25 @@
+title=broadcast section-space-key
+description=Test broadcast set section-space-key syntax
+[cylc]
+    abort if any task fails = True
+    UTC mode = True
+    [[event hooks]]
+        abort on timeout = True
+        timeout=PT1M
+[scheduling]
+    initial cycle point = 20200202
+    final cycle point = 20200202
+    [[dependencies]]
+        [[[P1M]]]
+            graph = "broadcast => test-env"
+[runtime]
+    [[broadcast]]
+        script="""
+cylc broadcast -s '[environment] FOO=${FOO:-foo}' -n 'test-env' "${CYLC_SUITE_NAME}"
+"""
+    [[test-env]]
+        script="""
+test "${FOO}" = 'foo'
+"""
+        [[[environment]]]
+            FOO=bar
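For reference, the section-space-key form exercised by this test can be issued from the shell roughly as below (a minimal sketch, not part of the patch; 'my.suite' and the 'test-env' namespace are placeholders mirroring the suite above):

    # Override an [environment] item for the 'test-env' namespace; the space
    # after the closing bracket is the syntax being tested (cylc/cylc#1680).
    cylc broadcast -s '[environment] FOO=foo' -n 'test-env' my.suite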
diff --git a/tests/events/00-suite.t b/tests/clock-expire/00-basic.t
similarity index 76%
copy from tests/events/00-suite.t
copy to tests/clock-expire/00-basic.t
index d9dcf9b..9ad4f37 100644
--- a/tests/events/00-suite.t
+++ b/tests/clock-expire/00-basic.t
@@ -15,16 +15,15 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Validate and run the clock-expire test suite
 . "$(dirname "$0")/test_header"
+
 set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" 
 
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+suite_run_ok "${TEST_NAME_BASE}-run" cylc run --debug "${SUITE_NAME}"
+
+purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/clock-expire/00-basic/suite.rc b/tests/clock-expire/00-basic/suite.rc
new file mode 100644
index 0000000..55b6b7f
--- /dev/null
+++ b/tests/clock-expire/00-basic/suite.rc
@@ -0,0 +1,33 @@
+title = task expire example suite
+description = """
+Skip a daily post-processing workflow if the 'copy' task has expired."""
+
+[cylc]
+    cycle point format = %Y-%m-%dT%H
+    abort if any task fails = True
+    [[event hooks]]
+        abort on timeout = True
+        timeout = PT1M
+[scheduling]
+    initial cycle point = now
+    final cycle point = +P3D
+    [[special tasks]]
+        clock-expire = copy(-P1DT1H)
+        # NOTE this would normally be copy(P1D) i.e. expire if more than 1 day
+        # behind the wall clock, but here we have to start from 'now' in order
+        # to stay near the wall clock, so expire the task if more than 1 day
+        # behind "now + 1 day". This makes the first two 'copy' tasks expire.
+    [[dependencies]]
+        [[[P1D]]]
+            graph = """
+        model[-P1D] => model => copy => proc
+              copy:expired => !proc"""
+[runtime]
+    [[root]]
+        script = /bin/true
+    [[copy]]
+        script = """
+# Abort if I run in either of the first two cycle points.
+[[ $CYLC_TASK_CYCLE_POINT == $CYLC_SUITE_INITIAL_CYCLE_POINT ]] && exit 1
+P2D=$(cylc cyclepoint --offset=P1D $CYLC_SUITE_INITIAL_CYCLE_POINT)
+[[ $CYLC_TASK_CYCLE_POINT == $P2D ]] && exit 1"""
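The NOTE above describes the more usual form of this setting; for a suite with a fixed (non-'now') initial cycle point it would look roughly like this (a sketch only, not part of the patch):

    [scheduling]
        [[special tasks]]
            # Expire 'copy' rather than run it once its cycle point falls more
            # than one day behind the wall clock.
            clock-expire = copy(P1D)
        [[dependencies]]
            [[[P1D]]]
                graph = """
            model[-P1D] => model => copy => proc
                  copy:expired => !proc"""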
diff --git a/tests/pyc/test_header b/tests/clock-expire/test_header
similarity index 100%
rename from tests/pyc/test_header
rename to tests/clock-expire/test_header
diff --git a/tests/cyclers/00-daily.t b/tests/cyclers/00-daily.t
old mode 100644
new mode 100755
diff --git a/tests/cyclers/23-multidaily_local.t b/tests/cyclers/23-multidaily_local.t
old mode 100644
new mode 100755
diff --git a/tests/cyclers/24-360_calendar.t b/tests/cyclers/24-360_calendar.t
old mode 100644
new mode 100755
diff --git a/tests/cyclers/25-no_initial_cycle_point.t b/tests/cyclers/25-no_initial_cycle_point.t
old mode 100644
new mode 100755
diff --git a/tests/cyclers/26-no_final_cycle_point.t b/tests/cyclers/26-no_final_cycle_point.t
old mode 100644
new mode 100755
diff --git a/tests/cyclers/27-no_initial_but_final_cycle_point.t b/tests/cyclers/27-no_initial_but_final_cycle_point.t
old mode 100644
new mode 100755
diff --git a/tests/cyclers/29-0000_rollunder.t b/tests/cyclers/29-0000_rollunder.t
old mode 100644
new mode 100755
diff --git a/tests/cyclers/30-9999_rollover.t b/tests/cyclers/30-9999_rollover.t
old mode 100644
new mode 100755
index 8ea9409..9b81aa9
--- a/tests/cyclers/30-9999_rollover.t
+++ b/tests/cyclers/30-9999_rollover.t
@@ -26,7 +26,7 @@ TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_fail $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+suite_run_fail $TEST_NAME cylc run --debug $SUITE_NAME
 grep_ok "Cannot dump TimePoint year: 10000 not in bounds 0 to 9999" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/cyclers/34-implicit-back-compat.t b/tests/cyclers/34-implicit-back-compat.t
old mode 100644
new mode 100755
diff --git a/tests/cyclers/35-implicit-disallowed.t b/tests/cyclers/35-implicit-disallowed.t
old mode 100644
new mode 100755
diff --git a/tests/cyclers/22-integer1.t b/tests/cyclers/40-integer1.t
similarity index 100%
rename from tests/cyclers/22-integer1.t
rename to tests/cyclers/40-integer1.t
diff --git a/tests/cyclers/25-r1_initial_immortal.t b/tests/cyclers/41-r1_initial_immortal.t
similarity index 100%
rename from tests/cyclers/25-r1_initial_immortal.t
rename to tests/cyclers/41-r1_initial_immortal.t
diff --git a/tests/cyclers/28-back_comp_start_up_two_step.t b/tests/cyclers/42-back_comp_start_up_two_step.t
similarity index 100%
rename from tests/cyclers/28-back_comp_start_up_two_step.t
rename to tests/cyclers/42-back_comp_start_up_two_step.t
diff --git a/tests/cyclers/29-back_comp_start_up_simple.t b/tests/cyclers/43-back_comp_start_up_simple.t
similarity index 100%
rename from tests/cyclers/29-back_comp_start_up_simple.t
rename to tests/cyclers/43-back_comp_start_up_simple.t
diff --git a/tests/cyclers/9999_rollover/reference.log b/tests/cyclers/9999_rollover/reference.log
deleted file mode 100644
index 246a744..0000000
--- a/tests/cyclers/9999_rollover/reference.log
+++ /dev/null
@@ -1,8 +0,0 @@
-2014-08-26T16:02:56Z INFO - port:7766
-2014-08-26T16:02:56Z INFO - Suite starting at 2014-08-26T16:02:56Z
-2014-08-26T16:02:56Z INFO - Log event clock: real time
-2014-08-26T16:02:56Z INFO - Run mode: live
-2014-08-26T16:02:56Z INFO - Initial point: 99991231T2300Z
-2014-08-26T16:02:56Z INFO - Final point: None
-2014-08-26T16:02:56Z INFO - Cold Start 99991231T2300Z
-2014-08-26T16:02:56Z INFO - Suite shutting down at 2014-08-26T16:02:56Z (ERROR: Cannot dump TimePoint year: 10000 not in bounds 0 to 9999.)
diff --git a/tests/cyclers/9999_rollover/suite.rc b/tests/cyclers/9999_rollover/suite.rc
index 9d830df..84c95fb 100644
--- a/tests/cyclers/9999_rollover/suite.rc
+++ b/tests/cyclers/9999_rollover/suite.rc
@@ -1,12 +1,17 @@
+# A suite that tries to run beyond year 9999 without using extended year digits.
+# "[visualization]number of cycle points = 1" keeps it under the limit during
+# validation, but R3//PT1H puts it over the limit at run time.
+
 [cylc]
     UTC mode = True
 [scheduling]
-    initial cycle point = 99991231T2300
+    initial cycle point = 99991231T2200
     [[dependencies]]
-        [[[ R2//PT1H ]]]
+        [[[ R3//PT1H ]]]
             graph = "foo"
 [runtime]
     [[root]]
         script = true
 [visualization]
-    initial cycle point = 99991231T2300
+    initial cycle point = 99991231T2200
+    number of cycle points = 1
diff --git a/tests/cyclers/integer1/suite.rc b/tests/cyclers/integer1/suite.rc
index 539d4f0..a868c3a 100644
--- a/tests/cyclers/integer1/suite.rc
+++ b/tests/cyclers/integer1/suite.rc
@@ -43,7 +43,7 @@ sleep 5
 touch typing
                             """
         [[[outputs]]]
-            out1 = "the cheese is ready for [T+3]"
+            out1 = "the cheese is ready for [+P3]"
 
 [visualization]
     default node attributes = "style=filled"
diff --git a/tests/cyclers/r1_at_icp_or/reference.log b/tests/cyclers/r1_at_icp_or/reference.log
index f73ce76..05dfc4c 100644
--- a/tests/cyclers/r1_at_icp_or/reference.log
+++ b/tests/cyclers/r1_at_icp_or/reference.log
@@ -18,7 +18,7 @@
 2015-02-20T10:03:02Z INFO - [foo.20130808T0000Z] -(current:running)> foo.20130808T0000Z succeeded at 2015-02-20T10:03:02Z
 2015-02-20T10:03:02Z INFO - [bar.20130808T1200Z] -(current:running)> bar.20130808T1200Z succeeded at 2015-02-20T10:03:02Z
 2015-02-20T10:03:03Z INFO - [baz.20130808T1200Z] -initiate job-submit
-2015-02-20T10:03:03Z INFO - [baz.20130808T1200Z] -triggered off ['bar.20130808T1200Z', 'foo.20130808T0000Z']
+2015-02-20T10:03:03Z INFO - [baz.20130808T1200Z] -triggered off ['foo.20130808T0000Z']
 
 2015-02-20T10:03:04Z INFO - [baz.20130808T1200Z] -submit_method_id=5750
 2015-02-20T10:03:04Z INFO - [baz.20130808T1200Z] -submission succeeded
diff --git a/tests/cyclers/r1_at_icp_or/suite.rc b/tests/cyclers/r1_at_icp_or/suite.rc
index 57159ff..f51c65d 100644
--- a/tests/cyclers/r1_at_icp_or/suite.rc
+++ b/tests/cyclers/r1_at_icp_or/suite.rc
@@ -13,6 +13,8 @@
 [runtime]
     [[root]]
         script = true
+    [[bar]]
+        script = sleep 5
 [visualization]
     initial cycle point = 20130808T00
     final cycle point = 20130809T18
diff --git a/tests/cyclers/r1_initial_immortal/suite.rc b/tests/cyclers/r1_initial_immortal/suite.rc
index 4b7d302..34b78a1 100644
--- a/tests/cyclers/r1_initial_immortal/suite.rc
+++ b/tests/cyclers/r1_initial_immortal/suite.rc
@@ -1,5 +1,7 @@
 [cylc]
     UTC mode = True
+   [[reference test]]
+      live mode suite timeout = PT2M
 [scheduling]
     initial cycle point = 20140101
     [[dependencies]]
diff --git a/tests/cylc-5to6/00-simple-start-up.t b/tests/cylc-5to6/00-simple-start-up.t
index c8d5ed6..1cb27f5 100755
--- a/tests/cylc-5to6/00-simple-start-up.t
+++ b/tests/cylc-5to6/00-simple-start-up.t
@@ -105,5 +105,5 @@ title = Simple start-up suite.
 __OUT__
 cmp_ok "$TEST_NAME.stderr" </dev/null
 #-------------------------------------------------------------------------------
-#purge_suite $SUITE_NAME
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/cylc-5to6/01-single-cycler.t b/tests/cylc-5to6/01-single-cycler.t
index 169e0bf..8e49dcd 100755
--- a/tests/cylc-5to6/01-single-cycler.t
+++ b/tests/cylc-5to6/01-single-cycler.t
@@ -38,5 +38,5 @@ cmp_ok "$TEST_NAME.stdout" <<'__OUT__'
 __OUT__
 cmp_ok "$TEST_NAME.stderr" </dev/null
 #-------------------------------------------------------------------------------
-#purge_suite $SUITE_NAME
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/cylc-cat-log/00-local.t b/tests/cylc-cat-log/00-local.t
index 6c8b3b6..4713177 100755
--- a/tests/cylc-cat-log/00-local.t
+++ b/tests/cylc-cat-log/00-local.t
@@ -24,11 +24,11 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
 # Run detached so we get suite out and err logs.
-suite_run_ok $TEST_NAME cylc run $SUITE_NAME
+suite_run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}"
+sleep 5
 # Wait for the suite to finish.
-cylc stop --max-polls=10 --interval=2 $SUITE_NAME 
+cylc stop --max-polls=10 --interval=2 $SUITE_NAME 2>'/dev/null'
 #-------------------------------------------------------------------------------
 TEST_NAME=${TEST_NAME_BASE}-suite-log-log
 cylc cat-log $SUITE_NAME >$TEST_NAME.out
@@ -70,7 +70,7 @@ grep_ok "CYLC_BATCH_SYS_NAME=background" $TEST_NAME.out
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-task-activity
 cylc cat-log -a $SUITE_NAME a-task.1 >$TEST_NAME.out
-grep_ok "SUBMIT-OUT" $TEST_NAME.out
+grep_ok '\[job-submit ret_code\] 0' $TEST_NAME.out
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-task-custom
 cylc cat-log -c 'job.custom-log' $SUITE_NAME a-task.1 >$TEST_NAME.out
diff --git a/tests/cylc-cat-log/01-remote.t b/tests/cylc-cat-log/01-remote.t
index 774dcca..86df81f 100755
--- a/tests/cylc-cat-log/01-remote.t
+++ b/tests/cylc-cat-log/01-remote.t
@@ -18,11 +18,13 @@
 # Test "cylc cat-log" for remote tasks.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-export CYLC_TEST_HOST=$(cylc get-global-config -i '[test battery]remote host')
+RC_ITEM='[test battery]remote host'
+export CYLC_TEST_HOST=$(cylc get-global-config -i "${RC_ITEM}" 2>'/dev/null')
 if [[ -z $CYLC_TEST_HOST ]]; then
-    skip_all '[test battery]remote host: not defined'
+    skip_all '"[test battery]remote host": not defined'
 fi
 set_test_number 14
+export CYLC_CONF_PATH=
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 # Install suite passphrase.
@@ -67,7 +69,7 @@ grep_ok "CYLC_BATCH_SYS_NAME=background" $TEST_NAME.out
 # local
 TEST_NAME=$TEST_NAME_BASE-task-activity
 cylc cat-log -a $SUITE_NAME a-task.1 >$TEST_NAME.out
-grep_ok "SUBMIT-OUT" $TEST_NAME.out
+grep_ok '\[job-submit ret_code\] 0' $TEST_NAME.out
 #-------------------------------------------------------------------------------
 # remote
 TEST_NAME=$TEST_NAME_BASE-task-custom
diff --git a/tests/cylc-cat-log/01-remote/suite.rc b/tests/cylc-cat-log/01-remote/suite.rc
index e1499af..cb97b72 100644
--- a/tests/cylc-cat-log/01-remote/suite.rc
+++ b/tests/cylc-cat-log/01-remote/suite.rc
@@ -2,7 +2,7 @@
 [cylc]
    [[event hooks]]
        abort on timeout = True
-       timeout = PT20S
+       timeout = PT1M
 [scheduling]
     [[dependencies]]
         graph = a-task
diff --git a/tests/cylc-cat-log/02-remote-custom-runtime-viewer-pbs.t b/tests/cylc-cat-log/02-remote-custom-runtime-viewer-pbs.t
new file mode 100755
index 0000000..c7dde2a
--- /dev/null
+++ b/tests/cylc-cat-log/02-remote-custom-runtime-viewer-pbs.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test "cylc cat-log" for viewing PBS runtime STDOUT/STDERR by a custom command
+. "$(dirname "$0")/test_header"
+
+RC_PREF='[test battery][batch systems][pbs]'
+export CYLC_TEST_HOST="$( \
+    cylc get-global-config -i "${RC_PREF}host" 2>'/dev/null')"
+if [[ -z "${CYLC_TEST_HOST}" ]]; then
+    skip_all '"[test battery][batch systems][pbs]host": not defined'
+fi
+ERR_VIEWER="$(cylc get-global-config -i "${RC_PREF}err viewer" 2>'/dev/null')"
+OUT_VIEWER="$(cylc get-global-config -i "${RC_PREF}out viewer" 2>'/dev/null')"
+if [[ -z "${ERR_VIEWER}" || -z "${OUT_VIEWER}" ]]; then
+    skip_all '"[test battery][pbs]* viewer": not defined'
+fi
+export CYLC_TEST_DIRECTIVES="$( \
+    cylc get-global-config -i "${RC_PREF}[directives]" 2>'/dev/null')"
+set_test_number 2
+
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+
+set -eu
+SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+${SSH} "${CYLC_TEST_HOST}" \
+    "mkdir -p .cylc/${SUITE_NAME}/ && cat >.cylc/${SUITE_NAME}/passphrase" \
+    <"${TEST_DIR}/${SUITE_NAME}/passphrase"
+set +eu
+
+mkdir 'conf'
+cat >'conf/global.rc' <<__GLOBAL_RC__
+[hosts]
+    [[${CYLC_TEST_HOST}]]
+        [[[batch systems]]]
+            [[[[pbs]]]]
+                err viewer = ${ERR_VIEWER}
+                out viewer = ${OUT_VIEWER}
+__GLOBAL_RC__
+export CYLC_CONF_PATH="${PWD}/conf"
+run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}" \
+    cylc run --debug --reference-test "${SUITE_NAME}"
+
+${SSH} -n "${CYLC_TEST_HOST}" "rm -rf .cylc/${SUITE_NAME} cylc-run/${SUITE_NAME}"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/reference.log b/tests/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/reference.log
new file mode 100644
index 0000000..7572caf
--- /dev/null
+++ b/tests/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/reference.log
@@ -0,0 +1,6 @@
+2015-06-19T14:47:30+01 INFO - Run mode: live
+2015-06-19T14:47:30+01 INFO - Initial point: 1
+2015-06-19T14:47:30+01 INFO - Final point: 1
+2015-06-19T14:47:30+01 INFO - Cold Start 1
+2015-06-19T14:47:30+01 INFO - [a-task.1] -triggered off []
+2015-06-19T14:47:30+01 INFO - [b-task.1] -triggered off ['a-task.1']
diff --git a/tests/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/suite.rc b/tests/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/suite.rc
new file mode 100644
index 0000000..d50ae80
--- /dev/null
+++ b/tests/cylc-cat-log/02-remote-custom-runtime-viewer-pbs/suite.rc
@@ -0,0 +1,33 @@
+#!Jinja2
+[cylc]
+    [[reference test]]
+        required run mode = live
+        live mode suite timeout = PT3M
+[scheduling]
+    [[dependencies]]
+        graph = a-task:echo => b-task
+[runtime]
+    [[a-task]]
+        script = """
+echo rubbish
+echo garbage >&2
+cylc message '1 echo done'
+sleep 60
+"""
+        [[[remote]]]
+            host={{environ["CYLC_TEST_HOST"]}}
+        [[[job submission]]]
+            method=pbs
+        [[[directives]]]
+{% if "CYLC_TEST_DIRECTIVES" in environ and environ["CYLC_TEST_DIRECTIVES"] %}
+            {{environ["CYLC_TEST_DIRECTIVES"]}}
+{% endif %}
+        [[[outputs]]]
+            echo = "[] echo done"
+
+    [[b-task]]
+        script = """
+sleep 10  # wait for buffer to flush?
+cylc cat-log --debug -o "${CYLC_SUITE_NAME}" 'a-task.1' | grep 'rubbish'
+cylc cat-log --debug -e "${CYLC_SUITE_NAME}" 'a-task.1' | grep 'garbage'
+"""
diff --git a/tests/events/00-suite.t b/tests/cylc-cat-log/03-bad-suite.t
old mode 100644
new mode 100755
similarity index 58%
copy from tests/events/00-suite.t
copy to tests/cylc-cat-log/03-bad-suite.t
index d9dcf9b..2345688
--- a/tests/events/00-suite.t
+++ b/tests/cylc-cat-log/03-bad-suite.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,16 +15,21 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Test "cylc cat-log" with bad suite name.
 . "$(dirname "$0")/test_header"
-set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
+set_test_number 4
+
+CYLC_RUN_DIR="$(cylc get-global-config --print-run-dir)"
+BAD_NAME="$(basename "$(mktemp -u "${CYLC_RUN_DIR}/XXXXXXXX")")"
+
+run_fail "${TEST_NAME_BASE}-suite" cylc cat-log "${BAD_NAME}"
+cmp_ok "${TEST_NAME_BASE}-suite.stderr" <<__ERR__
+cat: ${CYLC_RUN_DIR}/${BAD_NAME}/log/suite/log: No such file or directory
+__ERR__
 
-run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+run_fail "${TEST_NAME_BASE}-suite" cylc cat-log "${BAD_NAME}" "garbage.1"
+cmp_ok "${TEST_NAME_BASE}-suite.stderr" <<__ERR__
+cat: ${CYLC_RUN_DIR}/${BAD_NAME}/log/job/1/garbage/NN/job: No such file or directory
+__ERR__
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
 exit
diff --git a/tests/remote/00-basic.t b/tests/cylc-cat-log/04-local-tail.t
old mode 100644
new mode 100755
similarity index 69%
copy from tests/remote/00-basic.t
copy to tests/cylc-cat-log/04-local-tail.t
index 394d435..aa4606d
--- a/tests/remote/00-basic.t
+++ b/tests/cylc-cat-log/04-local-tail.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,30 +15,32 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test remote host settings.
+# Test "cylc cat-log --tail" with a custom local tail command.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 4
-#-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE basic
+install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-userathost
-SUITE_RUN_DIR=$(cylc get-global-config --print-run-dir)/$SUITE_NAME
-echo $CYLC_TEST_TASK_OWNER@$CYLC_TEST_TASK_HOST > userathost
-cmp_ok userathost - <<__OUT__
-$(sqlite3 $SUITE_RUN_DIR/cylc-suite.db "select host from task_states where name='foo'")
-__OUT__
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-hostonly
-echo $CYLC_TEST_TASK_HOST > hostonly
-cmp_ok hostonly - <<__OUT__
-$(sqlite3 $SUITE_RUN_DIR/cylc-suite.db "select host from task_states where name='bar'")
-__OUT__
+# Run detached.
+suite_run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}"
+#-------------------------------------------------------------------------------
+mkdir 'conf'
+export CYLC_CONF_PATH="${PWD}/conf"
+cat > "$PWD/conf/global.rc" <<__GLOBAL_RC__
+[hosts]
+   [[localhost]]
+        local tail command template = $PWD/bin/my-tailer.sh %(filename)s
+__GLOBAL_RC__
+#-------------------------------------------------------------------------------
+sleep 10
+TEST_NAME=$TEST_NAME_BASE-cat-log
+cylc cat-log $SUITE_NAME -o --tail foo.1 > ${TEST_NAME}.out
+grep_ok "HELLO from foo 1" ${TEST_NAME}.out
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-stop
+run_ok $TEST_NAME cylc stop --kill --max-polls=10 --interval=1 $SUITE_NAME
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/cylc-cat-log/04-local-tail/bin/my-tailer.sh b/tests/cylc-cat-log/04-local-tail/bin/my-tailer.sh
new file mode 100755
index 0000000..029734c
--- /dev/null
+++ b/tests/cylc-cat-log/04-local-tail/bin/my-tailer.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# Modify 'tail' output to prove that cylc used the custom tailer.
+# Exit immediately, for the test (i.e. don't 'tail -F')
+FILE=$1
+tail -n +1 $FILE | awk '{print "HELLO", $0; fflush() }'
diff --git a/tests/cylc-cat-log/04-local-tail/suite.rc b/tests/cylc-cat-log/04-local-tail/suite.rc
new file mode 100644
index 0000000..43aec37
--- /dev/null
+++ b/tests/cylc-cat-log/04-local-tail/suite.rc
@@ -0,0 +1,14 @@
+[cylc]
+   [[event hooks]]
+       abort on timeout = True
+       timeout = PT2M
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+    [[foo]]
+        script = """
+for I in $(seq 1 100); do
+    echo "from $CYLC_TASK_NAME $I"
+    sleep 1
+done"""
diff --git a/tests/job-kill/01-remote.t b/tests/cylc-cat-log/05-remote-tail.t
similarity index 56%
copy from tests/job-kill/01-remote.t
copy to tests/cylc-cat-log/05-remote-tail.t
index 1cc953d..a70a280 100755
--- a/tests/job-kill/01-remote.t
+++ b/tests/cylc-cat-log/05-remote-tail.t
@@ -15,34 +15,48 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test killing of jobs on a remote host.
+# Test "cylc cat-log --tail" with a custom remote tail command.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-export CYLC_TEST_HOST=$(cylc get-global-config -i '[test battery]remote host')
-if [[ -z $CYLC_TEST_HOST ]]; then
-    skip_all '[test battery]remote host: not defined'
+CYLC_TEST_HOST="$( \
+    cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')"
+if [[ -z "${CYLC_TEST_HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
 fi
-N_TESTS=3
-set_test_number $N_TESTS
+export CYLC_TEST_HOST
+set_test_number 4
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 set -eu
 SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+SCP='scp -oBatchMode=yes -oConnectTimeout=5'
 $SSH $CYLC_TEST_HOST \
-    "mkdir -p .cylc/$SUITE_NAME/ && cat >.cylc/$SUITE_NAME/passphrase" \
+    "mkdir -p .cylc/$SUITE_NAME/bin && cat >.cylc/$SUITE_NAME/passphrase" \
     <$TEST_DIR/$SUITE_NAME/passphrase
-set +eu
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+# Run detached.
+suite_run_ok "${TEST_NAME_BASE}-run" cylc run "${SUITE_NAME}"
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-ps
-run_fail $TEST_NAME \
-    $SSH $CYLC_TEST_HOST "ps \$(cat cylc-run/$SUITE_NAME/work/*/t*/file)"
+mkdir 'conf'
+export CYLC_CONF_PATH="${PWD}/conf"
+cat > "$PWD/conf/global.rc" <<__GLOBAL_RC__
+[hosts]
+   [[$CYLC_TEST_HOST]]
+        remote tail command template = \$HOME/.cylc/$SUITE_NAME/bin/my-tailer.sh %(filename)s
+__GLOBAL_RC__
+$SCP $PWD/bin/my-tailer.sh ${CYLC_TEST_HOST}:.cylc/$SUITE_NAME/bin/my-tailer.sh
+#-------------------------------------------------------------------------------
+sleep 10
+TEST_NAME=$TEST_NAME_BASE-cat-log
+cylc cat-log $SUITE_NAME -o --tail foo.1 > ${TEST_NAME}.out
+grep_ok "HELLO from foo 1" ${TEST_NAME}.out
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-stop
+run_ok $TEST_NAME cylc stop --kill --max-polls=10 --interval=1 $SUITE_NAME
 #-------------------------------------------------------------------------------
 $SSH $CYLC_TEST_HOST \
     "rm -rf .cylc/$SUITE_NAME cylc-run/$SUITE_NAME"
 purge_suite $SUITE_NAME
-exit
+
diff --git a/tests/cylc-cat-log/05-remote-tail/bin/my-tailer.sh b/tests/cylc-cat-log/05-remote-tail/bin/my-tailer.sh
new file mode 100755
index 0000000..029734c
--- /dev/null
+++ b/tests/cylc-cat-log/05-remote-tail/bin/my-tailer.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+# Modify 'tail' output to prove that cylc used the custom tailer.
+# Exit immediately, for the test (i.e. don't 'tail -F')
+FILE=$1
+tail -n +1 $FILE | awk '{print "HELLO", $0; fflush() }'
diff --git a/tests/cylc-cat-log/05-remote-tail/suite.rc b/tests/cylc-cat-log/05-remote-tail/suite.rc
new file mode 100644
index 0000000..638a246
--- /dev/null
+++ b/tests/cylc-cat-log/05-remote-tail/suite.rc
@@ -0,0 +1,17 @@
+#!Jinja2
+[cylc]
+   [[event hooks]]
+       abort on timeout = True
+       timeout = PT2M
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+    [[foo]]
+        script = """
+for I in $(seq 1 100); do
+    echo "from $CYLC_TASK_NAME $I"
+    sleep 1
+done"""
+        [[[remote]]]
+            host={{environ['CYLC_TEST_HOST']}}
diff --git a/tests/cylc-get-config/00-simple/section1.stdout b/tests/cylc-get-config/00-simple/section1.stdout
index 1acf366..2bd270d 100644
--- a/tests/cylc-get-config/00-simple/section1.stdout
+++ b/tests/cylc-get-config/00-simple/section1.stdout
@@ -1,10 +1,11 @@
-cycling mode = integer
-initial cycle point constraints = 
+runahead limit = 
+hold after point = 
 max active cycle points = 3
 final cycle point constraints = 
+cycling mode = integer
 initial cycle point = 1
+initial cycle point constraints = 
 final cycle point = 1
-runahead limit = 
 [[dependencies]]
    graph = OPS:finish-all => VAR
 [[queues]]
@@ -12,9 +13,11 @@ runahead limit =
       limit = 0
       members = ops_s1, ops_s2, ops_p1, ops_p2, var_p1, var_p2, var_s1, var_s2
 [[special tasks]]
+   clock-trigger = 
+   external-trigger = 
+   sequential = 
+   clock-expire = 
    include at start-up = 
    start-up = 
-   cold-start = 
-   sequential = 
-   clock-triggered = 
    exclude at start-up = 
+   cold-start = 
diff --git a/tests/cylc-get-config/00-simple/section2.stdout b/tests/cylc-get-config/00-simple/section2.stdout
index a49443c..862c89e 100644
--- a/tests/cylc-get-config/00-simple/section2.stdout
+++ b/tests/cylc-get-config/00-simple/section2.stdout
@@ -20,15 +20,29 @@
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[environment]]]
    [[[directives]]]
       job_type = parallel
@@ -53,11 +67,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -85,15 +103,29 @@
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[environment]]]
    [[[directives]]]
    [[[environment filter]]]
@@ -117,11 +149,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -149,15 +185,29 @@
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[environment]]]
    [[[directives]]]
       job_type = serial
@@ -182,11 +232,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -214,15 +268,29 @@
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[environment]]]
    [[[directives]]]
       job_type = serial
@@ -247,11 +315,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -279,15 +351,29 @@
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[environment]]]
    [[[directives]]]
       job_type = parallel
@@ -312,11 +398,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -339,17 +429,31 @@
    pre-script = 
    post-script = 
    inherit = OPS, PARALLEL
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[event hooks]]]
       submission timeout handler = 
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
@@ -377,11 +481,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -404,17 +512,31 @@
    pre-script = 
    post-script = 
    inherit = VAR, PARALLEL
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[event hooks]]]
       submission timeout handler = 
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
@@ -442,11 +564,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -469,17 +595,31 @@
    pre-script = 
    post-script = 
    inherit = 
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[event hooks]]]
       submission timeout handler = 
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
@@ -506,11 +646,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -533,17 +677,31 @@
    pre-script = 
    post-script = 
    inherit = VAR, SERIAL
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[event hooks]]]
       submission timeout handler = 
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
@@ -571,11 +729,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -598,17 +760,31 @@
    pre-script = 
    post-script = 
    inherit = 
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[event hooks]]]
       submission timeout handler = 
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
@@ -636,11 +812,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -663,17 +843,31 @@
    pre-script = 
    post-script = 
    inherit = 
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[event hooks]]]
       submission timeout handler = 
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
@@ -700,11 +894,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -727,17 +925,31 @@
    pre-script = 
    post-script = 
    inherit = 
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[event hooks]]]
       submission timeout handler = 
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
@@ -765,11 +977,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
@@ -792,17 +1008,31 @@
    pre-script = 
    post-script = 
    inherit = VAR, SERIAL
+   [[[events]]]
+      handler events = 
+      mail smtp = 
+      handlers = 
+      mail events = 
+      mail retry delays = 
+      handler retry delays = 
+      mail to = 
+      mail from = 
+      execution timeout = 
+      submission timeout = 
+      register job logs retry delays = 
+      reset timer = 
    [[[event hooks]]]
       submission timeout handler = 
       submitted handler = 
       started handler = 
       execution timeout handler = 
+      expired handler = 
       submission failed handler = 
       submission retry handler = 
       warning handler = 
       succeeded handler = 
       retry handler = 
-      reset timer = False
+      reset timer = 
       execution timeout = 
       failed handler = 
       submission timeout = 
@@ -830,11 +1060,15 @@
       max-polls = 
       run-dir = 
       user = 
+      template = 
       verbose mode = 
    [[[remote]]]
       owner = 
       suite definition directory = 
       host = 
+      retrieve job logs = 
+      retrieve job logs max size = 
+      retrieve job logs retry delays = 
    [[[job submission]]]
       shell = /bin/bash
       command template = 
diff --git a/tests/cylc-graph-diff/00-simple.t b/tests/cylc-graph-diff/00-simple.t
index 980208c..8f2f308 100644
--- a/tests/cylc-graph-diff/00-simple.t
+++ b/tests/cylc-graph-diff/00-simple.t
@@ -40,7 +40,7 @@ TEST_NAME=$TEST_NAME_BASE-bad-suites-number-1
 run_fail $TEST_NAME cylc graph-diff "$DIFF_SUITE_NAME"
 cmp_ok "$TEST_NAME.stdout" </dev/null
 cmp_ok "$TEST_NAME.stderr" <<'__ERR__'
-USAGE: cylc graph-diff [OPTIONS] SUITE1 SUITE2 -- [GRAPH_OPTIONS_ARGS]
+Usage: cylc graph-diff [OPTIONS] SUITE1 SUITE2 -- [GRAPH_OPTIONS_ARGS]
 
 Difference 'cylc graph --reference' output for SUITE1 and SUITE2.
 
@@ -56,7 +56,7 @@ run_fail $TEST_NAME cylc graph-diff "$DIFF_SUITE_NAME" "$CONTROL_SUITE_NAME" \
     "$SAME_SUITE_NAME"
 cmp_ok "$TEST_NAME.stdout" </dev/null
 cmp_ok "$TEST_NAME.stderr" <<'__ERR__'
-USAGE: cylc graph-diff [OPTIONS] SUITE1 SUITE2 -- [GRAPH_OPTIONS_ARGS]
+Usage: cylc graph-diff [OPTIONS] SUITE1 SUITE2 -- [GRAPH_OPTIONS_ARGS]
 
 Difference 'cylc graph --reference' output for SUITE1 and SUITE2.
 
diff --git a/tests/cylc-insert/01-insert-bad-cycle-point.t b/tests/cylc-insert/01-insert-bad-cycle-point.t
index ed9dbae..60a9f39 100644
--- a/tests/cylc-insert/01-insert-bad-cycle-point.t
+++ b/tests/cylc-insert/01-insert-bad-cycle-point.t
@@ -27,6 +27,6 @@ run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
 suite_run_fail $TEST_NAME cylc run -v -v --reference-test --debug $SUITE_NAME
-grep_ok "teatime: invalid cycle point for inserted task" $TEST_NAME.stderr
+grep_ok "Invalid cycle point" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/cylc-insert/02-insert-bad-stop-cycle-point.t b/tests/cylc-insert/02-insert-bad-stop-cycle-point.t
index 14a73e8..6b52610 100644
--- a/tests/cylc-insert/02-insert-bad-stop-cycle-point.t
+++ b/tests/cylc-insert/02-insert-bad-stop-cycle-point.t
@@ -27,6 +27,6 @@ run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
 suite_run_fail $TEST_NAME cylc run -v -v --reference-test --debug $SUITE_NAME
-grep_ok "soon: invalid stop cycle point for inserted task" $TEST_NAME.stderr
+grep_ok "Invalid cycle point" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/broadcast/01-dependencies.t b/tests/cylc-insert/04-insert-family.t
similarity index 87%
rename from tests/broadcast/01-dependencies.t
rename to tests/cylc-insert/04-insert-family.t
index 63d8b12..e27155e 100644
--- a/tests/broadcast/01-dependencies.t
+++ b/tests/cylc-insert/04-insert-family.t
@@ -15,17 +15,17 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test broadcasts
+# Test "cylc insert" on insertion of a task family (and family remove too).
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
 #-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE $TEST_NAME_BASE
+install_suite $TEST_NAME_BASE insert-family
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+suite_run_ok $TEST_NAME cylc run -v --reference-test --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/cylc-insert/insert-family/reference.log b/tests/cylc-insert/insert-family/reference.log
new file mode 100644
index 0000000..1279b05
--- /dev/null
+++ b/tests/cylc-insert/insert-family/reference.log
@@ -0,0 +1,63 @@
+2015-07-17T14:57:39+12 INFO - port:7766
+2015-07-17T14:57:39+12 INFO - Suite starting at 2015-07-17T14:57:39+12
+2015-07-17T14:57:39+12 INFO - Run mode: live
+2015-07-17T14:57:39+12 INFO - Initial point: 1
+2015-07-17T14:57:39+12 INFO - Final point: 2
+2015-07-17T14:57:39+12 INFO - Cold Start 1
+2015-07-17T14:57:39+12 INFO - [remover.1] -job(01) initiate job-submit
+2015-07-17T14:57:39+12 INFO - [remover.1] -triggered off []
+2015-07-17T14:57:39+12 INFO - Client: remove_task (cylc-remove oliverh at oliverh-34403DL.niwa.local 7a3be907-3ab8-441b-b053-a936ee65e941)
+2015-07-17T14:57:40+12 INFO - [remover.1] -submit_method_id=31752
+2015-07-17T14:57:40+12 INFO - [remover.1] -submission succeeded
+2015-07-17T14:57:40+12 INFO - [remover.1] -(current:submitted)> remover.1 started at 2015-07-17T14:57:39+12
+2015-07-17T14:57:40+12 INFO - [remover.1] -(current:running)> remover.1 succeeded at 2015-07-17T14:57:40+12
+2015-07-17T14:57:40+12 INFO - Command succeeded: remove_task(FAM-.,1,True,False)
+2015-07-17T14:57:41+12 INFO - [inserter.1] -job(01) initiate job-submit
+2015-07-17T14:57:41+12 INFO - [inserter.1] -triggered off ['remover.1']
+2015-07-17T14:57:42+12 INFO - Client: insert_task (cylc-insert oliverh at oliverh-34403DL.niwa.local ae74aa6d-c344-4792-9ca5-1eee2896ecdb)
+2015-07-17T14:57:42+12 INFO - [inserter.1] -submit_method_id=31832
+2015-07-17T14:57:42+12 INFO - [inserter.1] -submission succeeded
+2015-07-17T14:57:42+12 INFO - [inserter.1] -(current:submitted)> inserter.1 started at 2015-07-17T14:57:41+12
+2015-07-17T14:57:42+12 INFO - [inserter.1] -(current:running)> inserter.1 succeeded at 2015-07-17T14:57:42+12
+2015-07-17T14:57:42+12 INFO - Command succeeded: insert_task(FAM-A,1,True,None)
+2015-07-17T14:57:43+12 INFO - [a2.1] -job(01) initiate job-submit
+2015-07-17T14:57:43+12 INFO - [a1.1] -job(01) initiate job-submit
+2015-07-17T14:57:43+12 INFO - [a2.1] -triggered off ['inserter.1']
+2015-07-17T14:57:43+12 INFO - [a1.1] -triggered off ['inserter.1']
+2015-07-17T14:57:46+12 INFO - [a2.1] -submit_method_id=31931
+2015-07-17T14:57:46+12 INFO - [a2.1] -submission succeeded
+2015-07-17T14:57:46+12 INFO - [a1.1] -submit_method_id=31930
+2015-07-17T14:57:46+12 INFO - [a1.1] -submission succeeded
+2015-07-17T14:57:47+12 INFO - [a2.1] -(current:submitted)> a2.1 started at 2015-07-17T14:57:45+12
+2015-07-17T14:57:47+12 INFO - [a2.1] -(current:running)> a2.1 succeeded at 2015-07-17T14:57:46+12
+2015-07-17T14:57:47+12 INFO - [a1.1] -(current:submitted)> a1.1 started at 2015-07-17T14:57:45+12
+2015-07-17T14:57:47+12 INFO - [a1.1] -(current:running)> a1.1 succeeded at 2015-07-17T14:57:46+12
+2015-07-17T14:57:48+12 INFO - [remover.2] -job(01) initiate job-submit
+2015-07-17T14:57:48+12 INFO - [remover.2] -triggered off ['a1.1', 'a2.1']
+2015-07-17T14:57:48+12 INFO - Client: remove_task (cylc-remove oliverh at oliverh-34403DL.niwa.local 6112821f-b7ab-4243-b82f-bcaadb1772e8)
+2015-07-17T14:57:49+12 INFO - [remover.2] -submit_method_id=32055
+2015-07-17T14:57:49+12 INFO - [remover.2] -submission succeeded
+2015-07-17T14:57:49+12 INFO - [remover.2] -(current:submitted)> remover.2 started at 2015-07-17T14:57:48+12
+2015-07-17T14:57:49+12 INFO - [remover.2] -(current:running)> remover.2 succeeded at 2015-07-17T14:57:49+12
+2015-07-17T14:57:49+12 INFO - Command succeeded: remove_task(FAM-.,2,True,False)
+2015-07-17T14:57:50+12 INFO - [inserter.2] -job(01) initiate job-submit
+2015-07-17T14:57:50+12 INFO - [inserter.2] -triggered off ['remover.2']
+2015-07-17T14:57:51+12 INFO - Client: insert_task (cylc-insert oliverh at oliverh-34403DL.niwa.local 9275ddcb-f67c-48a2-9626-795925b9020b)
+2015-07-17T14:57:51+12 INFO - [inserter.2] -submit_method_id=32136
+2015-07-17T14:57:51+12 INFO - [inserter.2] -submission succeeded
+2015-07-17T14:57:51+12 INFO - [inserter.2] -(current:submitted)> inserter.2 started at 2015-07-17T14:57:51+12
+2015-07-17T14:57:51+12 INFO - [inserter.2] -(current:running)> inserter.2 succeeded at 2015-07-17T14:57:51+12
+2015-07-17T14:57:51+12 INFO - Command succeeded: insert_task(FAM-A,2,True,None)
+2015-07-17T14:57:52+12 INFO - [a2.2] -job(01) initiate job-submit
+2015-07-17T14:57:52+12 INFO - [a1.2] -job(01) initiate job-submit
+2015-07-17T14:57:52+12 INFO - [a2.2] -triggered off ['inserter.2']
+2015-07-17T14:57:52+12 INFO - [a1.2] -triggered off ['inserter.2']
+2015-07-17T14:57:54+12 INFO - [a2.2] -submit_method_id=32226
+2015-07-17T14:57:54+12 INFO - [a2.2] -submission succeeded
+2015-07-17T14:57:54+12 INFO - [a1.2] -submit_method_id=32240
+2015-07-17T14:57:54+12 INFO - [a1.2] -submission succeeded
+2015-07-17T14:57:54+12 INFO - [a2.2] -(current:submitted)> a2.2 started at 2015-07-17T14:57:53+12
+2015-07-17T14:57:54+12 INFO - [a2.2] -(current:running)> a2.2 succeeded at 2015-07-17T14:57:53+12
+2015-07-17T14:57:54+12 INFO - [a1.2] -(current:submitted)> a1.2 started at 2015-07-17T14:57:53+12
+2015-07-17T14:57:54+12 INFO - [a1.2] -(current:running)> a1.2 succeeded at 2015-07-17T14:57:53+12
+2015-07-17T14:57:55+12 INFO - Suite shutting down at 2015-07-17T14:57:55+12
diff --git a/tests/cylc-insert/insert-family/suite.rc b/tests/cylc-insert/insert-family/suite.rc
new file mode 100644
index 0000000..1877ccb
--- /dev/null
+++ b/tests/cylc-insert/insert-family/suite.rc
@@ -0,0 +1,33 @@
+title = Test removal and insertion of task families.
+description = """Two families are removed, then one is re-inserted.
+The ref test will fail if either operation fails to work properly."""
+
+[cylc]
+    [[reference test]]
+        required run mode = live
+        live mode suite timeout = PT1M
+[scheduling]
+    initial cycle point = 1
+    final cycle point = 2
+    cycling mode = integer
+    [[dependencies]]
+        [[[P1]]]
+            graph = """
+        remover => inserter => FAM-A & FAM-B
+        FAM-A[-P1]:succeed-all => remover"""
+[runtime]
+    [[root]]
+        script = /bin/true
+    [[remover]]
+        # Remove both families (also tests removal by pattern).
+        script = """
+cylc remove --no-spawn -m $CYLC_SUITE_NAME 'FAM-.' $CYLC_TASK_CYCLE_POINT"""
+    [[inserter]]
+        # Re-insert one family.
+        script = """
+cylc insert -m $CYLC_SUITE_NAME 'FAM-A' $CYLC_TASK_CYCLE_POINT"""
+    [[FAM-A, FAM-B]]
+    [[a1, a2]]
+        inherit = FAM-A
+    [[b1, b2]]
+        inherit = FAM-B
diff --git a/tests/cylc-job-poll/02-loadleveler.t b/tests/cylc-job-poll/02-loadleveler.t
index cb0cbaa..d501fb8 100755
--- a/tests/cylc-job-poll/02-loadleveler.t
+++ b/tests/cylc-job-poll/02-loadleveler.t
@@ -77,7 +77,7 @@ __PYTHON__
 #-------------------------------------------------------------------------------
 if ! ${IS_AT_T_HOST:-false}; then
     RC_ITEM='[test battery][batch systems][loadleveler]host'
-    T_HOST=$(cylc get-global-config -i "${RC_ITEM}")
+    T_HOST=$(cylc get-global-config -i "${RC_ITEM}" 2>'/dev/null')
     if [[ -z $T_HOST ]]; then
         skip_all "\"${RC_ITEM}\" not defined"
     fi
@@ -99,7 +99,8 @@ fi
 T_DIRECTIVES_MORE=
 if ! ${HAS_READ_T_DIRECTIVES_MORE:-false}; then
     RC_ITEM='[test battery][batch systems][loadleveler][directives]'
-    export T_DIRECTIVES_MORE=$(cylc get-global-config -i "${RC_ITEM}")
+    export T_DIRECTIVES_MORE=$( \
+        cylc get-global-config -i "${RC_ITEM}" 2>'/dev/null')
     export HAS_READ_T_DIRECTIVES_MORE=true
 fi
 FAKE_JOB_ID=$(get_fake_job_id)
diff --git a/tests/cylc-job-poll/03-slurm.t b/tests/cylc-job-poll/03-slurm.t
index 0f5a5e4..4be455a 100755
--- a/tests/cylc-job-poll/03-slurm.t
+++ b/tests/cylc-job-poll/03-slurm.t
@@ -78,7 +78,7 @@ __PYTHON__
 #-------------------------------------------------------------------------------
 if ! ${IS_AT_T_HOST:-false}; then
     RC_ITEM='[test battery][batch systems][slurm]host'
-    T_HOST=$(cylc get-global-config -i "${RC_ITEM}")
+    T_HOST=$(cylc get-global-config -i "${RC_ITEM}" 2>'/dev/null')
     if [[ -z $T_HOST ]]; then
         skip_all "\"${RC_ITEM}\" not defined"
     fi
@@ -100,7 +100,8 @@ fi
 T_DIRECTIVES_MORE=
 if ! ${HAS_READ_T_DIRECTIVES_MORE:-false}; then
     RC_ITEM='[test battery][batch systems][slurm][directives]'
-    export T_DIRECTIVES_MORE=$(cylc get-global-config -i "${RC_ITEM}")
+    export T_DIRECTIVES_MORE=$( \
+        cylc get-global-config -i "${RC_ITEM}" 2>'/dev/null')
     export HAS_READ_T_DIRECTIVES_MORE=true
 fi
 FAKE_JOB_ID=$(get_fake_job_id)
diff --git a/tests/cylc-job-poll/04-pbs.t b/tests/cylc-job-poll/04-pbs.t
index 99ad415..d2e46bd 100755
--- a/tests/cylc-job-poll/04-pbs.t
+++ b/tests/cylc-job-poll/04-pbs.t
@@ -74,7 +74,7 @@ __PYTHON__
 #-------------------------------------------------------------------------------
 if ! ${IS_AT_T_HOST:-false}; then
     RC_ITEM='[test battery][batch systems][pbs]host'
-    T_HOST=$(cylc get-global-config -i "${RC_ITEM}")
+    T_HOST=$(cylc get-global-config -i "${RC_ITEM}" 2>'/dev/null')
     if [[ -z $T_HOST ]]; then
         skip_all "\"${RC_ITEM}\" not defined"
     fi
@@ -96,7 +96,8 @@ fi
 T_DIRECTIVES_MORE=
 if ! ${HAS_READ_T_DIRECTIVES_MORE:-false}; then
     RC_ITEM='[test battery][batch systems][pbs][directives]'
-    export T_DIRECTIVES_MORE=$(cylc get-global-config -i "${RC_ITEM}")
+    export T_DIRECTIVES_MORE=$( \
+        cylc get-global-config -i "${RC_ITEM}" 2>'/dev/null')
     export HAS_READ_T_DIRECTIVES_MORE=true
 fi
 FAKE_JOB_ID=$(get_fake_job_id)
diff --git a/tests/cylc-job-poll/05-lsf.t b/tests/cylc-job-poll/05-lsf.t
index 1c1eeea..81e3db8 100755
--- a/tests/cylc-job-poll/05-lsf.t
+++ b/tests/cylc-job-poll/05-lsf.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test cylc job-poll, "pbs" jobs
+# Test cylc job-poll, "lsf" jobs
 . $(dirname $0)/test_header
 SSH='ssh -oBatchMode=yes'
 #-------------------------------------------------------------------------------
@@ -75,7 +75,7 @@ __PYTHON__
 #-------------------------------------------------------------------------------
 if ! ${IS_AT_T_HOST:-false}; then
     RC_ITEM='[test battery][batch systems][lsf]host'
-    T_HOST=$(cylc get-global-config -i "${RC_ITEM}")
+    T_HOST=$(cylc get-global-config -i "${RC_ITEM}" 2>'/dev/null')
     if [[ -z $T_HOST ]]; then
         skip_all "\"${RC_ITEM}\" not defined"
     fi
@@ -97,7 +97,8 @@ fi
 T_DIRECTIVES_MORE=
 if ! ${HAS_READ_T_DIRECTIVES_MORE:-false}; then
     RC_ITEM='[test battery][batch systems][lsf][directives]'
-    export T_DIRECTIVES_MORE=$(cylc get-global-config -i "${RC_ITEM}")
+    export T_DIRECTIVES_MORE=$( \
+        cylc get-global-config -i "${RC_ITEM}" 2>'/dev/null')
     export HAS_READ_T_DIRECTIVES_MORE=true
 fi
 FAKE_JOB_ID=$(get_fake_job_id)
diff --git a/tests/cylc-kill/00-kill-multi-hosts.t b/tests/cylc-kill/00-kill-multi-hosts.t
new file mode 100755
index 0000000..d5015f9
--- /dev/null
+++ b/tests/cylc-kill/00-kill-multi-hosts.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test killing of multiple jobs on localhost and a remote host
+. "$(dirname "$0")/test_header"
+export CYLC_TEST_HOST=$(
+    cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
+if [[ -z "${CYLC_TEST_HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
+fi
+
+set_test_number 3
+
+export CYLC_CONF_PATH=
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+set -eu
+SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+${SSH} "${CYLC_TEST_HOST}" \
+    "mkdir -p .cylc/${SUITE_NAME}/ && cat >.cylc/${SUITE_NAME}/passphrase" \
+    <"${TEST_DIR}/${SUITE_NAME}/passphrase"
+set +eu
+
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate "${SUITE_NAME}" -s "CYLC_TEST_HOST=${CYLC_TEST_HOST}"
+
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug "${SUITE_NAME}" \
+    -s "CYLC_TEST_HOST=${CYLC_TEST_HOST}"
+
+RUN_DIR="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}"
+LOG="${RUN_DIR}/log/suite/log"
+sed -n 's/^.*\(cylc jobs-kill\)/\1/p' "${LOG}" | sort >'edited-suite-log'
+
+sort >'edited-suite-log-ref' <<__LOG__
+cylc jobs-kill --debug --host=${CYLC_TEST_HOST} -- '\$HOME/cylc-run/${SUITE_NAME}/log/job' 1/remote-1/01 1/remote-2/01
+cylc jobs-kill --debug -- ${RUN_DIR}/log/job 1/local-1/01 1/local-2/01 1/local-3/01
+__LOG__
+cmp_ok 'edited-suite-log' 'edited-suite-log-ref'
+
+$SSH -n "$CYLC_TEST_HOST" "rm -rf '.cylc/$SUITE_NAME' 'cylc-run/$SUITE_NAME'"
+purge_suite "${SUITE_NAME}"
+exit
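The `sed -n 's/^.*\(cylc jobs-kill\)/\1/p'` filter in the test above trims each matching suite log line down to the `cylc jobs-kill ...` command itself, so the comparison against the reference lines ignores timestamps and log prefixes. A minimal stand-alone sketch of the same filter, using a made-up log line rather than output from a real run:

    #!/bin/bash
    # Sketch of the log filter used in 00-kill-multi-hosts.t above.
    # The sample line is illustrative only, not taken from a real suite log.
    SAMPLE='2015-08-24T15:20:20Z INFO - cylc jobs-kill --debug -- /path/to/log/job 1/local-1/01'
    # Keep only the part of the line from "cylc jobs-kill" onwards.
    echo "${SAMPLE}" | sed -n 's/^.*\(cylc jobs-kill\)/\1/p'
    # Prints: cylc jobs-kill --debug -- /path/to/log/job 1/local-1/01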
diff --git a/tests/cylc-kill/00-kill-multi-hosts/reference.log b/tests/cylc-kill/00-kill-multi-hosts/reference.log
new file mode 100644
index 0000000..2c14d37
--- /dev/null
+++ b/tests/cylc-kill/00-kill-multi-hosts/reference.log
@@ -0,0 +1,10 @@
+2015-08-24T15:20:15Z INFO - Run mode: live
+2015-08-24T15:20:15Z INFO - Initial point: 1
+2015-08-24T15:20:15Z INFO - Final point: 1
+2015-08-24T15:20:15Z INFO - Cold Start 1
+2015-08-24T15:20:16Z INFO - [remote-1.1] -triggered off []
+2015-08-24T15:20:16Z INFO - [local-1.1] -triggered off []
+2015-08-24T15:20:16Z INFO - [local-2.1] -triggered off []
+2015-08-24T15:20:16Z INFO - [remote-2.1] -triggered off []
+2015-08-24T15:20:16Z INFO - [local-3.1] -triggered off []
+2015-08-24T15:20:19Z INFO - [killer.1] -triggered off ['local-1.1', 'local-2.1', 'local-3.1', 'remote-1.1', 'remote-2.1']
diff --git a/tests/cylc-kill/00-kill-multi-hosts/suite.rc b/tests/cylc-kill/00-kill-multi-hosts/suite.rc
new file mode 100644
index 0000000..03eefaa
--- /dev/null
+++ b/tests/cylc-kill/00-kill-multi-hosts/suite.rc
@@ -0,0 +1,26 @@
+#!Jinja2
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        required run mode = live
+        live mode suite timeout = PT1M
+        expected task failures = local-1.1, local-2.1, local-3.1, remote-1.1, remote-2.1
+[scheduling]
+    [[dependencies]]
+        graph="""
+KILLABLE:start-all => killer
+"""
+[runtime]
+    [[KILLABLE]]
+        script=sleep 60
+    [[local-1, local-2, local-3]]
+        inherit = KILLABLE
+    [[remote-1, remote-2]]
+        inherit = KILLABLE
+        [[[remote]]]
+            host={{CYLC_TEST_HOST}}
+    [[killer]]
+        script="""
+cylc kill -m "${CYLC_SUITE_NAME}" KILLABLE 1
+cylc stop "${CYLC_SUITE_NAME}"
+"""
diff --git a/tests/purge/test_header b/tests/cylc-kill/test_header
similarity index 100%
copy from tests/purge/test_header
copy to tests/cylc-kill/test_header
diff --git a/tests/cylc-message/00-ssh.t b/tests/cylc-message/00-ssh.t
new file mode 100755
index 0000000..9093134
--- /dev/null
+++ b/tests/cylc-message/00-ssh.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test "cylc message" in SSH mode, test needs to have compatible version
+# installed on the remote host.
+. "$(dirname "$0")/test_header"
+#-------------------------------------------------------------------------------
+CYLC_TEST_HOST="$( \
+    cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')"
+if [[ -z "${CYLC_TEST_HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
+fi
+set_test_number 3
+
+mkdir 'conf'
+cat >>'conf/global.rc' <<__GLOBAL_RC__
+[hosts]
+    [[${CYLC_TEST_HOST}]]
+        task communication method = ssh
+__GLOBAL_RC__
+export CYLC_CONF_PATH="${PWD}/conf"
+
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+# Note: Don't install passphrase on remote host. Message should only return via
+# SSH.
+
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate "${SUITE_NAME}" -s "CYLC_TEST_HOST=${CYLC_TEST_HOST}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --debug --reference-test "${SUITE_NAME}" \
+    -s "CYLC_TEST_HOST=${CYLC_TEST_HOST}"
+
+run_fail "${TEST_NAME_BASE}-grep-DENIED-suite-log" \
+    grep -q "\\[client-connect\\] DENIED .*@${CYLC_TEST_HOST}:cylc-message" \
+    "$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/log/suite/log"
+
+ssh -oBatchMode=yes -oConnectTimeout=5 "${CYLC_TEST_HOST}" \
+    "rm -rf '.cylc/${SUITE_NAME}' 'cylc-run/${SUITE_NAME}'"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/cylc-message/00-ssh/reference.log b/tests/cylc-message/00-ssh/reference.log
new file mode 100644
index 0000000..68f280b
--- /dev/null
+++ b/tests/cylc-message/00-ssh/reference.log
@@ -0,0 +1,5 @@
+2015-08-26T10:10:10Z INFO - Run mode: live
+2015-08-26T10:10:10Z INFO - Initial point: 1
+2015-08-26T10:10:10Z INFO - Final point: 1
+2015-08-26T10:10:10Z INFO - Cold Start 1
+2015-08-26T10:10:10Z INFO - [t0.1] -triggered off []
diff --git a/tests/cylc-message/00-ssh/suite.rc b/tests/cylc-message/00-ssh/suite.rc
new file mode 100644
index 0000000..daccf88
--- /dev/null
+++ b/tests/cylc-message/00-ssh/suite.rc
@@ -0,0 +1,15 @@
+#!jinja2
+[cylc]
+    UTC mode = True # Ignore DST
+    [[reference test]]
+        live mode suite timeout = PT1M
+
+[scheduling]
+    [[dependencies]]
+        graph=t0
+
+[runtime]
+    [[t0]]
+        script = true
+        [[[remote]]]
+            host = {{CYLC_TEST_HOST}}
diff --git a/tests/purge/test_header b/tests/cylc-message/test_header
similarity index 100%
copy from tests/purge/test_header
copy to tests/cylc-message/test_header
diff --git a/tests/cylc-poll/00-basic.t b/tests/cylc-poll/00-basic.t
old mode 100644
new mode 100755
diff --git a/tests/cylc-poll/01-task-failed.t b/tests/cylc-poll/01-task-failed.t
old mode 100644
new mode 100755
diff --git a/tests/cylc-poll/02-task-submit-failed.t b/tests/cylc-poll/02-task-submit-failed.t
old mode 100644
new mode 100755
diff --git a/tests/cylc-poll/03-poll-all.t b/tests/cylc-poll/03-poll-all.t
old mode 100644
new mode 100755
diff --git a/tests/cylc-poll/04-poll-multi-hosts.t b/tests/cylc-poll/04-poll-multi-hosts.t
new file mode 100755
index 0000000..6a7a2cd
--- /dev/null
+++ b/tests/cylc-poll/04-poll-multi-hosts.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test polling of multiple jobs on localhost and a remote host
+. "$(dirname "$0")/test_header"
+export CYLC_TEST_HOST=$( \
+    cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
+if [[ -z "${CYLC_TEST_HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
+fi
+
+set_test_number 3
+
+export CYLC_CONF_PATH=
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+set -eu
+SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+${SSH} "${CYLC_TEST_HOST}" \
+    "mkdir -p .cylc/${SUITE_NAME}/ && cat >.cylc/${SUITE_NAME}/passphrase" \
+    <"${TEST_DIR}/${SUITE_NAME}/passphrase"
+set +eu
+
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate "${SUITE_NAME}" -s "CYLC_TEST_HOST=${CYLC_TEST_HOST}"
+
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug "${SUITE_NAME}" \
+    -s "CYLC_TEST_HOST=${CYLC_TEST_HOST}"
+
+RUN_DIR="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}"
+LOG="${RUN_DIR}/log/suite/log"
+sed -n 's/^.*\(cylc jobs-poll\)/\1/p' "${LOG}" | sort >'edited-suite-log'
+
+sort >'edited-suite-log-ref' <<__LOG__
+cylc jobs-poll --debug --host=${CYLC_TEST_HOST} -- '\$HOME/cylc-run/${SUITE_NAME}/log/job' 1/remote-fail-1/01 1/remote-success-1/01 1/remote-success-2/01
+cylc jobs-poll --debug -- ${RUN_DIR}/log/job 1/local-fail-1/01 1/local-fail-2/01 1/local-success-1/01
+__LOG__
+cmp_ok 'edited-suite-log' 'edited-suite-log-ref'
+
+$SSH -n "$CYLC_TEST_HOST" "rm -rf '.cylc/$SUITE_NAME' 'cylc-run/$SUITE_NAME'"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/cylc-poll/04-poll-multi-hosts/reference.log b/tests/cylc-poll/04-poll-multi-hosts/reference.log
new file mode 100644
index 0000000..d2cc075
--- /dev/null
+++ b/tests/cylc-poll/04-poll-multi-hosts/reference.log
@@ -0,0 +1,11 @@
+2015-08-24T13:53:25Z INFO - Run mode: live
+2015-08-24T13:53:25Z INFO - Initial point: 1
+2015-08-24T13:53:25Z INFO - Final point: 1
+2015-08-24T13:53:25Z INFO - Cold Start 1
+2015-08-24T13:53:26Z INFO - [remote-success-1.1] -triggered off []
+2015-08-24T13:53:26Z INFO - [remote-fail-1.1] -triggered off []
+2015-08-24T13:53:26Z INFO - [local-fail-2.1] -triggered off []
+2015-08-24T13:53:26Z INFO - [remote-success-2.1] -triggered off []
+2015-08-24T13:53:26Z INFO - [local-success-1.1] -triggered off []
+2015-08-24T13:53:26Z INFO - [local-fail-1.1] -triggered off []
+2015-08-24T13:53:31Z INFO - [poller.1] -triggered off ['local-fail-1.1', 'local-fail-2.1', 'local-success-1.1', 'remote-fail-1.1', 'remote-success-1.1', 'remote-success-2.1']
diff --git a/tests/cylc-poll/04-poll-multi-hosts/suite.rc b/tests/cylc-poll/04-poll-multi-hosts/suite.rc
new file mode 100644
index 0000000..aa966af
--- /dev/null
+++ b/tests/cylc-poll/04-poll-multi-hosts/suite.rc
@@ -0,0 +1,52 @@
+#!Jinja2
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        required run mode = live
+        live mode suite timeout = PT1M
+        expected task failures = local-fail-1.1, local-fail-2.1, remote-fail-1.1
+[scheduling]
+    [[dependencies]]
+        graph="""
+POLLABLE:start-all => poller
+"""
+[runtime]
+    [[POLLABLE]]
+        pre-script="""
+# Stop script from reporting anything back
+trap '' 'EXIT'
+trap '' 'ERR'
+"""
+    [[FAIL]]
+        inherit = POLLABLE
+        script="""
+echo 'I am failing...' >&2
+exit 1
+"""
+    [[local-fail-1, local-fail-2]]
+        inherit = FAIL
+    [[remote-fail-1]]
+        inherit = FAIL
+        [[[remote]]]
+            host={{CYLC_TEST_HOST}}
+    [[SUCCESS]]
+        inherit = POLLABLE
+        script="""
+echo 'I am OK.'
+{
+    echo 'CYLC_JOB_EXIT=SUCCEEDED'
+    echo "CYLC_JOB_EXIT_TIME=$(date +%FT%H:%M:%SZ)"
+} >>"${CYLC_TASK_LOG_ROOT}.status"
+exit
+"""
+    [[local-success-1]]
+        inherit = SUCCESS
+    [[remote-success-1, remote-success-2]]
+        inherit = SUCCESS
+        [[[remote]]]
+            host={{CYLC_TEST_HOST}}
+    [[poller]]
+        script="""
+cylc poll -m "${CYLC_SUITE_NAME}" POLLABLE 1
+cylc stop "${CYLC_SUITE_NAME}"
+"""
diff --git a/lib/parsec/tests/nullcfg/00-missing-file.t b/tests/cylc-poll/05-poll-multi-messages.t
similarity index 69%
rename from lib/parsec/tests/nullcfg/00-missing-file.t
rename to tests/cylc-poll/05-poll-multi-messages.t
index 9627174..1e99552 100755
--- a/lib/parsec/tests/nullcfg/00-missing-file.t
+++ b/tests/cylc-poll/05-poll-multi-messages.t
@@ -15,13 +15,17 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test that missing config files yield empty config dicts
-. $(dirname $0)/test_header
+# Test polling of multiple task messages
+. "$(dirname "$0")/test_header"
+set_test_number 2
 
-#-------------------------------------------------------------------------------
-set_test_number 1
+export CYLC_CONF_PATH=
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 
-install_test $TEST_NAME_BASE
-#-------------------------------------------------------------------------------
-TEST_NAME=${TEST_NAME_BASE}
-run_ok $TEST_NAME missing.py
+run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug "${SUITE_NAME}"
+
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/cylc-poll/05-poll-multi-messages/reference.log b/tests/cylc-poll/05-poll-multi-messages/reference.log
new file mode 100644
index 0000000..aece01f
--- /dev/null
+++ b/tests/cylc-poll/05-poll-multi-messages/reference.log
@@ -0,0 +1,8 @@
+2015-08-24T16:19:01Z INFO - Run mode: live
+2015-08-24T16:19:01Z INFO - Initial point: 1
+2015-08-24T16:19:01Z INFO - Final point: 1
+2015-08-24T16:19:01Z INFO - Cold Start 1
+2015-08-24T16:19:01Z INFO - [speaker1.1] -triggered off []
+2015-08-24T16:19:01Z INFO - [speaker2.1] -triggered off []
+2015-08-24T16:19:04Z INFO - [poller.1] -triggered off ['speaker1.1', 'speaker2.1']
+2015-08-24T16:19:07Z INFO - [finisher.1] -triggered off ['speaker1.1', 'speaker1.1', 'speaker2.1']
diff --git a/tests/cylc-poll/05-poll-multi-messages/suite.rc b/tests/cylc-poll/05-poll-multi-messages/suite.rc
new file mode 100644
index 0000000..5673ec7
--- /dev/null
+++ b/tests/cylc-poll/05-poll-multi-messages/suite.rc
@@ -0,0 +1,45 @@
+#!Jinja2
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        required run mode = live
+        live mode suite timeout = PT1M
+[scheduling]
+    [[dependencies]]
+        graph="""
+speaker1:start & speaker2:start => poller
+speaker1:hello1 & speaker1:hello2 & speaker2:greet => finisher
+"""
+[runtime]
+    [[speaker1]]
+        script="""
+# Wait for "cylc task message started" command
+wait
+# Simulate "cylc task message", messages written to status file but failed to
+# get sent back to the suite
+{
+    echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|NORMAL|hello1 ${CYLC_TASK_CYCLE_POINT}"
+    echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|NORMAL|hello2 ${CYLC_TASK_CYCLE_POINT}"
+} >>"${CYLC_TASK_LOG_ROOT}.status"
+sleep 30
+"""
+        [[[outputs]]]
+            hello1 = "hello1 []"
+            hello2 = "hello2 []"
+    [[speaker2]]
+        script="""
+# Wait for "cylc task message started" command
+wait
+# Simulate "cylc task message", messages written to status file but failed to
+# get sent back to the suite
+{
+    echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|NORMAL|greet ${CYLC_TASK_CYCLE_POINT}"
+} >>"${CYLC_TASK_LOG_ROOT}.status"
+sleep 30
+"""
+        [[[outputs]]]
+            greet = "greet []"
+    [[finisher]]
+        script=true
+    [[poller]]
+        script=cylc poll "${CYLC_SUITE_NAME}" 'speaker[12]' '1'
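The two speaker tasks above simulate `cylc task message` calls whose delivery to the suite fails: the messages reach the job status file but not the suite, so only the subsequent `cylc poll` picks them up. As shown in the suite.rc, the status-file records use a `CYLC_MESSAGE=<timestamp>|<priority>|<message>` layout. A small sketch of writing and reading such records against a scratch file (the real path would be `${CYLC_TASK_LOG_ROOT}.status`):

    #!/bin/bash
    # Sketch: write simulated task-message records to a scratch status file and
    # extract the message text, mirroring the format used in the suite above.
    STATUS_FILE="$(mktemp)"
    {
        echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|NORMAL|hello1 1"
        echo "CYLC_MESSAGE=$(date +%FT%H:%M:%SZ)|NORMAL|greet 1"
    } >>"${STATUS_FILE}"
    # Print just the message field (third |-separated column).
    awk -F'|' '/^CYLC_MESSAGE=/ {print $3}' "${STATUS_FILE}"
    rm -f "${STATUS_FILE}"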
diff --git a/tests/job-kill/02-loadleveler.t b/tests/cylc-poll/06-loadleveler.t
similarity index 81%
copy from tests/job-kill/02-loadleveler.t
copy to tests/cylc-poll/06-loadleveler.t
index 0f02a1e..8f5b5d2 100755
--- a/tests/job-kill/02-loadleveler.t
+++ b/tests/cylc-poll/06-loadleveler.t
@@ -15,14 +15,15 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test killing of jobs submitted to loadleveler, slurm, pbs...
+# Test "cylc poll" for loadleveler, slurm, or pbs jobs.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 BATCH_SYS_NAME="${TEST_NAME_BASE##??-}"
-export CYLC_TEST_BATCH_TASK_HOST=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME]host")
-export CYLC_TEST_BATCH_SITE_DIRECTIVES=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME][directives]")
+RC_PREF="[test battery][batch systems][$BATCH_SYS_NAME]"
+export CYLC_TEST_BATCH_TASK_HOST=$( \
+    cylc get-global-config -i "${RC_PREF}host" 2>'/dev/null')
+export CYLC_TEST_BATCH_SITE_DIRECTIVES=$( \
+    cylc get-global-config -i "${RC_PREF}[directives]" 2>'/dev/null')
 if [[ -z "${CYLC_TEST_BATCH_TASK_HOST}" || "${CYLC_TEST_BATCH_TASK_HOST}" == None ]]
 then
     skip_all "\"[test battery][batch systems][$BATCH_SYS_NAME]host\" not defined"
@@ -33,7 +34,7 @@ then
     skip_all "Host "$CYLC_TEST_BATCH_TASK_HOST" unreachable"
 fi
 set_test_number 2
-
+#-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 # copy across passphrase as not all remote hosts will have a shared file system
@@ -51,9 +52,4 @@ run_ok $TEST_NAME cylc validate $SUITE_NAME
 TEST_NAME=$TEST_NAME_BASE-run
 suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
-purge_suite "${SUITE_NAME}"
-if [[ $CYLC_TEST_BATCH_TASK_HOST != 'localhost' ]]; then
-    ssh -n ${SSH_OPTS} "${CYLC_TEST_BATCH_TASK_HOST}" \
-        "rm -fr .cylc/${SUITE_NAME} cylc-run/${SUITE_NAME}"
-fi
-exit
+purge_suite $SUITE_NAME
diff --git a/tests/cylc-poll/06-loadleveler/reference.log b/tests/cylc-poll/06-loadleveler/reference.log
new file mode 100644
index 0000000..425be53
--- /dev/null
+++ b/tests/cylc-poll/06-loadleveler/reference.log
@@ -0,0 +1,40 @@
+2013/12/09 16:14:26 INFO - Thread-2 start (Event Handlers)
+2013/12/09 16:14:26 INFO - Thread-3 start (Poll & Kill Commands)
+2013/12/09 16:14:26 INFO - port:7766
+2013/12/09 16:14:26 INFO - Suite starting at 2013-12-09 16:14:26.247948
+2013/12/09 16:14:26 INFO - Log event clock: real time
+2013/12/09 16:14:26 INFO - Run mode: live
+2013/12/09 16:14:26 INFO - Initial point: 1
+2013/12/09 16:14:26 INFO - Final point: 1
+2013/12/09 16:14:26 INFO - Thread-4 start (Job Submission)
+2013/12/09 16:14:26 INFO - Thread-5 start (Request Handling)
+2013/12/09 16:14:26 INFO - [a.1] -(setting:queued)
+2013/12/09 16:14:26 INFO - [a.1] -(setting:submitting)
+2013/12/09 16:14:26 INFO - [a.1] -triggered off []
+2013/12/09 16:14:27 INFO - [a.1] -(current:submitting)> a.1 submitting now
+2013/12/09 16:14:28 INFO - [a.1] -(current:submitting)> a.1 submission succeeded
+2013/12/09 16:14:28 INFO - [a.1] -(setting:submitted)
+2013/12/09 16:14:28 INFO - [a.1] -(current:submitted)> a.1 submit_method_id=24985
+2013/12/09 16:14:28 INFO - [a.1] -(current:submitted)> a.1 started at 2013-12-09T16:14:28
+2013/12/09 16:14:28 INFO - [a.1] -(setting:running)
+2013/12/09 16:14:29 INFO - [b.1] -(setting:queued)
+2013/12/09 16:14:29 INFO - [b.1] -(setting:submitting)
+2013/12/09 16:14:29 INFO - [b.1] -triggered off ['a.1']
+2013/12/09 16:14:30 INFO - [b.1] -(current:submitting)> b.1 submitting now
+2013/12/09 16:14:30 INFO - [b.1] -(current:submitting)> b.1 submission succeeded
+2013/12/09 16:14:30 INFO - [b.1] -(setting:submitted)
+2013/12/09 16:14:30 INFO - [b.1] -(current:submitted)> b.1 submit_method_id=25081
+2013/12/09 16:14:31 INFO - [b.1] -(current:submitted)> b.1 started at 2013-12-09T16:14:30
+2013/12/09 16:14:31 INFO - [b.1] -(setting:running)
+2013/12/09 16:14:31 INFO - [b.1] -(current:running)> b.1 succeeded at 2013-12-09T16:14:31
+2013/12/09 16:14:31 INFO - [b.1] -(setting:succeeded)
+2013/12/09 16:14:31 INFO - [a.1] -polling now
+2013/12/09 16:14:31 INFO - Command succeeded: poll tasks(a,1,False)
+2013/12/09 16:14:33 INFO - [a.1] -(current:running)> polled a.1 started at 2013-12-09T16:14:27
+2013/12/09 16:14:49 INFO - [a.1] -(current:running)> a.1 succeeded at 2013-12-09T16:14:48
+2013/12/09 16:14:49 INFO - [a.1] -(setting:succeeded)
+2013/12/09 16:14:49 INFO - Stopping: 
+  + all non-cycling tasks have succeeded
+2013/12/09 16:14:49 INFO - Thread-4 exit (Job Submission)
+2013/12/09 16:14:50 INFO - Thread-2 exit (Event Handlers)
+2013/12/09 16:14:50 INFO - Thread-3 exit (Poll & Kill Commands)
diff --git a/tests/job-kill/02-loadleveler/suite.rc b/tests/cylc-poll/06-loadleveler/suite.rc
similarity index 51%
copy from tests/job-kill/02-loadleveler/suite.rc
copy to tests/cylc-poll/06-loadleveler/suite.rc
index a3da41a..d0cb220 100644
--- a/tests/job-kill/02-loadleveler/suite.rc
+++ b/tests/cylc-poll/06-loadleveler/suite.rc
@@ -1,32 +1,30 @@
 #!Jinja2
 [cylc]
    [[reference test]]
-       required run mode=live
-       expected task failures = t1.1
-       live mode suite timeout=5 # minutes
+       required run mode = live
+       live mode suite timeout = PT1M
 [scheduling]
     [[dependencies]]
-        graph=t1:start=>stop
+        graph = a:start => b
 [runtime]
-    [[t1]]
-        script=sleep 120
+    [[a]]
+        script = sleep 20
 {% if "CYLC_TEST_BATCH_TASK_HOST" in environ and environ["CYLC_TEST_BATCH_TASK_HOST"] %}
         [[[remote]]]
             host={{environ["CYLC_TEST_BATCH_TASK_HOST"]}}
 {% endif %}
         [[[job submission]]]
-            method=loadleveler
+            method = loadleveler
         [[[directives]]]
             class=serial
             job_type=serial
             notification=never
             resources=ConsumableCpus(1) ConsumableMemory(64mb)
             wall_clock_limit=180,120
-{% if "CYLC_TEST_BATCH_SITE_DIRECTIVES" in environ and environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"] %}
-            {{environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"]}}
+{% if "CYLC_TEST_BATCH_SITE_DIRECTIVES" in environ and
+        environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"] %}
+    {{environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"]}}
 {% endif %}
-    [[stop]]
-        script="""
-cylc kill $CYLC_SUITE_REG_NAME t1 1 || true
-cylc stop $CYLC_SUITE_REG_NAME
-"""
+
+    [[b]]
+        script = cylc poll $CYLC_SUITE_REG_NAME a 1
diff --git a/tests/cylc-poll/07-pbs.t b/tests/cylc-poll/07-pbs.t
new file mode 120000
index 0000000..e20c1f7
--- /dev/null
+++ b/tests/cylc-poll/07-pbs.t
@@ -0,0 +1 @@
+06-loadleveler.t
\ No newline at end of file
diff --git a/tests/cylc-poll/07-pbs/reference.log b/tests/cylc-poll/07-pbs/reference.log
new file mode 100644
index 0000000..49aa07f
--- /dev/null
+++ b/tests/cylc-poll/07-pbs/reference.log
@@ -0,0 +1,5 @@
+2013/12/09 16:14:26 INFO - Run mode: live
+2013/12/09 16:14:26 INFO - Initial point: 1
+2013/12/09 16:14:26 INFO - Final point: 1
+2013/12/09 16:14:26 INFO - [a.1] -triggered off []
+2013/12/09 16:14:29 INFO - [b.1] -triggered off ['a.1']
diff --git a/tests/cylc-poll/07-pbs/suite.rc b/tests/cylc-poll/07-pbs/suite.rc
new file mode 100644
index 0000000..66fbb49
--- /dev/null
+++ b/tests/cylc-poll/07-pbs/suite.rc
@@ -0,0 +1,25 @@
+#!Jinja2
+[cylc]
+   [[reference test]]
+       required run mode = live
+       live mode suite timeout = PT5M
+[scheduling]
+    [[dependencies]]
+        graph = a:start => b
+[runtime]
+    [[a]]
+        script = sleep 20
+{% if "CYLC_TEST_BATCH_TASK_HOST" in environ and environ["CYLC_TEST_BATCH_TASK_HOST"] %}
+        [[[remote]]]
+            host={{environ["CYLC_TEST_BATCH_TASK_HOST"]}}
+{% endif %}
+        [[[job submission]]]
+            method = pbs
+        [[[directives]]]
+{% if "CYLC_TEST_BATCH_SITE_DIRECTIVES" in environ and
+        environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"] %}
+    {{environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"]}}
+{% endif %}
+
+    [[b]]
+        script = cylc poll $CYLC_SUITE_REG_NAME a 1
diff --git a/tests/cylc-poll/08-slurm.t b/tests/cylc-poll/08-slurm.t
new file mode 120000
index 0000000..e20c1f7
--- /dev/null
+++ b/tests/cylc-poll/08-slurm.t
@@ -0,0 +1 @@
+06-loadleveler.t
\ No newline at end of file
diff --git a/tests/cylc-poll/08-slurm/reference.log b/tests/cylc-poll/08-slurm/reference.log
new file mode 100644
index 0000000..49aa07f
--- /dev/null
+++ b/tests/cylc-poll/08-slurm/reference.log
@@ -0,0 +1,5 @@
+2013/12/09 16:14:26 INFO - Run mode: live
+2013/12/09 16:14:26 INFO - Initial point: 1
+2013/12/09 16:14:26 INFO - Final point: 1
+2013/12/09 16:14:26 INFO - [a.1] -triggered off []
+2013/12/09 16:14:29 INFO - [b.1] -triggered off ['a.1']
diff --git a/tests/cylc-poll/08-slurm/suite.rc b/tests/cylc-poll/08-slurm/suite.rc
new file mode 100644
index 0000000..34ca029
--- /dev/null
+++ b/tests/cylc-poll/08-slurm/suite.rc
@@ -0,0 +1,25 @@
+#!Jinja2
+[cylc]
+   [[reference test]]
+       required run mode = live
+       live mode suite timeout = PT1M
+[scheduling]
+    [[dependencies]]
+        graph = a:start => b
+[runtime]
+    [[a]]
+        script = sleep 20
+{% if "CYLC_TEST_BATCH_TASK_HOST" in environ and environ["CYLC_TEST_BATCH_TASK_HOST"] %}
+        [[[remote]]]
+            host={{environ["CYLC_TEST_BATCH_TASK_HOST"]}}
+{% endif %}
+        [[[job submission]]]
+            method = slurm
+        [[[directives]]]
+{% if "CYLC_TEST_BATCH_SITE_DIRECTIVES" in environ and
+        environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"] %}
+    {{environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"]}}
+{% endif %}
+
+    [[b]]
+        script = cylc poll $CYLC_SUITE_REG_NAME a 1
diff --git a/tests/cylc-poll/09-lsf.t b/tests/cylc-poll/09-lsf.t
new file mode 120000
index 0000000..e20c1f7
--- /dev/null
+++ b/tests/cylc-poll/09-lsf.t
@@ -0,0 +1 @@
+06-loadleveler.t
\ No newline at end of file
diff --git a/tests/cylc-poll/09-lsf/reference.log b/tests/cylc-poll/09-lsf/reference.log
new file mode 100644
index 0000000..49aa07f
--- /dev/null
+++ b/tests/cylc-poll/09-lsf/reference.log
@@ -0,0 +1,5 @@
+2013/12/09 16:14:26 INFO - Run mode: live
+2013/12/09 16:14:26 INFO - Initial point: 1
+2013/12/09 16:14:26 INFO - Final point: 1
+2013/12/09 16:14:26 INFO - [a.1] -triggered off []
+2013/12/09 16:14:29 INFO - [b.1] -triggered off ['a.1']
diff --git a/tests/cylc-poll/09-lsf/suite.rc b/tests/cylc-poll/09-lsf/suite.rc
new file mode 100644
index 0000000..dc98581
--- /dev/null
+++ b/tests/cylc-poll/09-lsf/suite.rc
@@ -0,0 +1,25 @@
+#!Jinja2
+[cylc]
+   [[reference test]]
+       required run mode = live
+       live mode suite timeout = PT1M
+[scheduling]
+    [[dependencies]]
+        graph = a:start => b
+[runtime]
+    [[a]]
+        script = sleep 20
+{% if "CYLC_TEST_BATCH_TASK_HOST" in environ and environ["CYLC_TEST_BATCH_TASK_HOST"] %}
+        [[[remote]]]
+            host={{environ["CYLC_TEST_BATCH_TASK_HOST"]}}
+{% endif %}
+        [[[job submission]]]
+            method = lsf
+        [[[directives]]]
+{% if "CYLC_TEST_BATCH_SITE_DIRECTIVES" in environ and
+        environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"] %}
+    {{environ["CYLC_TEST_BATCH_SITE_DIRECTIVES"]}}
+{% endif %}
+
+    [[b]]
+        script = cylc poll $CYLC_SUITE_REG_NAME a 1
diff --git a/tests/cylc-scan/00-simple.t b/tests/cylc-scan/00-simple.t
index 987f6b3..9a3fdd6 100644
--- a/tests/cylc-scan/00-simple.t
+++ b/tests/cylc-scan/00-simple.t
@@ -19,6 +19,7 @@
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
+export CYLC_CONF_PATH=
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE simple
 #-------------------------------------------------------------------------------
diff --git a/tests/cylc-scan/01-hosts.t b/tests/cylc-scan/01-hosts.t
index ef156b2..7bcb951 100644
--- a/tests/cylc-scan/01-hosts.t
+++ b/tests/cylc-scan/01-hosts.t
@@ -17,7 +17,8 @@
 #-------------------------------------------------------------------------------
 # Test cylc scan with multiple hosts
 . "$(dirname "$0")/test_header"
-HOSTS="$(cylc get-global-config '--item=[suite host scanning]hosts')"
+HOSTS="$( \
+    cylc get-global-config '--item=[suite host scanning]hosts' 2>'/dev/null')"
 if [[ -z "${HOSTS}" || "${HOSTS}" == 'localhost' ]]; then
     skip_all '"[suite host scanning]hosts" not defined with remote suite hosts'
 fi
@@ -62,6 +63,7 @@ for ITEM in $(<'host-work-dirs.list'); do
         rm -fr "$(cylc get-global-config '--print-run-dir')/${UUID}-${HOST}"
     fi
     rm -fr "${HOME}/.cylc/${UUID}-${HOST}"
+    cylc unregister "--host=${HOST}" "${UUID}-${HOST}"
 done
 #-------------------------------------------------------------------------------
 exit
diff --git a/tests/cylc-submit/00-bg.t b/tests/cylc-submit/00-bg.t
index 8aa6e4c..79adbbb 100755
--- a/tests/cylc-submit/00-bg.t
+++ b/tests/cylc-submit/00-bg.t
@@ -24,9 +24,10 @@ if [[ "${TEST_NAME_BASE}" == *remote* ]]; then
     if [[ "${TEST_NAME_BASE}" == *remote-with-shared-fs* ]]; then
         CONF_KEY='remote host with shared fs'
     fi
-    HOST="$(cylc get-global-config "--item=[test battery]${CONF_KEY}")"
+    RC_ITEM="[test battery]${CONF_KEY}"
+    HOST="$(cylc get-global-config "--item=${RC_ITEM}" 2>'/dev/null')"
     if [[ -z "${HOST}" ]]; then
-        skip_all "[test battery]${CONF_KEY} not set"
+        skip_all "\"[test battery]${CONF_KEY}\" not set"
     fi
     CYLC_TEST_HOST="${HOST}"
 fi
@@ -44,12 +45,14 @@ then
 fi
 if [[ -n "${CONFIGURED_SYS_NAME}" ]]; then
     ITEM_KEY="[test battery][batch systems][$CONFIGURED_SYS_NAME]host"
-    CYLC_TEST_HOST="$(cylc get-global-config "--item=${ITEM_KEY}")"
+    CYLC_TEST_HOST="$( \
+        cylc get-global-config "--item=${ITEM_KEY}" 2>'/dev/null')"
     if [[ -z "${CYLC_TEST_HOST}" ]]; then
-        skip_all "${ITEM_KEY} not set"
+        skip_all "\"${ITEM_KEY}\" not set"
     fi
     ITEM_KEY="[test battery][batch systems][$CONFIGURED_SYS_NAME][directives]"
-    CYLC_TEST_DIRECTIVES="$(cylc get-global-config "--item=${ITEM_KEY}")"
+    export CYLC_TEST_DIRECTIVES="$( \
+        cylc get-global-config "--item=${ITEM_KEY}" 2>'/dev/null')"
     CYLC_TEST_BATCH_SYS_NAME=$CONFIGURED_SYS_NAME
 fi
 export CYLC_CONF_DIR=
diff --git a/tests/cylc-submit/00-bg/suite.rc b/tests/cylc-submit/00-bg/suite.rc
index 965b78b..d184e84 100644
--- a/tests/cylc-submit/00-bg/suite.rc
+++ b/tests/cylc-submit/00-bg/suite.rc
@@ -14,9 +14,11 @@
             method = {{CYLC_TEST_BATCH_SYS_NAME}}
 {% if CYLC_TEST_BATCH_SYS_NAME == "loadleveler" %}
         [[[directives]]]
-            class = serial
             job_type = serial
             notification = never
             wall_clock_limit = 120,60
+{% if "CYLC_TEST_DIRECTIVES" in environ and environ["CYLC_TEST_DIRECTIVES"] %}
+    {{environ["CYLC_TEST_DIRECTIVES"]}}
+{% endif %}
 {% endif %}
 {% endif %}
diff --git a/tests/cylc-trigger/03-edit-run.t b/tests/cylc-trigger/03-edit-run.t
index 9f5ffa0..ef9fd1d 100644
--- a/tests/cylc-trigger/03-edit-run.t
+++ b/tests/cylc-trigger/03-edit-run.t
@@ -38,7 +38,7 @@ sed -i 's/^--- original $/--- original/; s/^+++ edited $/+++ edited/' $DIFF_LOG
 cmp_ok $DIFF_LOG - <<__END__
 --- original
 +++ edited
-@@ -129,7 +129,7 @@
+@@ -125,7 +125,7 @@
  echo
  
  # SCRIPT:
diff --git a/tests/cylc-trigger/03-edit-run/suite.rc b/tests/cylc-trigger/03-edit-run/suite.rc
index f4996f3..1e68cab 100644
--- a/tests/cylc-trigger/03-edit-run/suite.rc
+++ b/tests/cylc-trigger/03-edit-run/suite.rc
@@ -13,5 +13,7 @@ second task fixes and retriggers it with an edit-run."""
     [[broken-task]]
         script = /bin/false
     [[fixer-task]]
-        script = """
-cylc trigger --edit $CYLC_SUITE_NAME broken-task 1"""
+        command scripting = """
+cylc trigger --edit $CYLC_SUITE_NAME broken-task 1 << __END__
+y
+__END__"""
diff --git a/tests/cylc-trigger/basic/reference.log b/tests/cylc-trigger/basic/reference.log
index dbd9886..9a0334d 100644
--- a/tests/cylc-trigger/basic/reference.log
+++ b/tests/cylc-trigger/basic/reference.log
@@ -13,7 +13,7 @@
 2014-10-14T16:38:15+01 INFO - [foo.1] -(current:submitted)> foo.1 started at 2014-10-14T16:38:15+01
 2014-10-14T16:38:16+01 INFO - Command succeeded: trigger task(bar,1,False)
 2014-10-14T16:38:17+01 INFO - [bar.1] -initiate job-submit
-2014-10-14T16:38:17+01 INFO - [bar.1] -triggered off [None]
+2014-10-14T16:38:17+01 INFO - [bar.1] -triggered off []
 2014-10-14T16:38:18+01 INFO - 11353
 2014-10-14T16:38:18+01 INFO - [bar.1] -submit_method_id=11353
 2014-10-14T16:38:18+01 INFO - [bar.1] -submission succeeded
diff --git a/tests/database/00-simple.t b/tests/database/00-simple.t
index 06039db..bb80897 100644
--- a/tests/database/00-simple.t
+++ b/tests/database/00-simple.t
@@ -15,29 +15,44 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# basic tests for suite database contents
-. $(dirname $0)/test_header
-#-------------------------------------------------------------------------------
-set_test_number 5
-#-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE simple
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --debug $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-db-schema
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db ".schema" > schema
-cmp_ok $TEST_SOURCE_DIR/simple/db-schema schema
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-db-states
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db "select name, cycle, status from task_states order by name" > states
-cmp_ok $TEST_SOURCE_DIR/simple/db-states states
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-db-events
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db "select name, cycle, event, message, misc from task_events" > events
-cmp_ok $TEST_SOURCE_DIR/simple/db-events events
-#-------------------------------------------------------------------------------
-purge_suite $SUITE_NAME
+# Suite database content, a basic non-cycling suite of 3 tasks
+. "$(dirname "$0")/test_header"
+set_test_number 7
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+
+run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" cylc run --debug "${SUITE_NAME}"
+
+DB_FILE="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}/cylc-suite.db"
+
+NAME='schema.out'
+sqlite3 "${DB_FILE}" ".schema" | sort >"${NAME}"
+cmp_ok "${TEST_SOURCE_DIR}/${TEST_NAME_BASE}/${NAME}" "${NAME}"
+
+NAME='select-task-states.out'
+sqlite3 "${DB_FILE}" 'SELECT name, cycle, status FROM task_states ORDER BY name' \
+    >"${NAME}"
+cmp_ok "${TEST_SOURCE_DIR}/${TEST_NAME_BASE}/${NAME}" "${NAME}"
+
+NAME='select-task-events.out'
+sqlite3 "${DB_FILE}" 'SELECT name, cycle, event, message, misc FROM task_events' \
+    >"${NAME}"
+cmp_ok "${TEST_SOURCE_DIR}/${TEST_NAME_BASE}/${NAME}" "${NAME}"
+
+NAME='select-task-job-logs.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT cycle, name, submit_num, filename
+     FROM task_job_logs ORDER BY name, filename' \
+    >"${NAME}"
+cmp_ok "${TEST_SOURCE_DIR}/${TEST_NAME_BASE}/${NAME}" "${NAME}"
+
+NAME='select-task-jobs.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT cycle, name, submit_num, try_num, submit_status, run_status,
+            user_at_host, batch_sys_name
+     FROM task_jobs ORDER BY name' \
+    >"${NAME}"
+cmp_ok "${TEST_SOURCE_DIR}/${TEST_NAME_BASE}/${NAME}" "${NAME}"
+
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/database/00-simple/schema.out b/tests/database/00-simple/schema.out
new file mode 100644
index 0000000..6dee512
--- /dev/null
+++ b/tests/database/00-simple/schema.out
@@ -0,0 +1,6 @@
+CREATE TABLE broadcast_events(time TEXT, change TEXT, point TEXT, namespace TEXT, key TEXT, value TEXT);
+CREATE TABLE broadcast_states(point TEXT, namespace TEXT, key TEXT, value TEXT, PRIMARY KEY(point, namespace, key));
+CREATE TABLE task_events(name TEXT, cycle TEXT, time TEXT, submit_num INTEGER, event TEXT, message TEXT, misc TEXT);
+CREATE TABLE task_job_logs(cycle TEXT, name TEXT, submit_num INTEGER, filename TEXT, location TEXT, mtime TEXT, size INTEGER, PRIMARY KEY(cycle, name, submit_num, filename));
+CREATE TABLE task_jobs(cycle TEXT, name TEXT, submit_num INTEGER, is_manual_submit INTEGER, try_num INTEGER, time_submit TEXT, time_submit_exit TEXT, submit_status INTEGER, time_run TEXT, time_run_exit TEXT, run_signal TEXT, run_status INTEGER, user_at_host TEXT, batch_sys_name TEXT, batch_sys_job_id TEXT, PRIMARY KEY(cycle, name, submit_num));
+CREATE TABLE task_states(name TEXT, cycle TEXT, time_created TEXT, time_updated TEXT, submit_num INTEGER, is_manual_submit INTEGER, try_num INTEGER, host TEXT, submit_method TEXT, submit_method_id TEXT, status TEXT, PRIMARY KEY(name, cycle));
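The rewritten database tests read these tables back with `sqlite3`. For hand inspection, the same queries can be run directly against a suite's run-directory database; a minimal sketch, with the suite name as a placeholder:

    #!/bin/bash
    # Sketch: inspect a suite run database by hand with the same queries as the
    # rewritten tests/database/00-simple.t above. SUITE_NAME is a placeholder.
    SUITE_NAME='my-suite'
    DB_FILE="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/cylc-suite.db"
    sqlite3 "${DB_FILE}" '.schema' | sort
    sqlite3 "${DB_FILE}" \
        'SELECT cycle, name, submit_num, try_num, submit_status, run_status,
                user_at_host, batch_sys_name
         FROM task_jobs ORDER BY name'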
diff --git a/tests/database/simple/db-events b/tests/database/00-simple/select-task-events.out
similarity index 100%
rename from tests/database/simple/db-events
rename to tests/database/00-simple/select-task-events.out
diff --git a/tests/database/00-simple/select-task-job-logs.out b/tests/database/00-simple/select-task-job-logs.out
new file mode 100644
index 0000000..03d8bd5
--- /dev/null
+++ b/tests/database/00-simple/select-task-job-logs.out
@@ -0,0 +1,15 @@
+1|bar|1|job
+1|bar|1|job-activity.log
+1|bar|1|job.err
+1|bar|1|job.out
+1|bar|1|job.status
+1|baz|1|job
+1|baz|1|job-activity.log
+1|baz|1|job.err
+1|baz|1|job.out
+1|baz|1|job.status
+1|foo|1|job
+1|foo|1|job-activity.log
+1|foo|1|job.err
+1|foo|1|job.out
+1|foo|1|job.status
diff --git a/tests/database/00-simple/select-task-jobs.out b/tests/database/00-simple/select-task-jobs.out
new file mode 100644
index 0000000..bbbefc5
--- /dev/null
+++ b/tests/database/00-simple/select-task-jobs.out
@@ -0,0 +1,3 @@
+1|bar|1|1|0|0|localhost|background
+1|baz|1|1|0|0|localhost|background
+1|foo|1|1|0|0|localhost|background
diff --git a/tests/database/simple/db-states b/tests/database/00-simple/select-task-states.out
similarity index 100%
rename from tests/database/simple/db-states
rename to tests/database/00-simple/select-task-states.out
diff --git a/tests/database/simple/suite.rc b/tests/database/00-simple/suite.rc
similarity index 79%
rename from tests/database/simple/suite.rc
rename to tests/database/00-simple/suite.rc
index f80cfba..6d5f478 100644
--- a/tests/database/simple/suite.rc
+++ b/tests/database/00-simple/suite.rc
@@ -3,4 +3,4 @@
         graph = "foo => bar => baz"
 [runtime]
     [[foo, bar, baz]]
-        script = "sleep 5"
+        script = true
diff --git a/tests/job-submission/05-activity-log.t b/tests/database/01-broadcast.t
similarity index 50%
copy from tests/job-submission/05-activity-log.t
copy to tests/database/01-broadcast.t
index 6544823..9e73636 100755
--- a/tests/job-submission/05-activity-log.t
+++ b/tests/database/01-broadcast.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-#
+# 
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,25 +15,42 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test writing various messages to the job activity log.
-. $(dirname $0)/test_header
-#-------------------------------------------------------------------------------
-set_test_number 7
-#-------------------------------------------------------------------------------
+# Suite database content, broadcast + manual trigger to recover a failure.
+. "$(dirname "$0")/test_header"
+set_test_number 5
 install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
-#-------------------------------------------------------------------------------
+
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
 suite_run_ok "${TEST_NAME_BASE}-run" \
     cylc run --debug --reference-test "${SUITE_NAME}"
 
-SUITE_RUN_DIR="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}"
-T1_ACTIVITY_LOG="${SUITE_RUN_DIR}/log/job/1/t1/NN/job-activity.log"
+DB_FILE="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}/cylc-suite.db"
+
+NAME='select-broadcast-events.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT change, point, namespace, key, value FROM broadcast_events' >"${NAME}"
+cmp_ok "${NAME}" <<'__SELECT__'
++|1|t1|[environment]HELLO|Hello
+__SELECT__
+
+NAME='select-broadcast-states.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT point, namespace, key, value FROM broadcast_states' >"${NAME}"
+cmp_ok "${NAME}" <<'__SELECT__'
+1|t1|[environment]HELLO|Hello
+__SELECT__
+
+NAME='select-task-jobs.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT cycle, name, submit_num, is_manual_submit, submit_status, run_status,
+            user_at_host, batch_sys_name
+     FROM task_jobs ORDER BY name' \
+    >"${NAME}"
+cmp_ok "${NAME}" <<'__SELECT__'
+1|recover-t1|1|0|0|0|localhost|background
+1|t1|1|0|0|1|localhost|background
+1|t1|2|1|0|0|localhost|background
+__SELECT__
 
-grep_ok 'SUBMIT-OUT:' "${T1_ACTIVITY_LOG}"
-grep_ok 'KILL-ERR:' "${T1_ACTIVITY_LOG}"
-grep_ok 'OSError: \[Errno 3\] No such process' "${T1_ACTIVITY_LOG}"
-grep_ok 'POLL-OUT: polled t1\.1 failed at unknown-time' "${T1_ACTIVITY_LOG}"
-grep_ok "EVENT-OUT: failed ${SUITE_NAME} t1\\.1 job failed" "${T1_ACTIVITY_LOG}"
-#-------------------------------------------------------------------------------
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/database/01-broadcast/reference.log b/tests/database/01-broadcast/reference.log
new file mode 100644
index 0000000..32b650b
--- /dev/null
+++ b/tests/database/01-broadcast/reference.log
@@ -0,0 +1,7 @@
+2015-06-10T11:08:30+01 INFO - Run mode: live
+2015-06-10T11:08:30+01 INFO - Initial point: 1
+2015-06-10T11:08:30+01 INFO - Final point: 1
+2015-06-10T11:08:30+01 INFO - Cold Start 1
+2015-06-10T11:08:30+01 INFO - [t1.1] -triggered off []
+2015-06-10T11:08:31+01 INFO - [recover-t1.1] -triggered off ['t1.1']
+2015-06-10T11:08:35+01 INFO - [t1.1] -triggered off []
diff --git a/tests/database/01-broadcast/suite.rc b/tests/database/01-broadcast/suite.rc
new file mode 100644
index 0000000..7539a9b
--- /dev/null
+++ b/tests/database/01-broadcast/suite.rc
@@ -0,0 +1,20 @@
+[cylc]
+    [[reference test]]
+        allow task failures = True
+        live mode suite timeout = PT1M
+[scheduling]
+    [[dependencies]]
+        graph = """
+t1:submit => recover-t1
+"""
+[runtime]
+    [[t1]]
+        script=test -n "${HELLO}"
+        retry delays=PT1M
+        [[[environment]]]
+            HELLO=
+    [[recover-t1]]
+        script="""
+cylc broadcast -p 1 -n t1 -s'[environment]HELLO=Hello' "${CYLC_SUITE_NAME}"
+cylc trigger "${CYLC_SUITE_NAME}" t1 1
+"""
diff --git a/tests/events/00-suite.t b/tests/database/02-retry.t
old mode 100644
new mode 100755
similarity index 57%
copy from tests/events/00-suite.t
copy to tests/database/02-retry.t
index d9dcf9b..94200e9
--- a/tests/events/00-suite.t
+++ b/tests/database/02-retry.t
@@ -15,16 +15,28 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Suite database content, "task_jobs" table after a task retries.
 . "$(dirname "$0")/test_header"
-set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
+set_test_number 3
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
 suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+    cylc run --debug --reference-test "${SUITE_NAME}"
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+DB_FILE="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}/cylc-suite.db"
+
+NAME='select-task-jobs.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT cycle, name, submit_num, try_num, submit_status, run_status,
+            user_at_host, batch_sys_name
+     FROM task_jobs ORDER BY name' \
+    >"${NAME}"
+cmp_ok "${NAME}" <<'__SELECT__'
+20200101T0000Z|t1|1|1|0|1|localhost|background
+20200101T0000Z|t1|2|2|0|1|localhost|background
+20200101T0000Z|t1|3|3|0|0|localhost|background
+__SELECT__
+
+purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/database/02-retry/reference.log b/tests/database/02-retry/reference.log
new file mode 100644
index 0000000..d3ddb1c
--- /dev/null
+++ b/tests/database/02-retry/reference.log
@@ -0,0 +1,8 @@
+2015-06-10T12:41:58Z INFO - port:7766
+2015-06-10T12:41:58Z INFO - Run mode: live
+2015-06-10T12:41:58Z INFO - Initial point: 20200101T0000Z
+2015-06-10T12:41:58Z INFO - Final point: 20200101T0000Z
+2015-06-10T12:41:58Z INFO - Cold Start 20200101T0000Z
+2015-06-10T12:41:58Z INFO - [t1.20200101T0000Z] -triggered off []
+2015-06-10T12:42:02Z INFO - [t1.20200101T0000Z] -triggered off []
+2015-06-10T12:42:05Z INFO - [t1.20200101T0000Z] -triggered off []
diff --git a/tests/database/02-retry/suite.rc b/tests/database/02-retry/suite.rc
new file mode 100644
index 0000000..c83030d
--- /dev/null
+++ b/tests/database/02-retry/suite.rc
@@ -0,0 +1,15 @@
+[cylc]
+    UTC mode=True
+    [[reference test]]
+        allow task failures = True
+        live mode suite timeout = PT1M
+[scheduling]
+    initial cycle point=2020
+    final cycle point=2020
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = t1
+[runtime]
+    [[t1]]
+        script=test "${CYLC_TASK_SUBMIT_NUMBER}" -gt 2
+        retry delays=2*PT0S
diff --git a/tests/database/03-remote.t b/tests/database/03-remote.t
new file mode 100755
index 0000000..d7c4c5b
--- /dev/null
+++ b/tests/database/03-remote.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Suite database content, "task_jobs" table with a remote job.
+. "$(dirname "$0")/test_header"
+export CYLC_TEST_HOST=$( \
+    cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
+if [[ -z "${CYLC_TEST_HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
+fi
+set_test_number 3
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+# Install suite passphrase.
+set -eu
+SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+${SSH} "${CYLC_TEST_HOST}" \
+    "mkdir -p .cylc/${SUITE_NAME}/ && cat >.cylc/${SUITE_NAME}/passphrase" \
+    <"${TEST_DIR}/${SUITE_NAME}/passphrase"
+set +eu
+
+run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --debug --reference-test "${SUITE_NAME}"
+
+DB_FILE="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}/cylc-suite.db"
+
+NAME='select-task-jobs.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT cycle, name, submit_num, try_num, submit_status, run_status,
+            user_at_host, batch_sys_name
+     FROM task_jobs ORDER BY name' \
+    >"${NAME}"
+cmp_ok "${NAME}" <<__SELECT__
+20200101T0000Z|t1|1|1|0|0|localhost|background
+20200101T0000Z|t2|1|1|0|0|${CYLC_TEST_HOST}|background
+__SELECT__
+
+if [[ "$CYLC_TEST_HOST" != 'localhost' ]]; then
+    $SSH -n "$CYLC_TEST_HOST" "rm -rf '.cylc/$SUITE_NAME' 'cylc-run/$SUITE_NAME'"
+fi
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/database/03-remote/reference.log b/tests/database/03-remote/reference.log
new file mode 100644
index 0000000..e8f06d6
--- /dev/null
+++ b/tests/database/03-remote/reference.log
@@ -0,0 +1,6 @@
+2015-06-10T12:02:21Z INFO - Run mode: live
+2015-06-10T12:02:21Z INFO - Initial point: 20200101T0000Z
+2015-06-10T12:02:21Z INFO - Final point: 20200101T0000Z
+2015-06-10T12:02:21Z INFO - Cold Start 20200101T0000Z
+2015-06-10T12:02:22Z INFO - [t2.20200101T0000Z] -triggered off []
+2015-06-10T12:02:22Z INFO - [t1.20200101T0000Z] -triggered off []
diff --git a/tests/database/03-remote/suite.rc b/tests/database/03-remote/suite.rc
new file mode 100644
index 0000000..c15e15c
--- /dev/null
+++ b/tests/database/03-remote/suite.rc
@@ -0,0 +1,20 @@
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        live mode suite timeout = PT1M
+[scheduling]
+    initial cycle point=2020
+    final cycle point=2020
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = """
+t1
+t2
+"""
+[runtime]
+    [[t1]]
+        script=true
+    [[t2]]
+        script=true
+        [[[remote]]]
+            host=$CYLC_TEST_HOST
diff --git a/tests/events/00-suite.t b/tests/database/04-lock-recover.t
old mode 100644
new mode 100755
similarity index 58%
copy from tests/events/00-suite.t
copy to tests/database/04-lock-recover.t
index d9dcf9b..0935da2
--- a/tests/events/00-suite.t
+++ b/tests/database/04-lock-recover.t
@@ -15,16 +15,35 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Suite database content, "task_states" table after recovery of a locked public database.
 . "$(dirname "$0")/test_header"
-set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
+set_test_number 3
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
 suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+    cylc run --debug --reference-test "${SUITE_NAME}"
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+DB_FILE="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}/cylc-suite.db"
+
+NAME='select-task-states.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT cycle, name, status FROM task_states ORDER BY name' \
+    >"${NAME}"
+cmp_ok "${NAME}" <<'__SELECT__'
+1|done|succeeded
+1|locker|succeeded
+1|t0|succeeded
+1|t1|succeeded
+1|t2|succeeded
+1|t3|succeeded
+1|t4|succeeded
+1|t5|succeeded
+1|t6|succeeded
+1|t7|succeeded
+1|t8|succeeded
+1|t9|succeeded
+__SELECT__
+
+purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/database/04-lock-recover/bin/cylc-db-lock b/tests/database/04-lock-recover/bin/cylc-db-lock
new file mode 100755
index 0000000..1073fb8
--- /dev/null
+++ b/tests/database/04-lock-recover/bin/cylc-db-lock
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+"""Lock a suite's database file."""
+
+from fcntl import lockf, LOCK_SH
+import os
+from subprocess import call
+
+def main():
+    handle = open(
+        os.path.join(os.getenv("CYLC_SUITE_RUN_DIR"), "cylc-suite.db"))
+    lockf(handle, LOCK_SH)
+    call([
+        "cylc", "task", "message",
+        "%s I have locked the public database file" % (
+            os.getenv("CYLC_TASK_CYCLE_POINT"))])
+    suite_log_dir = os.getenv("CYLC_SUITE_LOG_DIR")
+    while True:
+        for line in open(os.path.join(suite_log_dir, "log")):
+            if "write attempt (1) did not complete" in line:
+                return
+
+if __name__ == "__main__":
+    main()
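
cylc-db-lock above takes a shared lockf() lock on the suite's public cylc-suite.db, reports the fact with "cylc task message" (matching the locker task's custom "lock" output), then polls the suite log until the scheduler records a write attempt that did not complete. A rough bash illustration of the hold-and-report step, assuming util-linux flock(1):

# Sketch only: hold a lock on the public database and report it, as the helper does.
# NB: flock(1) takes BSD flock(2) locks, while the helper's lockf() takes the POSIX
# fcntl locks that SQLite itself uses, so this illustrates the shape, not a drop-in.
(
    flock --shared 9
    cylc task message \
        "${CYLC_TASK_CYCLE_POINT} I have locked the public database file"
    sleep 60    # the Python helper instead polls the suite log before releasing
) 9<"${CYLC_SUITE_RUN_DIR}/cylc-suite.db"
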
diff --git a/tests/database/04-lock-recover/reference.log b/tests/database/04-lock-recover/reference.log
new file mode 100644
index 0000000..90f35dc
--- /dev/null
+++ b/tests/database/04-lock-recover/reference.log
@@ -0,0 +1,16 @@
+2015-06-10T21:54:26Z INFO - Run mode: live
+2015-06-10T21:54:26Z INFO - Initial point: 1
+2015-06-10T21:54:26Z INFO - Final point: 1
+2015-06-10T21:54:26Z INFO - Cold Start 1
+2015-06-10T21:54:26Z INFO - [locker.1] -triggered off []
+2015-06-10T21:54:47Z INFO - [t6.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t5.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t0.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t1.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t8.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t9.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t4.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t7.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t2.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t3.1] -triggered off ['locker.1']
+2015-06-10T21:54:59Z INFO - [done.1] -triggered off ['t0.1', 't1.1', 't2.1', 't3.1', 't4.1', 't5.1', 't6.1', 't7.1', 't8.1', 't9.1']
diff --git a/tests/database/04-lock-recover/suite.rc b/tests/database/04-lock-recover/suite.rc
new file mode 100644
index 0000000..bdc1ee4
--- /dev/null
+++ b/tests/database/04-lock-recover/suite.rc
@@ -0,0 +1,25 @@
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        required run mode = live
+        live mode suite timeout = PT2M
+[scheduling]
+    [[dependencies]]
+        graph = """
+locker:lock => TRUES:succeed-all => done
+"""
+[runtime]
+    [[locker]]
+        script = timeout 60 cylc-db-lock
+        [[[outputs]]]
+            lock = "[] I have locked the public database file"
+    [[TRUES]]
+        script = true
+    [[t0, t1, t2, t3, t4, t5, t6, t7, t8, t9]]
+        inherit = TRUES
+    [[done]]
+        script = """
+while ! grep -F -q 'cylc-suite.db: recovered' "${CYLC_SUITE_LOG_DIR}/log"; do
+    sleep 1
+done
+"""
diff --git a/tests/events/00-suite.t b/tests/database/05-lock-recover-100.t
old mode 100644
new mode 100755
similarity index 59%
copy from tests/events/00-suite.t
copy to tests/database/05-lock-recover-100.t
index d9dcf9b..e01b2ab
--- a/tests/events/00-suite.t
+++ b/tests/database/05-lock-recover-100.t
@@ -15,16 +15,34 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Suite database content, "task_states" table after prolonged locking and recovery of the public database.
 . "$(dirname "$0")/test_header"
-set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
+set_test_number 3
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
 suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+    cylc run --debug --reference-test "${SUITE_NAME}"
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+DB_FILE="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}/cylc-suite.db"
+
+NAME='select-task-states.out'
+sqlite3 "${DB_FILE}" \
+    'SELECT cycle, name, status FROM task_states ORDER BY name' \
+    >"${NAME}"
+cmp_ok "${NAME}" <<'__SELECT__'
+1|locker|succeeded
+1|t0|succeeded
+1|t1|succeeded
+1|t2|succeeded
+1|t3|succeeded
+1|t4|succeeded
+1|t5|succeeded
+1|t6|succeeded
+1|t7|succeeded
+1|t8|succeeded
+1|t9|succeeded
+__SELECT__
+
+purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/database/05-lock-recover-100/bin/cylc-db-lock b/tests/database/05-lock-recover-100/bin/cylc-db-lock
new file mode 100755
index 0000000..ec781c3
--- /dev/null
+++ b/tests/database/05-lock-recover-100/bin/cylc-db-lock
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""Lock a suite's database file."""
+
+from fcntl import lockf, LOCK_SH
+import os
+import time
+from subprocess import call
+
+def main():
+    handle = open(
+        os.path.join(os.getenv("CYLC_SUITE_RUN_DIR"), "cylc-suite.db"))
+    lockf(handle, LOCK_SH)
+    call([
+        "cylc", "task", "message",
+        "%s I have locked the public database file" % (
+            os.getenv("CYLC_TASK_CYCLE_POINT"))])
+    suite_log_dir = os.getenv("CYLC_SUITE_LOG_DIR")
+    while True:
+        for line in open(os.path.join(suite_log_dir, "log")):
+            if "cylc-suite.db: recovered from" in line:
+                return
+        time.sleep(1)
+
+if __name__ == "__main__":
+    main()
diff --git a/tests/database/05-lock-recover-100/reference.log b/tests/database/05-lock-recover-100/reference.log
new file mode 100644
index 0000000..8bdf060
--- /dev/null
+++ b/tests/database/05-lock-recover-100/reference.log
@@ -0,0 +1,15 @@
+2015-06-10T21:54:26Z INFO - Run mode: live
+2015-06-10T21:54:26Z INFO - Initial point: 1
+2015-06-10T21:54:26Z INFO - Final point: 1
+2015-06-10T21:54:26Z INFO - Cold Start 1
+2015-06-10T21:54:26Z INFO - [locker.1] -triggered off []
+2015-06-10T21:54:47Z INFO - [t6.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t5.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t0.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t1.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t8.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t9.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t4.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t7.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t2.1] -triggered off ['locker.1']
+2015-06-10T21:54:47Z INFO - [t3.1] -triggered off ['locker.1']
diff --git a/tests/database/05-lock-recover-100/suite.rc b/tests/database/05-lock-recover-100/suite.rc
new file mode 100644
index 0000000..622d26c
--- /dev/null
+++ b/tests/database/05-lock-recover-100/suite.rc
@@ -0,0 +1,19 @@
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        required run mode = live
+        live mode suite timeout = PT6M
+[scheduling]
+    [[dependencies]]
+        graph = """
+locker:lock => TRUES:succeed-all
+"""
+[runtime]
+    [[locker]]
+        script = timeout 400 cylc-db-lock
+        [[[outputs]]]
+            lock = "[] I have locked the public database file"
+    [[TRUES]]
+        script = true
+    [[t0, t1, t2, t3, t4, t5, t6, t7, t8, t9]]
+        inherit = TRUES
diff --git a/tests/database/simple/db-schema b/tests/database/simple/db-schema
deleted file mode 100644
index d45198d..0000000
--- a/tests/database/simple/db-schema
+++ /dev/null
@@ -1,3 +0,0 @@
-CREATE TABLE broadcast_settings(timestamp TEXT, broadcast TEXT);
-CREATE TABLE task_events(name TEXT, cycle TEXT, time INTEGER, submit_num INTEGER, event TEXT, message TEXT, misc TEXT);
-CREATE TABLE task_states(name TEXT, cycle TEXT, time_created TEXT, time_updated TEXT, submit_num INTEGER, is_manual_submit INTEGER, try_num INTEGER, host TEXT, submit_method TEXT, submit_method_id TEXT, status TEXT, PRIMARY KEY(name, cycle));
diff --git a/tests/deprecations/00-all/suite.rc b/tests/deprecations/00-all/suite.rc
index bfa75cb..3ece16a 100644
--- a/tests/deprecations/00-all/suite.rc
+++ b/tests/deprecations/00-all/suite.rc
@@ -33,6 +33,8 @@
         pre-command scripting = "echo pre-script" # deprecate
         command scripting = "echo script" # deprecate
         post-command scripting = "echo post-script" # deprecate
+        [[[dummy mode]]]
+            command scripting = "echo script" # deprecate
 
 [visualization]
     enable live graph movie = True # obsolete
diff --git a/tests/directives/00-loadleveler.t b/tests/directives/00-loadleveler.t
index 6c3bedd..4e8ab0a 100644
--- a/tests/directives/00-loadleveler.t
+++ b/tests/directives/00-loadleveler.t
@@ -24,18 +24,19 @@
 # export an environment variable for this - allows a script to be used to 
 # select a compute node and have that same host used by the suite.
 BATCH_SYS_NAME="${TEST_NAME_BASE##??-}"
-export CYLC_TEST_BATCH_TASK_HOST=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME]host")
-export CYLC_TEST_BATCH_SITE_DIRECTIVES=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME][directives]")
+RC_PREF="[test battery][batch systems][${BATCH_SYS_NAME}]"
+export CYLC_TEST_BATCH_TASK_HOST=$( \
+    cylc get-global-config -i "${RC_PREF}host" 2>'/dev/null')
+export CYLC_TEST_BATCH_SITE_DIRECTIVES=$( \
+    cylc get-global-config -i "${RC_PREF}[directives]" 2>'/dev/null')
 if [[ -z "${CYLC_TEST_BATCH_TASK_HOST}" || "${CYLC_TEST_BATCH_TASK_HOST}" == None ]]
 then
-    skip_all "[directive tests]$BATCH_SYS_NAME host not defined"
+    skip_all "\"[test battery][batch systems][$BATCH_SYS_NAME]host\" not defined"
 fi
 # check the host is reachable
 if ! ssh -n ${SSH_OPTS} "${CYLC_TEST_BATCH_TASK_HOST}" true 1>/dev/null 2>&1
 then
-    skip_all "Host "$CYLC_TEST_BATCH_TASK_HOST" unreachable"
+    skip_all "Host \"$CYLC_TEST_BATCH_TASK_HOST\" unreachable"
 fi
 
 set_test_number 2
diff --git a/tests/documentation/00-make.t b/tests/documentation/00-make.t
index 39526e2..4d10923 100644
--- a/tests/documentation/00-make.t
+++ b/tests/documentation/00-make.t
@@ -19,7 +19,7 @@
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 if [[ ! -w $CYLC_DIR/doc ]]; then
-    skip_all '$CYLC_DIR/doc: not writable'
+    skip_all '"$CYLC_DIR/doc": not writable'
 fi
 #-------------------------------------------------------------------------------
 set_test_number 1
diff --git a/tests/events/00-suite.t b/tests/events/00-suite.t
old mode 100644
new mode 100755
diff --git a/tests/events/01-task.t b/tests/events/01-task.t
old mode 100644
new mode 100755
index 032d36d..714618b
--- a/tests/events/01-task.t
+++ b/tests/events/01-task.t
@@ -21,6 +21,7 @@
 set_test_number 2
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE task
+export CYLC_CONF_PATH=
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
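
The one-line addition above (and the matching one in 07-task-iso.t below) clears CYLC_CONF_PATH so that a site or user global.rc cannot leak event settings into these tests; the new tests that follow use the same variable in the opposite direction, pointing it at a test-local configuration. A minimal sketch of that pattern, with a hypothetical setting:

# Sketch only: isolate a test from site/user configuration, or substitute a local one.
export CYLC_CONF_PATH=                  # built-in defaults only
mkdir -p 'conf'
cat >'conf/global.rc' <<'__CFG__'
[task events]
    handler retry delays = PT0S, 2*PT1S
__CFG__
export CYLC_CONF_PATH="${PWD}/conf"     # now only conf/global.rc is read
cylc get-global-config -i '[task events]handler retry delays'
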
diff --git a/tests/events/02-multi.t b/tests/events/02-multi.t
old mode 100644
new mode 100755
diff --git a/tests/events/02-multi/reference.log b/tests/events/02-multi/reference.log
index 69b2f5f..954fa26 100644
--- a/tests/events/02-multi/reference.log
+++ b/tests/events/02-multi/reference.log
@@ -15,7 +15,7 @@
 2014/02/11 10:12:36 INFO - [fixer.1] -(current:submitted)> fixer.1 started at 2014-02-11T10:12:36
 2014/02/11 10:12:38 INFO - pre-trigger state dump: /home/oliverh/cylc-run/multi/state/state.2014:2:11:10:12:38
 2014/02/11 10:12:38 INFO - Command succeeded: trigger task(bar,1,False)
-2014/02/11 10:12:39 INFO - [bar.1] -triggered off [None]
+2014/02/11 10:12:39 INFO - [bar.1] -triggered off []
 2014/02/11 10:12:40 INFO - [bar.1] -(current:ready)> bar.1 submitting now
 2014/02/11 10:12:40 INFO - [bar.1] -(current:ready)> bar.1 submission succeeded
 2014/02/11 10:12:40 INFO - [bar.1] -(current:submitted)> bar.1 submit_method_id=29685
diff --git a/tests/events/03-timeout.t b/tests/events/03-timeout.t
old mode 100644
new mode 100755
diff --git a/tests/events/04-timeout-ref-live.t b/tests/events/04-timeout-ref-live.t
old mode 100644
new mode 100755
diff --git a/tests/events/05-timeout-ref-dummy.t b/tests/events/05-timeout-ref-dummy.t
old mode 100644
new mode 100755
diff --git a/tests/events/06-timeout-ref-simulation.t b/tests/events/06-timeout-ref-simulation.t
old mode 100644
new mode 100755
diff --git a/tests/events/07-task-iso.t b/tests/events/07-task-iso.t
old mode 100644
new mode 100755
index 11a0e43..3d7547c
--- a/tests/events/07-task-iso.t
+++ b/tests/events/07-task-iso.t
@@ -21,6 +21,7 @@
 set_test_number 2
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE task-iso
+export CYLC_CONF_PATH=
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
diff --git a/tests/events/08-task-event-handler-retry.t b/tests/events/08-task-event-handler-retry.t
new file mode 100755
index 0000000..c88967b
--- /dev/null
+++ b/tests/events/08-task-event-handler-retry.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test general task event handler + retry.
+. "$(dirname "$0")/test_header"
+set_test_number 3
+
+OPT_SET=
+if [[ "${TEST_NAME_BASE}" == *-globalcfg ]]; then
+    mkdir 'conf'
+cat >'conf/global.rc' <<'__GLOBALCFG__'
+[task events]
+    handlers=hello-event-handler '%(name)s' '%(event)s'
+    handler events=succeeded, failed
+    handler retry delays=PT0S, 2*PT1S
+__GLOBALCFG__
+    export CYLC_CONF_PATH="${PWD}/conf"
+    OPT_SET='-s GLOBALCFG=True'
+fi
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+run_ok "${TEST_NAME_BASE}-validate" cylc validate ${OPT_SET} "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug ${OPT_SET} "${SUITE_NAME}"
+
+SUITE_RUN_DIR="$(cylc get-global-config '--print-run-dir')"
+LOG="${SUITE_RUN_DIR}/${SUITE_NAME}/log/job/1/t1/NN/job-activity.log"
+sed "/(('event-handler-00', 'succeeded'), 1)/!d; s/^.* \[/[/" "${LOG}" \
+    >'edited-job-activity.log'
+cmp_ok 'edited-job-activity.log' <<'__LOG__'
+[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler 't1' 'succeeded'
+[(('event-handler-00', 'succeeded'), 1) ret_code] 1
+[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler 't1' 'succeeded'
+[(('event-handler-00', 'succeeded'), 1) ret_code] 1
+[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler 't1' 'succeeded'
+[(('event-handler-00', 'succeeded'), 1) ret_code] 0
+[(('event-handler-00', 'succeeded'), 1) out] hello
+__LOG__
+
+purge_suite "${SUITE_NAME}"
+exit
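
The handler-retry test above drives hello-event-handler to fail twice and succeed on the third attempt (via "handler retry delays=PT0S, 2*PT1S"), then reduces job-activity.log to just the handler records before comparing. The sed filter is reusable on its own for inspecting a task's handler activity; a sketch with a hypothetical run directory:

SUITE_NAME='my-suite'    # hypothetical
LOG="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}/log/job/1/t1/NN/job-activity.log"
# Keep only records for the first "succeeded" handler and trim the timestamp prefix.
sed "/(('event-handler-00', 'succeeded'), 1)/!d; s/^.* \[/[/" "${LOG}"
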
diff --git a/tests/events/08-task-event-handler-retry/bin/hello-event-handler b/tests/events/08-task-event-handler-retry/bin/hello-event-handler
new file mode 100755
index 0000000..9d2f624
--- /dev/null
+++ b/tests/events/08-task-event-handler-retry/bin/hello-event-handler
@@ -0,0 +1,20 @@
+#!/bin/bash
+set -eu
+NAME="$1"
+EVENT="$2"
+cd "${CYLC_SUITE_RUN_DIR}"
+FILE="${NAME}-${EVENT}.out"
+HELLO='hello'
+if [[ -e "${FILE}" && "$(<"${FILE}")" == "${HELLO}" ]]; then
+    # 3rd attempt
+    cat "${FILE}"
+    rm -f "${FILE}"
+    exit 0
+elif [[ -e "${FILE}" ]]; then
+    # 2nd attempt
+    echo "${HELLO}" >"${FILE}"
+else
+    # 1st attempt
+    touch "${FILE}"
+fi
+exit 1
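
hello-event-handler above is a three-state script keyed on a marker file under CYLC_SUITE_RUN_DIR: the first call creates an empty file and exits 1, the second writes "hello" and exits 1, and the third prints the file, removes it and exits 0, producing the two failures and one success checked by the test. It can be exercised by hand; a sketch assuming the script is on PATH:

# Sketch only: drive the handler through its three attempts in a scratch directory.
export CYLC_SUITE_RUN_DIR="$(mktemp -d)"
for attempt in 1 2 3; do
    hello-event-handler 't1' 'succeeded'
    echo "attempt ${attempt}: exit $?"
done
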
diff --git a/tests/events/08-task-event-handler-retry/reference.log b/tests/events/08-task-event-handler-retry/reference.log
new file mode 100644
index 0000000..ca698bf
--- /dev/null
+++ b/tests/events/08-task-event-handler-retry/reference.log
@@ -0,0 +1,6 @@
+2015-06-19T14:47:30+01 INFO - Run mode: live
+2015-06-19T14:47:30+01 INFO - Initial point: 1
+2015-06-19T14:47:30+01 INFO - Final point: 1
+2015-06-19T14:47:30+01 INFO - Cold Start 1
+2015-06-19T14:47:30+01 INFO - [t1.1] -triggered off []
+2015-06-19T14:47:35+01 INFO - [t2.1] -triggered off ['t1.1']
diff --git a/tests/events/08-task-event-handler-retry/suite.rc b/tests/events/08-task-event-handler-retry/suite.rc
new file mode 100644
index 0000000..5afcc95
--- /dev/null
+++ b/tests/events/08-task-event-handler-retry/suite.rc
@@ -0,0 +1,31 @@
+#!jinja2
+
+title=Task Event Handler Retry
+
+[cylc]
+    [[reference test]]
+        live mode suite timeout=PT1M
+
+[scheduling]
+    [[dependencies]]
+        graph="t1 => t2"
+
+[runtime]
+    [[t1]]
+        script=true
+{% if HOST is defined %}
+        [[[remote]]]
+            host = {{HOST}}
+{% endif %}
+{% if GLOBALCFG is not defined %}
+        [[[events]]]
+            handlers=hello-event-handler '%(name)s' '%(event)s'
+            handler events=succeeded, failed
+            handler retry delays=PT0S, 2*PT1S
+{% endif %}{# not GLOBALCFG is not defined #}
+    [[t2]]
+        script="""
+LOG="${CYLC_SUITE_LOG_DIR}/../job/1/t1/NN/job-activity.log"
+STR="[(('event-handler-00', 'succeeded'), 1) ret_code] 0"
+timeout 30 bash -c "while ! grep -q -F \"${STR}\" '${LOG}'; do sleep 1; done"
+"""
diff --git a/tests/events/09-task-event-mail.t b/tests/events/09-task-event-mail.t
new file mode 100755
index 0000000..f5dd10d
--- /dev/null
+++ b/tests/events/09-task-event-mail.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test event mail.
+. "$(dirname "$0")/test_header"
+if ! mail -V 2>'/dev/null'; then
+    skip_all '"mail" command not available'
+fi
+set_test_number 5
+mock_smtpd_init
+OPT_SET=
+if [[ "${TEST_NAME_BASE}" == *-globalcfg ]]; then
+    mkdir 'conf'
+    cat >'conf/global.rc' <<__GLOBALCFG__
+[task events]
+    mail events = failed, retry, succeeded
+    mail smtp = ${TEST_SMTPD_HOST}
+__GLOBALCFG__
+    export CYLC_CONF_PATH="${PWD}/conf"
+    OPT_SET='-s GLOBALCFG=True'
+else
+    OPT_SET="-s MAIL_SMTP=${TEST_SMTPD_HOST}"
+fi
+
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate ${OPT_SET} "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug ${OPT_SET} "${SUITE_NAME}"
+
+grep_ok 'Subject: \[1 task(s) retry\]' "${TEST_SMTPD_LOG}"
+grep_ok 'Subject: \[1 task(s) succeeded\]' "${TEST_SMTPD_LOG}"
+grep_ok '^1/t1/\(01: retry\|02: succeeded\)' "${TEST_SMTPD_LOG}"
+
+purge_suite "${SUITE_NAME}"
+mock_smtpd_kill
+exit
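
The mail test relies on two test_header helpers not shown here, mock_smtpd_init and mock_smtpd_kill, which evidently start and stop a throwaway SMTP capture server and export TEST_SMTPD_HOST and TEST_SMTPD_LOG for the suite and the grep_ok assertions. A comparable stand-in can be improvised from Python's standard smtpd debugging server (present in the Python versions of this era); a sketch, purely illustrative:

# Sketch only: capture outgoing mail on localhost:8025 instead of delivering it.
python -m smtpd -c DebuggingServer -n 'localhost:8025' >'smtpd.log' 2>&1 &
SMTPD_PID=$!
# ... run a suite with "mail smtp = localhost:8025", then inspect the capture:
grep 'Subject:' 'smtpd.log' || true
kill "${SMTPD_PID}"
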
diff --git a/tests/events/09-task-event-mail/reference.log b/tests/events/09-task-event-mail/reference.log
new file mode 100644
index 0000000..7c025d8
--- /dev/null
+++ b/tests/events/09-task-event-mail/reference.log
@@ -0,0 +1,6 @@
+2015-06-19T14:47:30+01 INFO - Run mode: live
+2015-06-19T14:47:30+01 INFO - Initial point: 1
+2015-06-19T14:47:30+01 INFO - Final point: 1
+2015-06-19T14:47:30+01 INFO - Cold Start 1
+2015-06-19T14:47:30+01 INFO - [t1.1] -triggered off []
+2015-06-19T14:47:30+01 INFO - [t1.1] -triggered off []
diff --git a/tests/events/09-task-event-mail/suite.rc b/tests/events/09-task-event-mail/suite.rc
new file mode 100644
index 0000000..649f2be
--- /dev/null
+++ b/tests/events/09-task-event-mail/suite.rc
@@ -0,0 +1,21 @@
+#!jinja2
+
+title=Task Event Mail
+
+[cylc]
+    [[reference test]]
+        live mode suite timeout=PT1M
+
+[scheduling]
+    [[dependencies]]
+        graph=t1
+
+[runtime]
+    [[t1]]
+        script=test "${CYLC_TASK_TRY_NUMBER}" -eq 2
+        retry delays = PT1S
+{% if GLOBALCFG is not defined %}
+        [[[events]]]
+            mail events = failed, retry, succeeded
+            mail smtp = {{MAIL_SMTP}}
+{% endif %}{# not GLOBALCFG is not defined #}
diff --git a/tests/events/10-task-event-job-logs-retrieve.t b/tests/events/10-task-event-job-logs-retrieve.t
new file mode 100755
index 0000000..fd9280c
--- /dev/null
+++ b/tests/events/10-task-event-job-logs-retrieve.t
@@ -0,0 +1,104 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test remote job logs retrieval, requires compatible version of cylc on remote
+# job host.
+. "$(dirname "$0")/test_header"
+HOST=$(cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
+if [[ -z "${HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
+fi
+set_test_number 5
+OPT_SET=
+if [[ "${TEST_NAME_BASE}" == *-globalcfg ]]; then
+    mkdir 'conf'
+    cat >'conf/global.rc' <<__GLOBALCFG__
+[hosts]
+    [[${HOST}]]
+        retrieve job logs = True
+        retrieve job logs retry delays = PT5S
+__GLOBALCFG__
+    export CYLC_CONF_PATH="${PWD}/conf"
+    OPT_SET='-s GLOBALCFG=True'
+else
+    export CYLC_CONF_PATH=
+fi
+
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+set -eu
+SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+${SSH} "${HOST}" \
+    "mkdir -p .cylc/${SUITE_NAME}/ && cat >.cylc/${SUITE_NAME}/passphrase" \
+    <"${TEST_DIR}/${SUITE_NAME}/passphrase"
+set +eu
+
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate ${OPT_SET} -s "HOST=${HOST}" "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug ${OPT_SET} -s "HOST=${HOST}" "${SUITE_NAME}"
+
+SUITE_RUN_DIR="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}"
+sqlite3 "${SUITE_RUN_DIR}/cylc-suite.db" \
+    'select cycle,name,submit_num,filename,location from task_job_logs
+     ORDER BY cycle,name,submit_num,filename' >'select-task-job-logs.out'
+cmp_ok 'select-task-job-logs.out' <<'__OUT__'
+1|t1|1|job|1/t1/01/job
+1|t1|1|job-activity.log|1/t1/01/job-activity.log
+1|t1|1|job.err|1/t1/01/job.err
+1|t1|1|job.out|1/t1/01/job.out
+1|t1|1|job.status|1/t1/01/job.status
+1|t1|2|job|1/t1/02/job
+1|t1|2|job-activity.log|1/t1/02/job-activity.log
+1|t1|2|job.err|1/t1/02/job.err
+1|t1|2|job.out|1/t1/02/job.out
+1|t1|2|job.status|1/t1/02/job.status
+1|t1|3|job|1/t1/03/job
+1|t1|3|job-activity.log|1/t1/03/job-activity.log
+1|t1|3|job.err|1/t1/03/job.err
+1|t1|3|job.out|1/t1/03/job.out
+1|t1|3|job.status|1/t1/03/job.status
+__OUT__
+
+sed "/'job-logs-retrieve'/!d; s/^[^ ]* //" \
+    "${SUITE_RUN_DIR}/log/job/1/t1/"{01,02,03}"/job-activity.log" \
+    >'edited-activities.log'
+cmp_ok 'edited-activities.log' <<'__LOG__'
+[('job-logs-retrieve', 1) ret_code] 0
+[('job-logs-retrieve', 2) ret_code] 0
+[('job-logs-retrieve', 3) ret_code] 0
+__LOG__
+
+grep -F 'will run after' "${SUITE_RUN_DIR}/log/suite/log" \
+    | cut -d' ' -f 4-10 | sort >"edited-log"
+if [[ "${TEST_NAME_BASE}" == *-globalcfg ]]; then
+    cmp_ok 'edited-log' <<'__LOG__'
+[t1.1] -('job-logs-retrieve', 1) will run after PT5S
+[t1.1] -('job-logs-retrieve', 2) will run after PT5S
+[t1.1] -('job-logs-retrieve', 3) will run after PT5S
+__LOG__
+else
+    cmp_ok 'edited-log' <<'__LOG__'
+[t1.1] -('job-logs-retrieve', 1) will run after P0Y
+[t1.1] -('job-logs-retrieve', 2) will run after P0Y
+[t1.1] -('job-logs-retrieve', 3) will run after P0Y
+__LOG__
+fi
+
+${SSH} "${HOST}" \
+    "rm -rf '.cylc/${SUITE_NAME}' 'cylc-run/${SUITE_NAME}'"
+purge_suite "${SUITE_NAME}"
+exit
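
Besides the task_job_logs rows, the retrieval test checks that job-logs-retrieve succeeded for all three submits and that the scheduler honoured "retrieve job logs retry delays" (PT5S from the test-local global.rc in the -globalcfg variant, P0Y otherwise). The delay check is just a grep over the suite log and can be repeated by hand; a sketch with a hypothetical suite name:

SUITE_NAME='my-suite'    # hypothetical
SUITE_RUN_DIR="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}"
grep -F 'will run after' "${SUITE_RUN_DIR}/log/suite/log" | cut -d' ' -f 4-10 | sort
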
diff --git a/tests/events/10-task-event-job-logs-retrieve/reference.log b/tests/events/10-task-event-job-logs-retrieve/reference.log
new file mode 100644
index 0000000..44ec474
--- /dev/null
+++ b/tests/events/10-task-event-job-logs-retrieve/reference.log
@@ -0,0 +1,7 @@
+2015-06-19T14:47:30+01 INFO - Run mode: live
+2015-06-19T14:47:30+01 INFO - Initial point: 1
+2015-06-19T14:47:30+01 INFO - Final point: 1
+2015-06-19T14:47:30+01 INFO - Cold Start 1
+2015-06-19T14:47:30+01 INFO - [t1.1] -triggered off []
+2015-06-19T14:47:30+01 INFO - [t1.1] -triggered off []
+2015-06-19T14:47:30+01 INFO - [t1.1] -triggered off []
diff --git a/tests/events/10-task-event-job-logs-retrieve/suite.rc b/tests/events/10-task-event-job-logs-retrieve/suite.rc
new file mode 100644
index 0000000..829c6f6
--- /dev/null
+++ b/tests/events/10-task-event-job-logs-retrieve/suite.rc
@@ -0,0 +1,21 @@
+#!jinja2
+
+title=Task Event Job Log Retrieve
+
+[cylc]
+    [[reference test]]
+        live mode suite timeout=PT1M
+
+[scheduling]
+    [[dependencies]]
+        graph=t1
+
+[runtime]
+    [[t1]]
+        script=test "${CYLC_TASK_TRY_NUMBER}" -eq 3
+        retry delays=PT0S, 2*PT1S
+        [[[remote]]]
+            host = {{HOST}}
+{% if GLOBALCFG is not defined %}
+            retrieve job logs = True
+{% endif %}{# not GLOBALCFG is not defined #}
diff --git a/tests/events/11-cycle-task-event-job-logs-retrieve.t b/tests/events/11-cycle-task-event-job-logs-retrieve.t
new file mode 100755
index 0000000..805165d
--- /dev/null
+++ b/tests/events/11-cycle-task-event-job-logs-retrieve.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test remote job logs retrieval, requires compatible version of cylc on remote
+# job host.
+. "$(dirname "$0")/test_header"
+HOST=$(cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
+if [[ -z "${HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
+fi
+set_test_number 4
+export CYLC_CONF_PATH=
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+set -eu
+SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+${SSH} "${HOST}" \
+    "mkdir -p .cylc/${SUITE_NAME}/ && cat >.cylc/${SUITE_NAME}/passphrase" \
+    <"${TEST_DIR}/${SUITE_NAME}/passphrase"
+set +eu
+
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate -s "HOST=${HOST}" "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug -s "HOST=${HOST}" "${SUITE_NAME}"
+
+# There are 2 remote tasks. One with "retrieve job logs = True", one without.
+# Only t1 should have job.err and job.out retrieved.
+SUITE_RUN_DIR="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}"
+sqlite3 "${SUITE_RUN_DIR}/cylc-suite.db" \
+    'select cycle,name,submit_num,filename,location from task_job_logs
+     ORDER BY cycle,name,submit_num,filename' >'select-task-job-logs.out'
+cmp_ok 'select-task-job-logs.out' <<'__OUT__'
+2020-02-02T02:02Z|t1|1|job|2020-02-02T02:02Z/t1/01/job
+2020-02-02T02:02Z|t1|1|job-activity.log|2020-02-02T02:02Z/t1/01/job-activity.log
+2020-02-02T02:02Z|t1|1|job.err|2020-02-02T02:02Z/t1/01/job.err
+2020-02-02T02:02Z|t1|1|job.out|2020-02-02T02:02Z/t1/01/job.out
+2020-02-02T02:02Z|t1|1|job.status|2020-02-02T02:02Z/t1/01/job.status
+2020-02-02T02:02Z|t1|2|job|2020-02-02T02:02Z/t1/02/job
+2020-02-02T02:02Z|t1|2|job-activity.log|2020-02-02T02:02Z/t1/02/job-activity.log
+2020-02-02T02:02Z|t1|2|job.err|2020-02-02T02:02Z/t1/02/job.err
+2020-02-02T02:02Z|t1|2|job.out|2020-02-02T02:02Z/t1/02/job.out
+2020-02-02T02:02Z|t1|2|job.status|2020-02-02T02:02Z/t1/02/job.status
+2020-02-02T02:02Z|t1|3|job|2020-02-02T02:02Z/t1/03/job
+2020-02-02T02:02Z|t1|3|job-activity.log|2020-02-02T02:02Z/t1/03/job-activity.log
+2020-02-02T02:02Z|t1|3|job.err|2020-02-02T02:02Z/t1/03/job.err
+2020-02-02T02:02Z|t1|3|job.out|2020-02-02T02:02Z/t1/03/job.out
+2020-02-02T02:02Z|t1|3|job.status|2020-02-02T02:02Z/t1/03/job.status
+2020-02-02T02:02Z|t2|1|job|2020-02-02T02:02Z/t2/01/job
+2020-02-02T02:02Z|t2|1|job-activity.log|2020-02-02T02:02Z/t2/01/job-activity.log
+2020-02-02T02:02Z|t2|2|job|2020-02-02T02:02Z/t2/02/job
+2020-02-02T02:02Z|t2|2|job-activity.log|2020-02-02T02:02Z/t2/02/job-activity.log
+2020-02-02T02:02Z|t2|3|job|2020-02-02T02:02Z/t2/03/job
+2020-02-02T02:02Z|t2|3|job-activity.log|2020-02-02T02:02Z/t2/03/job-activity.log
+__OUT__
+
+sed "/'job-logs-retrieve'/!d; s/^[^ ]* //" \
+    "${SUITE_RUN_DIR}/log/job/2020-02-02T02:02Z/t"{1,2}'/'{01,02,03}'/job-activity.log' \
+    >'edited-activities.log'
+cmp_ok 'edited-activities.log' <<__LOG__
+[('job-logs-retrieve', 1) ret_code] 0
+[('job-logs-retrieve', 2) ret_code] 0
+[('job-logs-retrieve', 3) ret_code] 0
+__LOG__
+
+${SSH} "${HOST}" \
+    "rm -rf '.cylc/${SUITE_NAME}' 'cylc-run/${SUITE_NAME}'"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/events/11-cycle-task-event-job-logs-retrieve/reference.log b/tests/events/11-cycle-task-event-job-logs-retrieve/reference.log
new file mode 100644
index 0000000..7306187
--- /dev/null
+++ b/tests/events/11-cycle-task-event-job-logs-retrieve/reference.log
@@ -0,0 +1,10 @@
+2015-06-19T14:47:30+01 INFO - Run mode: live
+2015-06-19T14:47:30+01 INFO - Initial point: 2020-02-02T02:02Z
+2015-06-19T14:47:30+01 INFO - Final point: 2020-02-02T02:02Z
+2015-06-19T14:47:30+01 INFO - Cold Start 2020-02-02T02:02Z
+2015-06-19T14:47:30+01 INFO - [t1.2020-02-02T02:02Z] -triggered off []
+2015-06-19T14:47:30+01 INFO - [t1.2020-02-02T02:02Z] -triggered off []
+2015-06-19T14:47:30+01 INFO - [t1.2020-02-02T02:02Z] -triggered off []
+2015-06-19T14:47:30+01 INFO - [t2.2020-02-02T02:02Z] -triggered off []
+2015-06-19T14:47:30+01 INFO - [t2.2020-02-02T02:02Z] -triggered off []
+2015-06-19T14:47:30+01 INFO - [t2.2020-02-02T02:02Z] -triggered off []
diff --git a/tests/events/11-cycle-task-event-job-logs-retrieve/suite.rc b/tests/events/11-cycle-task-event-job-logs-retrieve/suite.rc
new file mode 100644
index 0000000..aba6a3d
--- /dev/null
+++ b/tests/events/11-cycle-task-event-job-logs-retrieve/suite.rc
@@ -0,0 +1,29 @@
+#!jinja2
+
+title=Task Event Job Log Retrieve
+
+[cylc]
+    UTC mode = True
+    cycle point format=%Y-%m-%dT%H:%MZ
+    [[reference test]]
+        live mode suite timeout=PT1M
+
+[scheduling]
+    initial cycle point=2020-02-02T02:02Z
+    final cycle point=2020-02-02T02:02Z
+    [[dependencies]]
+        [[[R1]]]
+            graph=T
+
+[runtime]
+    [[T]]
+        script=test "${CYLC_TASK_TRY_NUMBER}" -eq 3
+        retry delays=PT0S, 2*PT1S
+        [[[remote]]]
+            host = {{HOST}}
+    [[t1]]
+        inherit = T
+        [[[remote]]]
+            retrieve job logs = True
+    [[t2]]
+        inherit = T
diff --git a/tests/events/12-task-event-handler-retry-globalcfg b/tests/events/12-task-event-handler-retry-globalcfg
new file mode 120000
index 0000000..4bd8b11
--- /dev/null
+++ b/tests/events/12-task-event-handler-retry-globalcfg
@@ -0,0 +1 @@
+08-task-event-handler-retry
\ No newline at end of file
diff --git a/tests/events/12-task-event-handler-retry-globalcfg.t b/tests/events/12-task-event-handler-retry-globalcfg.t
new file mode 120000
index 0000000..ea4285d
--- /dev/null
+++ b/tests/events/12-task-event-handler-retry-globalcfg.t
@@ -0,0 +1 @@
+08-task-event-handler-retry.t
\ No newline at end of file
diff --git a/tests/events/13-task-event-mail-globalcfg b/tests/events/13-task-event-mail-globalcfg
new file mode 120000
index 0000000..8db2231
--- /dev/null
+++ b/tests/events/13-task-event-mail-globalcfg
@@ -0,0 +1 @@
+09-task-event-mail
\ No newline at end of file
diff --git a/tests/events/13-task-event-mail-globalcfg.t b/tests/events/13-task-event-mail-globalcfg.t
new file mode 120000
index 0000000..8b303bd
--- /dev/null
+++ b/tests/events/13-task-event-mail-globalcfg.t
@@ -0,0 +1 @@
+09-task-event-mail.t
\ No newline at end of file
diff --git a/tests/events/14-task-event-job-logs-retrieve-globalcfg b/tests/events/14-task-event-job-logs-retrieve-globalcfg
new file mode 120000
index 0000000..e8e9bc3
--- /dev/null
+++ b/tests/events/14-task-event-job-logs-retrieve-globalcfg
@@ -0,0 +1 @@
+10-task-event-job-logs-retrieve
\ No newline at end of file
diff --git a/tests/events/14-task-event-job-logs-retrieve-globalcfg.t b/tests/events/14-task-event-job-logs-retrieve-globalcfg.t
new file mode 120000
index 0000000..22b5a5b
--- /dev/null
+++ b/tests/events/14-task-event-job-logs-retrieve-globalcfg.t
@@ -0,0 +1 @@
+10-task-event-job-logs-retrieve.t
\ No newline at end of file
diff --git a/tests/events/15-host-task-event-handler-retry-globalcfg b/tests/events/15-host-task-event-handler-retry-globalcfg
new file mode 120000
index 0000000..4bd8b11
--- /dev/null
+++ b/tests/events/15-host-task-event-handler-retry-globalcfg
@@ -0,0 +1 @@
+08-task-event-handler-retry
\ No newline at end of file
diff --git a/tests/events/15-host-task-event-handler-retry-globalcfg.t b/tests/events/15-host-task-event-handler-retry-globalcfg.t
new file mode 100755
index 0000000..d53a366
--- /dev/null
+++ b/tests/events/15-host-task-event-handler-retry-globalcfg.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test task event handler retry, with host-scoped delays from the global config.
+. "$(dirname "$0")/test_header"
+HOST=$(cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
+if [[ -z "${HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
+fi
+set_test_number 4
+
+mkdir 'conf'
+cat >'conf/global.rc' <<__GLOBALCFG__
+[hosts]
+    [[${HOST}]]
+        task event handler retry delays=3*PT1S
+[task events]
+    handlers=hello-event-handler '%(name)s' '%(event)s'
+    handler events=succeeded, failed
+__GLOBALCFG__
+
+export CYLC_CONF_PATH="${PWD}/conf"
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+set -eu
+SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+${SSH} "${HOST}" \
+    "mkdir -p .cylc/${SUITE_NAME}/ && cat >.cylc/${SUITE_NAME}/passphrase" \
+    <"${TEST_DIR}/${SUITE_NAME}/passphrase"
+set +eu
+
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate -s "HOST=${HOST}" -s 'GLOBALCFG=True' "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug -s "HOST=${HOST}" -s 'GLOBALCFG=True' \
+    "${SUITE_NAME}"
+
+SUITE_RUN_DIR="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}"
+LOG="${SUITE_RUN_DIR}/log/job/1/t1/NN/job-activity.log"
+sed "/(('event-handler-00', 'succeeded'), 1)/!d; s/^.* \[/[/" "${LOG}" \
+    >'edited-job-activity.log'
+cmp_ok 'edited-job-activity.log' <<'__LOG__'
+[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler 't1' 'succeeded'
+[(('event-handler-00', 'succeeded'), 1) ret_code] 1
+[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler 't1' 'succeeded'
+[(('event-handler-00', 'succeeded'), 1) ret_code] 1
+[(('event-handler-00', 'succeeded'), 1) cmd] hello-event-handler 't1' 'succeeded'
+[(('event-handler-00', 'succeeded'), 1) ret_code] 0
+[(('event-handler-00', 'succeeded'), 1) out] hello
+__LOG__
+
+grep 'event-handler-00.*will run after' "${SUITE_RUN_DIR}/log/suite/log" \
+    | cut -d' ' -f 4-11 >'edited-log'
+cmp_ok 'edited-log' <<'__LOG__'
+[t1.1] -(('event-handler-00', 'succeeded'), 1) will run after PT1S
+[t2.1] -(('event-handler-00', 'succeeded'), 1) will run after P0Y
+__LOG__
+
+${SSH} "${HOST}" "rm -rf '.cylc/${SUITE_NAME}' 'cylc-run/${SUITE_NAME}'"
+purge_suite "${SUITE_NAME}"
+exit
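
This variant moves the handler retry delays out of the suite and into a host section of a test-local global.rc, so only tasks running on ${HOST} inherit the 3*PT1S delays; that is why the expected suite log shows PT1S for t1 (remote) and P0Y for t2 (local). With CYLC_CONF_PATH still pointing at the conf directory written above, the host-scoped setting can be confirmed directly:

# Sketch only: show the delays a job on ${HOST} would inherit (assumes the
# environment set up by the test script above, i.e. HOST and CYLC_CONF_PATH).
cylc get-global-config -i "[hosts][${HOST}]task event handler retry delays"
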
diff --git a/tests/events/16-task-event-job-logs-register-globalcfg.t b/tests/events/16-task-event-job-logs-register-globalcfg.t
new file mode 100755
index 0000000..900849c
--- /dev/null
+++ b/tests/events/16-task-event-job-logs-register-globalcfg.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test registration of task job logs in the suite database, with
+# "register job logs retry delays" set in the global configuration.
+. "$(dirname "$0")/test_header"
+set_test_number 5
+
+mkdir 'conf'
+cat >'conf/global.rc' <<__GLOBALCFG__
+[task events]
+    register job logs retry delays = PT0S, PT15S
+__GLOBALCFG__
+export CYLC_CONF_PATH="${PWD}/conf"
+
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+
+run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug "${SUITE_NAME}"
+
+SUITE_RUN_DIR="$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}"
+sqlite3 "${SUITE_RUN_DIR}/cylc-suite.db" \
+    'select cycle,name,submit_num,filename,location from task_job_logs
+     ORDER BY cycle,name,submit_num,filename' >'select-task-job-logs.out'
+cmp_ok 'select-task-job-logs.out' <<'__OUT__'
+1|t1|1|job|1/t1/01/job
+1|t1|1|job-activity.log|1/t1/01/job-activity.log
+1|t1|1|job.err|1/t1/01/job.err
+1|t1|1|job.out|1/t1/01/job.out
+1|t1|1|job.out.keep|1/t1/01/job.out.keep
+1|t1|1|job.status|1/t1/01/job.status
+__OUT__
+
+sed "/'job-logs-register'/!d; s/^[^ ]* //" \
+    "${SUITE_RUN_DIR}/log/job/1/t1/01/job-activity.log" \
+    >'edited-activities.log'
+cmp_ok 'edited-activities.log' <<'__LOG__'
+[('job-logs-register', 1) ret_code] 0
+__LOG__
+
+grep -F 'failed, retrying in' "${SUITE_RUN_DIR}/log/suite/log" \
+    | cut -d' ' -f 4-10 | sort >"edited-log"
+cmp_ok 'edited-log' <<'__LOG__'
+[t1.1] -('job-logs-register', 1) failed, retrying in PT15S
+__LOG__
+
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/events/16-task-event-job-logs-register-globalcfg/reference.log b/tests/events/16-task-event-job-logs-register-globalcfg/reference.log
new file mode 100644
index 0000000..c165380
--- /dev/null
+++ b/tests/events/16-task-event-job-logs-register-globalcfg/reference.log
@@ -0,0 +1,5 @@
+2015-06-19T14:47:30+01 INFO - Run mode: live
+2015-06-19T14:47:30+01 INFO - Initial point: 1
+2015-06-19T14:47:30+01 INFO - Final point: 1
+2015-06-19T14:47:30+01 INFO - Cold Start 1
+2015-06-19T14:47:30+01 INFO - [t1.1] -triggered off []
diff --git a/tests/events/16-task-event-job-logs-register-globalcfg/suite.rc b/tests/events/16-task-event-job-logs-register-globalcfg/suite.rc
new file mode 100644
index 0000000..8daff61
--- /dev/null
+++ b/tests/events/16-task-event-job-logs-register-globalcfg/suite.rc
@@ -0,0 +1,23 @@
+#!jinja2
+
+title=Task Event Job Log Register
+
+[cylc]
+    [[reference test]]
+        live mode suite timeout=PT1M
+
+[scheduling]
+    [[dependencies]]
+        graph=t1
+
+[runtime]
+    [[t1]]
+        script="""
+wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
+mv "$0.out" "$0.out.keep"
+cylc task message 'succeeded' >>"$0.out.keep"
+sleep 5
+cp -p "$0.out.keep" "$0.out"
+
+trap '' EXIT
+"""
diff --git a/tests/events/17-task-event-job-logs-retrieve-command b/tests/events/17-task-event-job-logs-retrieve-command
new file mode 120000
index 0000000..e8e9bc3
--- /dev/null
+++ b/tests/events/17-task-event-job-logs-retrieve-command
@@ -0,0 +1 @@
+10-task-event-job-logs-retrieve
\ No newline at end of file
diff --git a/tests/events/17-task-event-job-logs-retrieve-command.t b/tests/events/17-task-event-job-logs-retrieve-command.t
new file mode 100755
index 0000000..66b405c
--- /dev/null
+++ b/tests/events/17-task-event-job-logs-retrieve-command.t
@@ -0,0 +1,75 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test remote job logs retrieval custom command, requires compatible version of
+# cylc on remote job host.
+. "$(dirname "$0")/test_header"
+HOST=$(cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
+if [[ -z "${HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
+fi
+set_test_number 3
+
+mkdir 'conf'
+cat >'conf/global.rc' <<__GLOBALCFG__
+[hosts]
+    [[${HOST}]]
+        retrieve job logs = True
+        retrieve job logs command = my-rsync
+__GLOBALCFG__
+export CYLC_CONF_PATH="${PWD}/conf"
+OPT_SET='-s GLOBALCFG=True'
+
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+set -eu
+SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+${SSH} "${HOST}" \
+    "mkdir -p .cylc/${SUITE_NAME}/ && cat >.cylc/${SUITE_NAME}/passphrase" \
+    <"${TEST_DIR}/${SUITE_NAME}/passphrase"
+set +eu
+
+mkdir -p "${TEST_DIR}/${SUITE_NAME}/bin"
+cat >"${TEST_DIR}/${SUITE_NAME}/bin/my-rsync" <<'__BASH__'
+#!/bin/bash
+set -eu
+echo "$@" >>"${CYLC_SUITE_LOG_DIR}/my-rsync.log"
+exec rsync -a "$@"
+__BASH__
+chmod +x "${TEST_DIR}/${SUITE_NAME}/bin/my-rsync"
+
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate ${OPT_SET} -s "HOST=${HOST}" "${SUITE_NAME}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --reference-test --debug ${OPT_SET} -s "HOST=${HOST}" \
+    "${SUITE_NAME}"
+
+SUITE_LOG_D="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}/log"
+sed 's/^.* -v //' "${SUITE_LOG_D}/suite/my-rsync.log" >'my-rsync.log.edited'
+
+OPT_HEAD='--include=/1 --include=/1/t1'
+OPT_TAIL='--exclude=/**'
+ARGS="${HOST}:\$HOME/cylc-run/${SUITE_NAME}/log/job/ ${SUITE_LOG_D}/job/"
+cmp_ok 'my-rsync.log.edited' <<__LOG__
+${OPT_HEAD} --include=/1/t1/01 --include=/1/t1/01/** ${OPT_TAIL} ${ARGS}
+${OPT_HEAD} --include=/1/t1/02 --include=/1/t1/02/** ${OPT_TAIL} ${ARGS}
+${OPT_HEAD} --include=/1/t1/03 --include=/1/t1/03/** ${OPT_TAIL} ${ARGS}
+__LOG__
+
+${SSH} "${HOST}" \
+    "rm -rf '.cylc/${SUITE_NAME}' 'cylc-run/${SUITE_NAME}'"
+purge_suite "${SUITE_NAME}"
+exit
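
Here the retrieval command itself is swapped out: the test-local global.rc names my-rsync, the suite's bin/my-rsync wrapper records its argument list and then execs the real rsync, and the comparison confirms that cylc built one narrowing include/exclude filter per submit. Approximately reconstructing one recorded invocation as a stand-alone command (host and suite name hypothetical):

HOST='myhost.example.com'    # hypothetical
SUITE_NAME='my-suite'        # hypothetical
rsync -a -v \
    --include='/1' --include='/1/t1' --include='/1/t1/01' --include='/1/t1/01/**' \
    --exclude='/**' \
    "${HOST}:cylc-run/${SUITE_NAME}/log/job/" \
    "$(cylc get-global-config '--print-run-dir')/${SUITE_NAME}/log/job/"
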
diff --git a/tests/events/task-iso/bin/log-check.sh b/tests/events/task-iso/bin/log-check.sh
index 76385b4..55145b4 100755
--- a/tests/events/task-iso/bin/log-check.sh
+++ b/tests/events/task-iso/bin/log-check.sh
@@ -10,9 +10,9 @@ REF_LOG=$CYLC_SUITE_DEF_PATH/events.log
 
 # difference with 'sort -u' (unique) because polling on timeouts may now
 # result in multiple 'started' messages etc.
-if ! diff <(sort -u $NEW_LOG) <(sort -u $REF_LOG); then 
+if ! diff -u <(sort -u $NEW_LOG) <(sort -u $REF_LOG); then 
     echo "ERROR: event handler output logs differ" >&2
-    diff <(sort -u $NEW_LOG) <(sort -u $REF_LOG) >&2
+    diff -u <(sort -u $NEW_LOG) <(sort -u $REF_LOG) >&2
     exit 1
 else
     echo "OK: event handler output logs agree"
diff --git a/tests/events/task-iso/events.log b/tests/events/task-iso/events.log
index 56701e4..dfbb232 100644
--- a/tests/events/task-iso/events.log
+++ b/tests/events/task-iso/events.log
@@ -2,7 +2,7 @@ EVENT                TASK     MESSAGE
 retry                foo.1    job failed, retrying in PT3S
 submission retry     bar.1    job submission failed, submit-retrying in PT3S
 submitted            baz.1    job submitted
-submission timeout   baz.1    job submitted PT3S ago, but has not started
+submission timeout   baz.1    job submitted PT1S ago, but has not started
 submission failed    bar.1    job submission failed
 execution timeout    foo.1    job started PT3S ago, but has not finished
 started              baz.1    job started
diff --git a/tests/events/task-iso/suite.rc b/tests/events/task-iso/suite.rc
index 0b1b8aa..68332c3 100644
--- a/tests/events/task-iso/suite.rc
+++ b/tests/events/task-iso/suite.rc
@@ -10,7 +10,7 @@ title = "test all event hooks"
     [[environment]]
         EVNTLOG = {{ EVNTLOG }}
     [[reference test]]
-        live mode suite timeout = PT45S
+        live mode suite timeout = PT1M
         suite shutdown event handler = log-check.sh
         expected task failures = bar.1, baz.1
 
@@ -40,7 +40,7 @@ printf "%-20s %-8s %s\n" EVENT TASK MESSAGE > $EVNTLOG
 if [[ $CYLC_TASK_TRY_NUMBER == 1 ]]; then
     false
 else
-    sleep 5; cylc task message -p WARNING 'this is a user-defined warning message'
+    sleep 10; cylc task message -p WARNING 'this is a user-defined warning message'
 fi"""
         [[[event hooks]]]
             succeeded handler = {{ HANDLER }}
@@ -61,11 +61,11 @@ fi"""
 
      [[baz]]
         # submitted, submission timeout, started, failed
-        init-script = "sleep 5"
-        script = "false"
+        init-script = sleep 5
+        script = false
         [[[event hooks]]]
             submitted handler = {{ HANDLER }}
             started handler = {{ HANDLER }}
             failed handler = {{ HANDLER }}
-            submission timeout = PT3S
+            submission timeout = PT1S
             submission timeout handler = {{ HANDLER }}
diff --git a/tests/events/task/bin/log-check.sh b/tests/events/task/bin/log-check.sh
index 76385b4..55145b4 100755
--- a/tests/events/task/bin/log-check.sh
+++ b/tests/events/task/bin/log-check.sh
@@ -10,9 +10,9 @@ REF_LOG=$CYLC_SUITE_DEF_PATH/events.log
 
 # difference with 'sort -u' (unique) because polling on timeouts may now
 # result in multiple 'started' messages etc.
-if ! diff <(sort -u $NEW_LOG) <(sort -u $REF_LOG); then 
+if ! diff -u <(sort -u $NEW_LOG) <(sort -u $REF_LOG); then 
     echo "ERROR: event handler output logs differ" >&2
-    diff <(sort -u $NEW_LOG) <(sort -u $REF_LOG) >&2
+    diff -u <(sort -u $NEW_LOG) <(sort -u $REF_LOG) >&2
     exit 1
 else
     echo "OK: event handler output logs agree"
diff --git a/tests/events/task/suite.rc b/tests/events/task/suite.rc
index 8b26fdb..6a84921 100644
--- a/tests/events/task/suite.rc
+++ b/tests/events/task/suite.rc
@@ -10,7 +10,7 @@ title = "test all event hooks"
     [[environment]]
         EVNTLOG = {{ EVNTLOG }}
     [[reference test]]
-        live mode suite timeout = 0.75
+        live mode suite timeout = 1.0
         suite shutdown event handler = log-check.sh
         expected task failures = bar.1, baz.1
  
@@ -38,7 +38,7 @@ printf "%-20s %-8s %s\n" EVENT TASK MESSAGE > {{ EVNTLOG }}
 if [[ $CYLC_TASK_TRY_NUMBER == 1 ]]; then
     false
 else
-    sleep 5; cylc task message -p WARNING 'this is a user-defined warning message'
+    sleep 10; cylc task message -p WARNING 'this is a user-defined warning message'
 fi"""
         [[[event hooks]]]
             succeeded handler = {{ HANDLER }}
@@ -59,8 +59,8 @@ fi"""
 
      [[baz]]
         # submitted, submission timeout, started, failed
-        init-script = "sleep 5"
-        script = "false"
+        init-script = sleep 15
+        script = false
         [[[event hooks]]]
             submitted handler = {{ HANDLER }}
             started handler = {{ HANDLER }}
diff --git a/tests/events/00-suite.t b/tests/ext-trigger/00-satellite.t
similarity index 84%
copy from tests/events/00-suite.t
copy to tests/ext-trigger/00-satellite.t
index d9dcf9b..dd099d6 100644
--- a/tests/events/00-suite.t
+++ b/tests/ext-trigger/00-satellite.t
@@ -15,16 +15,16 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Validate and run the external trigger test suite
 . "$(dirname "$0")/test_header"
+
 set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
 
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+
 suite_run_ok "${TEST_NAME_BASE}-run" \
     cylc run --reference-test --debug "${SUITE_NAME}"
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+purge_suite "${SUITE_NAME}"
 exit
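
The new ext-trigger test validates and runs the satellite example, in which downstream tasks wait on externally supplied trigger messages rather than on upstream task outputs; the reference log below shows those messages arriving while the suite runs. From outside a suite such a trigger is sent with the cylc ext-trigger command (argument order assumed here: suite name, trigger message, trigger ID):

# Sketch only: send an external trigger message plus an ID to a running suite.
SUITE_NAME='my-suite'    # hypothetical registered, running suite
cylc ext-trigger "${SUITE_NAME}" 'new dataset ready for processing' "$(date +%s)"
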
diff --git a/tests/ext-trigger/00-satellite/reference.log b/tests/ext-trigger/00-satellite/reference.log
new file mode 100644
index 0000000..299374d
--- /dev/null
+++ b/tests/ext-trigger/00-satellite/reference.log
@@ -0,0 +1,204 @@
+2015-03-19T02:11:39Z INFO - port:7766
+2015-03-19T02:11:39Z INFO - Suite starting at 2015-03-19T02:11:39Z
+2015-03-19T02:11:39Z INFO - Run mode: live
+2015-03-19T02:11:39Z INFO - Initial point: 1
+2015-03-19T02:11:39Z INFO - Final point: 5
+2015-03-19T02:11:39Z INFO - Cold Start 1
+2015-03-19T02:11:39Z INFO - [prep.1] -initiate job-submit
+2015-03-19T02:11:39Z INFO - [prep.1] -triggered off []
+2015-03-19T02:11:40Z INFO - 8613
+
+2015-03-19T02:11:40Z INFO - [prep.1] -submit_method_id=8613
+2015-03-19T02:11:40Z INFO - [prep.1] -submission succeeded
+2015-03-19T02:11:40Z INFO - [prep.1] -(current:submitted)> prep.1 started at 2015-03-19T02:11:40Z
+2015-03-19T02:11:41Z INFO - [prep.1] -(current:running)> prep.1 succeeded at 2015-03-19T02:11:41Z
+2015-03-19T02:11:42Z INFO - [satsim.1] -initiate job-submit
+2015-03-19T02:11:42Z INFO - [satsim.1] -triggered off ['prep.1']
+2015-03-19T02:11:43Z INFO - 8673
+
+2015-03-19T02:11:43Z INFO - [satsim.1] -submit_method_id=8673
+2015-03-19T02:11:43Z INFO - [satsim.1] -submission succeeded
+2015-03-19T02:11:43Z INFO - [satsim.1] -(current:submitted)> satsim.1 started at 2015-03-19T02:11:43Z
+2015-03-19T02:11:43Z INFO - External trigger received
+new dataset ready for processing (1426731103.19)
+2015-03-19T02:11:43Z INFO - External trigger received
+new dataset ready for processing (1426731103.76)
+2015-03-19T02:11:44Z INFO - External trigger received
+new dataset ready for processing (1426731103.97)
+2015-03-19T02:11:44Z INFO - External trigger received
+new dataset ready for processing (1426731104.68)
+2015-03-19T02:11:44Z INFO - [get_data.1] -initiate job-submit
+2015-03-19T02:11:44Z INFO - [get_data.1] -triggered off ['prep.1']
+2015-03-19T02:11:44Z INFO - External trigger received
+new dataset ready for processing (1426731104.40)
+2015-03-19T02:11:45Z INFO - 8780
+
+2015-03-19T02:11:45Z INFO - [get_data.1] -submit_method_id=8780
+2015-03-19T02:11:45Z INFO - [get_data.1] -submission succeeded
+2015-03-19T02:11:45Z INFO - [get_data.1] -(current:submitted)> get_data.1 started at 2015-03-19T02:11:45Z
+2015-03-19T02:11:45Z INFO - [satsim.1] -(current:running)> satsim.1 succeeded at 2015-03-19T02:11:45Z
+2015-03-19T02:11:51Z INFO - [get_data.1] -(current:running)> get_data.1 succeeded at 2015-03-19T02:11:50Z
+2015-03-19T02:11:52Z INFO - [get_data.2] -initiate job-submit
+2015-03-19T02:11:52Z INFO - [proc1.1] -initiate job-submit
+2015-03-19T02:11:52Z INFO - [get_data.2] -triggered off ['get_data.1']
+2015-03-19T02:11:52Z INFO - [proc1.1] -triggered off ['get_data.1']
+2015-03-19T02:11:53Z INFO - 8939
+
+2015-03-19T02:11:53Z INFO - [get_data.2] -submit_method_id=8939
+2015-03-19T02:11:53Z INFO - [get_data.2] -submission succeeded
+2015-03-19T02:11:53Z INFO - 8940
+
+2015-03-19T02:11:53Z INFO - [proc1.1] -submit_method_id=8940
+2015-03-19T02:11:53Z INFO - [proc1.1] -submission succeeded
+2015-03-19T02:11:53Z INFO - [get_data.2] -(current:submitted)> get_data.2 started at 2015-03-19T02:11:53Z
+2015-03-19T02:11:53Z INFO - [proc1.1] -(current:submitted)> proc1.1 started at 2015-03-19T02:11:53Z
+2015-03-19T02:11:58Z INFO - [get_data.2] -(current:running)> get_data.2 succeeded at 2015-03-19T02:11:58Z
+2015-03-19T02:11:58Z INFO - [proc1.1] -(current:running)> proc1.1 succeeded at 2015-03-19T02:11:58Z
+2015-03-19T02:11:59Z INFO - [proc2.1] -initiate job-submit
+2015-03-19T02:11:59Z INFO - [get_data.3] -initiate job-submit
+2015-03-19T02:11:59Z INFO - [proc1.2] -initiate job-submit
+2015-03-19T02:11:59Z INFO - [proc2.1] -triggered off ['proc1.1']
+2015-03-19T02:11:59Z INFO - [get_data.3] -triggered off ['get_data.2']
+2015-03-19T02:11:59Z INFO - [proc1.2] -triggered off ['get_data.2']
+2015-03-19T02:12:00Z INFO - 9070
+
+2015-03-19T02:12:00Z INFO - [proc2.1] -submit_method_id=9070
+2015-03-19T02:12:00Z INFO - [proc2.1] -submission succeeded
+2015-03-19T02:12:00Z INFO - 9080
+
+2015-03-19T02:12:00Z INFO - [get_data.3] -submit_method_id=9080
+2015-03-19T02:12:00Z INFO - [get_data.3] -submission succeeded
+2015-03-19T02:12:00Z INFO - 9082
+
+2015-03-19T02:12:00Z INFO - [proc1.2] -submit_method_id=9082
+2015-03-19T02:12:00Z INFO - [proc1.2] -submission succeeded
+2015-03-19T02:12:00Z INFO - [proc2.1] -(current:submitted)> proc2.1 started at 2015-03-19T02:12:00Z
+2015-03-19T02:12:00Z INFO - [get_data.3] -(current:submitted)> get_data.3 started at 2015-03-19T02:12:00Z
+2015-03-19T02:12:00Z INFO - [proc1.2] -(current:submitted)> proc1.2 started at 2015-03-19T02:12:00Z
+2015-03-19T02:12:05Z INFO - [proc2.1] -(current:running)> proc2.1 succeeded at 2015-03-19T02:12:05Z
+2015-03-19T02:12:05Z INFO - [get_data.3] -(current:running)> get_data.3 succeeded at 2015-03-19T02:12:05Z
+2015-03-19T02:12:05Z INFO - [proc1.2] -(current:running)> proc1.2 succeeded at 2015-03-19T02:12:05Z
+2015-03-19T02:12:06Z INFO - [get_data.4] -initiate job-submit
+2015-03-19T02:12:06Z INFO - [products.1] -initiate job-submit
+2015-03-19T02:12:06Z INFO - [proc1.3] -initiate job-submit
+2015-03-19T02:12:06Z INFO - [proc2.2] -initiate job-submit
+2015-03-19T02:12:06Z INFO - [get_data.4] -triggered off ['get_data.3']
+2015-03-19T02:12:06Z INFO - [products.1] -triggered off ['proc2.1']
+2015-03-19T02:12:06Z INFO - [proc1.3] -triggered off ['get_data.3']
+2015-03-19T02:12:06Z INFO - [proc2.2] -triggered off ['proc1.2']
+2015-03-19T02:12:07Z INFO - 9382
+
+2015-03-19T02:12:07Z INFO - [get_data.4] -submit_method_id=9382
+2015-03-19T02:12:07Z INFO - [get_data.4] -submission succeeded
+2015-03-19T02:12:07Z INFO - 9378
+
+2015-03-19T02:12:07Z INFO - [products.1] -submit_method_id=9378
+2015-03-19T02:12:07Z INFO - [products.1] -submission succeeded
+2015-03-19T02:12:07Z INFO - 9379
+
+2015-03-19T02:12:07Z INFO - [proc1.3] -submit_method_id=9379
+2015-03-19T02:12:07Z INFO - [proc1.3] -submission succeeded
+2015-03-19T02:12:07Z INFO - 9408
+
+2015-03-19T02:12:07Z INFO - [proc2.2] -submit_method_id=9408
+2015-03-19T02:12:07Z INFO - [proc2.2] -submission succeeded
+2015-03-19T02:12:07Z INFO - [get_data.4] -(current:submitted)> get_data.4 started at 2015-03-19T02:12:07Z
+2015-03-19T02:12:07Z INFO - [products.1] -(current:submitted)> products.1 started at 2015-03-19T02:12:07Z
+2015-03-19T02:12:07Z INFO - [proc1.3] -(current:submitted)> proc1.3 started at 2015-03-19T02:12:07Z
+2015-03-19T02:12:07Z INFO - [proc2.2] -(current:submitted)> proc2.2 started at 2015-03-19T02:12:07Z
+2015-03-19T02:12:12Z INFO - [get_data.4] -(current:running)> get_data.4 succeeded at 2015-03-19T02:12:12Z
+2015-03-19T02:12:12Z INFO - [products.1] -(current:running)> products.1 succeeded at 2015-03-19T02:12:12Z
+2015-03-19T02:12:12Z INFO - [proc1.3] -(current:running)> proc1.3 succeeded at 2015-03-19T02:12:12Z
+2015-03-19T02:12:12Z INFO - [proc2.2] -(current:running)> proc2.2 succeeded at 2015-03-19T02:12:12Z
+2015-03-19T02:12:13Z INFO - [get_data.5] -initiate job-submit
+2015-03-19T02:12:13Z INFO - [products.2] -initiate job-submit
+2015-03-19T02:12:13Z INFO - [proc2.3] -initiate job-submit
+2015-03-19T02:12:13Z INFO - [proc1.4] -initiate job-submit
+2015-03-19T02:12:13Z INFO - [get_data.5] -triggered off ['get_data.4']
+2015-03-19T02:12:13Z INFO - [products.2] -triggered off ['proc2.2']
+2015-03-19T02:12:13Z INFO - [proc2.3] -triggered off ['proc1.3']
+2015-03-19T02:12:13Z INFO - [proc1.4] -triggered off ['get_data.4']
+2015-03-19T02:12:13Z INFO - Expiring 1 broadcast settings now
+2015-03-19T02:12:14Z INFO - 9627
+
+2015-03-19T02:12:14Z INFO - [get_data.5] -submit_method_id=9627
+2015-03-19T02:12:14Z INFO - [get_data.5] -submission succeeded
+2015-03-19T02:12:14Z INFO - 9626
+
+2015-03-19T02:12:14Z INFO - [products.2] -submit_method_id=9626
+2015-03-19T02:12:14Z INFO - [products.2] -submission succeeded
+2015-03-19T02:12:14Z INFO - 9642
+
+2015-03-19T02:12:14Z INFO - [proc2.3] -submit_method_id=9642
+2015-03-19T02:12:14Z INFO - [proc2.3] -submission succeeded
+2015-03-19T02:12:14Z INFO - 9643
+
+2015-03-19T02:12:14Z INFO - [proc1.4] -submit_method_id=9643
+2015-03-19T02:12:14Z INFO - [proc1.4] -submission succeeded
+2015-03-19T02:12:14Z INFO - [get_data.5] -(current:submitted)> get_data.5 started at 2015-03-19T02:12:14Z
+2015-03-19T02:12:14Z INFO - [products.2] -(current:submitted)> products.2 started at 2015-03-19T02:12:14Z
+2015-03-19T02:12:14Z INFO - [proc2.3] -(current:submitted)> proc2.3 started at 2015-03-19T02:12:14Z
+2015-03-19T02:12:14Z INFO - [proc1.4] -(current:submitted)> proc1.4 started at 2015-03-19T02:12:14Z
+2015-03-19T02:12:20Z INFO - [get_data.5] -(current:running)> get_data.5 succeeded at 2015-03-19T02:12:20Z
+2015-03-19T02:12:20Z INFO - [products.2] -(current:running)> products.2 succeeded at 2015-03-19T02:12:20Z
+2015-03-19T02:12:20Z INFO - [proc2.3] -(current:running)> proc2.3 succeeded at 2015-03-19T02:12:20Z
+2015-03-19T02:12:20Z INFO - [proc1.4] -(current:running)> proc1.4 succeeded at 2015-03-19T02:12:20Z
+2015-03-19T02:12:21Z INFO - [proc2.4] -initiate job-submit
+2015-03-19T02:12:21Z INFO - [products.3] -initiate job-submit
+2015-03-19T02:12:21Z INFO - [proc1.5] -initiate job-submit
+2015-03-19T02:12:21Z INFO - [proc2.4] -triggered off ['proc1.4']
+2015-03-19T02:12:21Z INFO - [products.3] -triggered off ['proc2.3']
+2015-03-19T02:12:21Z INFO - [proc1.5] -triggered off ['get_data.5']
+2015-03-19T02:12:21Z INFO - Expiring 2 broadcast settings now
+2015-03-19T02:12:22Z INFO - 9871
+
+2015-03-19T02:12:22Z INFO - [proc2.4] -submit_method_id=9871
+2015-03-19T02:12:22Z INFO - [proc2.4] -submission succeeded
+2015-03-19T02:12:22Z INFO - 9878
+
+2015-03-19T02:12:22Z INFO - [products.3] -submit_method_id=9878
+2015-03-19T02:12:22Z INFO - [products.3] -submission succeeded
+2015-03-19T02:12:22Z INFO - 9889
+
+2015-03-19T02:12:22Z INFO - [proc1.5] -submit_method_id=9889
+2015-03-19T02:12:22Z INFO - [proc1.5] -submission succeeded
+2015-03-19T02:12:22Z INFO - [proc2.4] -(current:submitted)> proc2.4 started at 2015-03-19T02:12:22Z
+2015-03-19T02:12:22Z INFO - [products.3] -(current:submitted)> products.3 started at 2015-03-19T02:12:22Z
+2015-03-19T02:12:22Z INFO - [proc1.5] -(current:submitted)> proc1.5 started at 2015-03-19T02:12:22Z
+2015-03-19T02:12:28Z INFO - [proc2.4] -(current:running)> proc2.4 succeeded at 2015-03-19T02:12:27Z
+2015-03-19T02:12:28Z INFO - [products.3] -(current:running)> products.3 succeeded at 2015-03-19T02:12:27Z
+2015-03-19T02:12:28Z INFO - [proc1.5] -(current:running)> proc1.5 succeeded at 2015-03-19T02:12:27Z
+2015-03-19T02:12:29Z INFO - [products.4] -initiate job-submit
+2015-03-19T02:12:29Z INFO - [proc2.5] -initiate job-submit
+2015-03-19T02:12:29Z INFO - [products.4] -triggered off ['proc2.4']
+2015-03-19T02:12:29Z INFO - [proc2.5] -triggered off ['proc1.5']
+2015-03-19T02:12:29Z INFO - Expiring 3 broadcast settings now
+2015-03-19T02:12:30Z INFO - 10051
+
+2015-03-19T02:12:30Z INFO - [products.4] -submit_method_id=10051
+2015-03-19T02:12:30Z INFO - [products.4] -submission succeeded
+2015-03-19T02:12:30Z INFO - 10054
+
+2015-03-19T02:12:30Z INFO - [proc2.5] -submit_method_id=10054
+2015-03-19T02:12:30Z INFO - [proc2.5] -submission succeeded
+2015-03-19T02:12:30Z INFO - [products.4] -(current:submitted)> products.4 started at 2015-03-19T02:12:29Z
+2015-03-19T02:12:30Z INFO - [proc2.5] -(current:submitted)> proc2.5 started at 2015-03-19T02:12:29Z
+2015-03-19T02:12:35Z INFO - [products.4] -(current:running)> products.4 succeeded at 2015-03-19T02:12:34Z
+2015-03-19T02:12:35Z INFO - [proc2.5] -(current:running)> proc2.5 succeeded at 2015-03-19T02:12:34Z
+2015-03-19T02:12:36Z INFO - [products.5] -initiate job-submit
+2015-03-19T02:12:36Z INFO - [products.5] -triggered off ['proc2.5']
+2015-03-19T02:12:37Z INFO - 10171
+
+2015-03-19T02:12:37Z INFO - [products.5] -submit_method_id=10171
+2015-03-19T02:12:37Z INFO - [products.5] -submission succeeded
+2015-03-19T02:12:37Z INFO - [products.5] -(current:submitted)> products.5 started at 2015-03-19T02:12:36Z
+2015-03-19T02:12:43Z INFO - [products.5] -(current:running)> products.5 succeeded at 2015-03-19T02:12:42Z
+2015-03-19T02:12:44Z INFO - [collate.5] -initiate job-submit
+2015-03-19T02:12:44Z INFO - [collate.5] -triggered off ['products.5']
+2015-03-19T02:12:45Z INFO - 10236
+
+2015-03-19T02:12:45Z INFO - [collate.5] -submit_method_id=10236
+2015-03-19T02:12:45Z INFO - [collate.5] -submission succeeded
+2015-03-19T02:12:45Z INFO - [collate.5] -(current:submitted)> collate.5 started at 2015-03-19T02:12:44Z
+2015-03-19T02:13:06Z INFO - [collate.5] -(current:running)> collate.5 succeeded at 2015-03-19T02:13:05Z
+2015-03-19T02:13:07Z INFO - Suite shutting down at 2015-03-19T02:13:07Z
diff --git a/examples/satellite/suite.rc b/tests/ext-trigger/00-satellite/suite.rc
similarity index 52%
rename from examples/satellite/suite.rc
rename to tests/ext-trigger/00-satellite/suite.rc
index c0d1ddb..dff1cca 100644
--- a/examples/satellite/suite.rc
+++ b/tests/ext-trigger/00-satellite/suite.rc
@@ -1,10 +1,18 @@
 #!Jinja2
 
-title = Demonstrates real time satellite data processing
+# TEST SUITE ADAPTED FROM examples/satellite/ext-trigger/
+
+title = Real time satellite data processing demo, variant 3 of 3
+
 description = """
-Each successive integer cycle retrieves and processes the next
-arbitrarily timed and arbitrarily labelled dataset, in parallel
-with previous cycles if the data comes in quickly."""
+Successive cycle points retrieve and process the next arbitrarily timed and
+labelled dataset, in parallel if the data comes in quickly. This variant of the
+suite has initial get_data tasks with external triggers: they do not submit
+until triggered by an external system."""
+
+# Note that the satellite simulator task here that supplies the external event
+# trigger happens to be a suite task - i.e. it is not really "external" - but
+# this is only a convenience - an easy route to a self-contained example suite.
 
 # you can monitor output processing with:
 # $ watch -n 1 \
@@ -16,12 +24,20 @@ with previous cycles if the data comes in quickly."""
 {% set DATA_IN_DIR = "$CYLC_SUITE_SHARE_DIR/incoming" %}
 {% set PRODUCT_DIR = "$CYLC_SUITE_SHARE_DIR/products" %}
 
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        required run mode = live
+        live mode suite timeout = PT1M
+
 [scheduling]
     cycling mode = integer
     initial cycle point = 1
     final cycle point = {{N_DATASETS}}
     max active cycle points = 5
     # runahead limit = P5 # (alternative limiting method)
+    [[special tasks]]
+        external-triggered = get_data("new dataset ready for processing")
     [[dependencies]]
         [[[R1]]] # first cycle
             graph = prep => satsim & get_data
@@ -37,70 +53,68 @@ with previous cycles if the data comes in quickly."""
 [runtime]
     [[prep]]
         title = clean the suite output directories
-        script = \
+        command scripting = \
 rm -rf $CYLC_SUITE_SHARE_DIR $CYLC_SUITE_WORK_DIR
 
     [[satsim]]
         title = simulate a satellite data feed
         description = """Generates {{N_DATASETS}} arbitrarily labelled
-datasets after random durations."""
-        pre-script = mkdir -p {{DATA_IN_DIR}}
-        script = """
+datasets very quickly, to show parallel processing streams."""
+        pre-command scripting = mkdir -p {{DATA_IN_DIR}}
+        command scripting = """
 COUNT=0
 while true; do
-    (( COUNT == {{N_DATASETS}} )) && break
-    sleep $(( 1 + RANDOM % 10 ))
-    touch {{DATA_IN_DIR}}/dataset-$(date +%s).raw
-    (( COUNT += 1 ))
+    ((COUNT == {{N_DATASETS}})) && break
+    # sleep $((RANDOM % 20))
+    # Generate datasets very quickly to test parallel processing.
+    # Random suffix required to distinguish fast-arriving messages.
+    DATA_ID=$(date +%s).$((RANDOM % 100))
+    DATA_FILE=dataset-${DATA_ID}.raw
+    touch {{DATA_IN_DIR}}/$DATA_FILE
+    ((COUNT += 1))
+    # Trigger downstream processing in the suite.
+    cylc ext-trigger $CYLC_SUITE_NAME \
+       "new dataset ready for processing" $DATA_ID
 done"""
 
     [[WORKDIR]]
         # Define a common cycle-point-specific work-directory for all
         # processing tasks so that they all work on the same dataset.
         work sub-directory = proc-$CYLC_TASK_CYCLE_POINT
-        pre-script = sleep 10
+        pre-command scripting = "DATASET=dataset-$CYLC_EXT_TRIGGER_ID"
+        ##post-command scripting = sleep 5
 
     [[get_data]]
         inherit = WORKDIR
-        title = grab one new dataset, waiting if necessary
-        script = """
-while true; do
-    DATASET=$( ls {{DATA_IN_DIR}}/dataset-*.raw 2>/dev/null | head -n 1 )
-    if [[ -z $DATASET ]]; then
-        sleep 1
-        continue
-    fi
-    break
-done
-mv $DATASET $PWD"""
+        title = retrieve next dataset
+        description = just do it - we know it exists already
+        command scripting = mv {{DATA_IN_DIR}}/${DATASET}.raw $PWD
 
     [[proc1]]
         inherit = WORKDIR
         title = convert .raw dataset to .proc1 form
-        script = """
-DATASET=$(ls dataset-*.raw)
-mv $DATASET ${DATASET%raw}proc1"""
+        command scripting = mv ${DATASET}.raw ${DATASET}.proc1
 
     [[proc2]]
         inherit = WORKDIR
         title = convert .proc1 dataset to .proc2 form
-        script = """
-DATASET=$(ls dataset-*.proc1)
-mv $DATASET ${DATASET%proc1}proc2"""
+        command scripting = mv ${DATASET}.proc1 ${DATASET}.proc2
 
     [[products]]
         inherit = WORKDIR
         title = generate products from .proc2 processed dataset
-        pre-script = mkdir -p {{PRODUCT_DIR}}
-        script = """
-DATASET=$( ls dataset-*.proc2 )
-mv $DATASET {{PRODUCT_DIR}}/${DATASET%proc2}prod"""
+        command scripting = """
+mkdir -p {{PRODUCT_DIR}}
+mv ${DATASET}.proc2 {{PRODUCT_DIR}}/${DATASET}.prod"""
 
     [[collate]]
         title = collate all products from the suite run
         # Note you might want to use "cylc suite-state" to check that
         # _all_ product tasks have finished before collating results.
-        script = ls {{PRODUCT_DIR}}
+        command scripting = """
+echo PRODUCTS:
+ls {{PRODUCT_DIR}}
+##sleep 20"""
 
 [visualization]
     default node attributes = "style=filled", "shape=box"
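
As an aside, the external trigger used above is exactly what a real ingest system would send; the satsim task just plays that role from inside the suite. A minimal sketch of the external side, assuming a hypothetical suite registration name and dataset labelling scheme:

#!/bin/bash
# Hypothetical external notification script (not part of this patch):
# tell the suite that a new dataset has arrived, using the same trigger
# message string that the suite.rc above declares for get_data.
SUITE=satellite-demo                      # assumed suite registration name
DATA_ID="$(date +%s).$((RANDOM % 100))"   # unique dataset label
cylc ext-trigger "$SUITE" "new dataset ready for processing" "$DATA_ID"
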
diff --git a/tests/events/00-suite.t b/tests/ext-trigger/01-no-nudge.t
similarity index 70%
copy from tests/events/00-suite.t
copy to tests/ext-trigger/01-no-nudge.t
index d9dcf9b..fac9568 100644
--- a/tests/events/00-suite.t
+++ b/tests/ext-trigger/01-no-nudge.t
@@ -14,17 +14,22 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Test that external trigger events stimulate task processing even when nothing
+# else is happening in the suite. If not, the test suite will stall until
+# manually nudged.  Note this test will probably become irrelevant once we go
+# to entirely event-driven scheduling.
+
 . "$(dirname "$0")/test_header"
+
 set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}" 
 
 run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
+
 suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+    cylc run --no-detach "${SUITE_NAME}"
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/ext-trigger/01-no-nudge/suite.rc b/tests/ext-trigger/01-no-nudge/suite.rc
new file mode 100644
index 0000000..be190f4
--- /dev/null
+++ b/tests/ext-trigger/01-no-nudge/suite.rc
@@ -0,0 +1,29 @@
+title = "Test for Github Issue 1543"
+description = """
+External trigger events should stimulate task processing without requiring a
+manual suite nudge, even when nothing else is happening in the suite.  Here,
+long-running task bar ext-triggers foo when nothing else is happening.  If
+task processing occurs, foo will submit and kill bar, allowing the suite
+to shut down.  Otherwise, foo won't submit, bar will keep running, and the suite
+will time out."""
+
+[cylc]
+    [[event hooks]]
+        abort on timeout = True
+        timeout = PT30S
+[scheduling]
+    [[special tasks]]
+        external-trigger = foo("drugs and money")
+    [[dependencies]]
+        graph = """foo & bar
+                   bar:fail => !bar"""
+[runtime]
+    [[foo]]
+        # If triggered, remove the long-running bar task
+        # to allow the suite to shut down quickly.
+        script = cylc kill $CYLC_SUITE_NAME bar 1
+    [[bar]]
+        script = """
+sleep 5
+cylc ext-trigger $CYLC_SUITE_NAME "drugs and money" 12345
+sleep 60"""
diff --git a/tests/purge/test_header b/tests/ext-trigger/test_header
similarity index 100%
copy from tests/purge/test_header
copy to tests/ext-trigger/test_header
diff --git a/tests/graph-equivalence/00-oneline.t b/tests/graph-equivalence/00-oneline.t
index ff0eacf..40cf705 100644
--- a/tests/graph-equivalence/00-oneline.t
+++ b/tests/graph-equivalence/00-oneline.t
@@ -32,6 +32,7 @@ suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-check-a
 cylc run $SUITE_NAME --hold
+sleep 5
 cylc show $SUITE_NAME a.1 | sed -n "/prerequisites/,/outputs/p" > a-prereqs
 cmp_ok $TEST_SOURCE_DIR/splitline_refs/a-ref a-prereqs
 #-------------------------------------------------------------------------------
diff --git a/tests/graph-equivalence/01-twolines.t b/tests/graph-equivalence/01-twolines.t
index d3616ef..fbf2ae5 100644
--- a/tests/graph-equivalence/01-twolines.t
+++ b/tests/graph-equivalence/01-twolines.t
@@ -32,6 +32,7 @@ suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-check-a
 cylc run $SUITE_NAME --hold
+sleep 5
 cylc show $SUITE_NAME a.1 | sed -n "/prerequisites/,/outputs/p" > a-prereqs
 cmp_ok $TEST_SOURCE_DIR/splitline_refs/a-ref a-prereqs
 #-------------------------------------------------------------------------------
diff --git a/tests/graph-equivalence/02-splitline.t b/tests/graph-equivalence/02-splitline.t
index c88d3fb..7cc99b3 100644
--- a/tests/graph-equivalence/02-splitline.t
+++ b/tests/graph-equivalence/02-splitline.t
@@ -33,6 +33,7 @@ suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-check-a
 cylc run $SUITE_NAME --hold
+sleep 5
 cylc show $SUITE_NAME a.1 | sed -n "/prerequisites/,/outputs/p" > a-prereqs
 cmp_ok $TEST_SOURCE_DIR/splitline_refs/a-ref a-prereqs
 #-------------------------------------------------------------------------------
diff --git a/tests/graph-equivalence/03-multiline_and1.t b/tests/graph-equivalence/03-multiline_and1.t
index 91f79fa..62a9d84 100644
--- a/tests/graph-equivalence/03-multiline_and1.t
+++ b/tests/graph-equivalence/03-multiline_and1.t
@@ -33,6 +33,7 @@ suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-check-c
 cylc run $SUITE_NAME --hold
+sleep 5
 cylc show $SUITE_NAME c.1 | sed -n "/prerequisites/,/outputs/p" > c-prereqs
 cmp_ok $TEST_SOURCE_DIR/multiline_and_refs/c-ref c-prereqs
 cylc shutdown $SUITE_NAME --now -f
diff --git a/tests/graph-equivalence/04-multiline_and2.t b/tests/graph-equivalence/04-multiline_and2.t
index d62a905..73ee6b7 100644
--- a/tests/graph-equivalence/04-multiline_and2.t
+++ b/tests/graph-equivalence/04-multiline_and2.t
@@ -34,8 +34,9 @@ suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-check-c
 cylc run $SUITE_NAME --hold
+sleep 5
 cylc show $SUITE_NAME c.1 | sed -n "/prerequisites/,/outputs/p" > c-prereqs
-cmp_ok $TEST_SOURCE_DIR/multiline_and_refs/c-ref c-prereqs
+cmp_ok $TEST_SOURCE_DIR/multiline_and_refs/c-ref-2 c-prereqs
 cylc shutdown $SUITE_NAME --now -f
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/graph-equivalence/multiline_and_refs/c-ref-2 b/tests/graph-equivalence/multiline_and_refs/c-ref-2
new file mode 100644
index 0000000..31af0ec
--- /dev/null
+++ b/tests/graph-equivalence/multiline_and_refs/c-ref-2
@@ -0,0 +1,5 @@
+prerequisites (- => not satisfied):
+  - a.1 succeeded
+  - b.1 succeeded
+
+outputs (- => not completed):
diff --git a/tests/cyclers/34-implicit-back-compat.t b/tests/graphing/05-suicide-family.t
similarity index 79%
copy from tests/cyclers/34-implicit-back-compat.t
copy to tests/graphing/05-suicide-family.t
index 790ec2c..81782c9 100644
--- a/tests/cyclers/34-implicit-back-compat.t
+++ b/tests/graphing/05-suicide-family.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Check that implicit cycling is ok in cylc-5 back compat mode.
+# Test that a suicide-triggered family plots as a collapsed family node (#1526).
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 3
@@ -23,14 +23,12 @@ set_test_number 3
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $SUITE_NAME
+run_ok $TEST_NAME cylc validate "$SUITE_NAME"
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-cmp
-cmp_ok $TEST_NAME_BASE-validate.stderr <<__ERR__
-WARNING, foo: not explicitly defined in dependency graphs (deprecated)
-__ERR__
+graph_suite $SUITE_NAME graph.plain
+cmp_ok graph.plain $TEST_SOURCE_DIR/$TEST_NAME_BASE/graph.plain.ref
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+graph_suite $SUITE_NAME graph.plain.suicide --show-suicide
+cmp_ok graph.plain.suicide $TEST_SOURCE_DIR/$TEST_NAME_BASE/graph.plain.suicide.ref
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/graphing/05-suicide-family/graph.plain.ref b/tests/graphing/05-suicide-family/graph.plain.ref
new file mode 100644
index 0000000..eb6582f
--- /dev/null
+++ b/tests/graphing/05-suicide-family/graph.plain.ref
@@ -0,0 +1,2 @@
+graph
+stop
diff --git a/tests/graphing/05-suicide-family/graph.plain.suicide.ref b/tests/graphing/05-suicide-family/graph.plain.suicide.ref
new file mode 100644
index 0000000..33f2272
--- /dev/null
+++ b/tests/graphing/05-suicide-family/graph.plain.suicide.ref
@@ -0,0 +1,5 @@
+edge "foo.1" "BAR.1" dashed
+graph
+node "BAR.1" "BAR\n1" unfilled doubleoctagon black
+node "foo.1" "foo\n1" unfilled box black
+stop
diff --git a/tests/graphing/05-suicide-family/suite.rc b/tests/graphing/05-suicide-family/suite.rc
new file mode 100644
index 0000000..432bb28
--- /dev/null
+++ b/tests/graphing/05-suicide-family/suite.rc
@@ -0,0 +1,8 @@
+[scheduling]
+    [[dependencies]]
+        graph = foo => !BAR
+[runtime]
+    [[foo]]
+    [[BAR]]
+    [[bar1, bar2, bar3]]
+        inherit = BAR
diff --git a/tests/graphing/06-family-or.t b/tests/graphing/06-family-or.t
new file mode 100755
index 0000000..4b69449
--- /dev/null
+++ b/tests/graphing/06-family-or.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test: family-OR logic pre-initial simplification bug (#1626).
+. "$(dirname "$0")/test_header"
+set_test_number 2
+
+cat >'suite.rc' <<'__SUITE_RC__'
+[cylc]
+    UTC mode = True
+[scheduling]
+    initial cycle point = 2000
+    [[dependencies]]
+        [[[T00]]]
+            graph = """
+                A
+                B
+                A[-PT24H]:fail-any | B[-PT24H]:fail-any => c"""
+[runtime]
+    [[A]]
+    [[B]]
+    [[a1]]
+        inherit = A
+    [[b1a, b2a, b3a]]
+        inherit = B
+    [[c]]
+__SUITE_RC__
+
+run_ok "${TEST_NAME_BASE}-validate" cylc validate "${PWD}/suite.rc"
+
+graph_suite "${PWD}/suite.rc" 'graph.plain'
+cmp_ok 'graph.plain' - <<'__GRAPH__'
+edge "A.20000101T0000Z" "c.20000102T0000Z" solid
+edge "A.20000102T0000Z" "c.20000103T0000Z" solid
+edge "B.20000101T0000Z" "c.20000102T0000Z" solid
+edge "B.20000102T0000Z" "c.20000103T0000Z" solid
+graph
+node "A.20000101T0000Z" "A\n20000101T0000Z" unfilled doubleoctagon black
+node "A.20000102T0000Z" "A\n20000102T0000Z" unfilled doubleoctagon black
+node "A.20000103T0000Z" "A\n20000103T0000Z" unfilled doubleoctagon black
+node "B.20000101T0000Z" "B\n20000101T0000Z" unfilled doubleoctagon black
+node "B.20000102T0000Z" "B\n20000102T0000Z" unfilled doubleoctagon black
+node "B.20000103T0000Z" "B\n20000103T0000Z" unfilled doubleoctagon black
+node "c.20000101T0000Z" "c\n20000101T0000Z" unfilled box black
+node "c.20000102T0000Z" "c\n20000102T0000Z" unfilled box black
+node "c.20000103T0000Z" "c\n20000103T0000Z" unfilled box black
+stop
+__GRAPH__
+
+exit
diff --git a/tests/cyclers/34-implicit-back-compat.t b/tests/graphing/07-stop-at-final-point.t
similarity index 73%
copy from tests/cyclers/34-implicit-back-compat.t
copy to tests/graphing/07-stop-at-final-point.t
index 790ec2c..89fa1ec 100644
--- a/tests/cyclers/34-implicit-back-compat.t
+++ b/tests/graphing/07-stop-at-final-point.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Check that implicit cycling is ok in cylc-5 back compat mode.
+# Test that graphing stops at suite final cycle point.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 3
@@ -23,14 +23,14 @@ set_test_number 3
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $SUITE_NAME
+run_ok $TEST_NAME cylc validate "$SUITE_NAME"
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-cmp
-cmp_ok $TEST_NAME_BASE-validate.stderr <<__ERR__
-WARNING, foo: not explicitly defined in dependency graphs (deprecated)
-__ERR__
+TEST_NAME=$TEST_NAME_BASE-graph-npoints
+graph_suite $SUITE_NAME graph.plain.test1 --set="STOP_CRITERION=number of cycle points = 6"
+cmp_ok graph.plain.test1 $TEST_SOURCE_DIR/$TEST_NAME_BASE/graph.plain.ref
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+TEST_NAME=$TEST_NAME_BASE-graph-final-point
+graph_suite $SUITE_NAME graph.plain.test2 --set="STOP_CRITERION=final cycle point = 2015-01-05"
+cmp_ok graph.plain.test2 $TEST_SOURCE_DIR/$TEST_NAME_BASE/graph.plain.ref
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/graphing/07-stop-at-final-point/graph.plain.ref b/tests/graphing/07-stop-at-final-point/graph.plain.ref
new file mode 100644
index 0000000..4582483
--- /dev/null
+++ b/tests/graphing/07-stop-at-final-point/graph.plain.ref
@@ -0,0 +1,23 @@
+edge "baz.2015-01-03" "stop.2015-01-03" solid
+edge "foo.2015-01-01" "bar.2015-01-01" solid
+edge "foo.2015-01-01" "baz.2015-01-01" solid
+edge "foo.2015-01-01" "foo.2015-01-02" solid
+edge "foo.2015-01-02" "bar.2015-01-02" solid
+edge "foo.2015-01-02" "baz.2015-01-02" solid
+edge "foo.2015-01-02" "foo.2015-01-03" solid
+edge "foo.2015-01-03" "bar.2015-01-03" solid
+edge "foo.2015-01-03" "baz.2015-01-03" solid
+edge "start.2015-01-01" "foo.2015-01-01" solid
+graph
+node "bar.2015-01-01" "bar\n2015-01-01" unfilled box black
+node "bar.2015-01-02" "bar\n2015-01-02" unfilled box black
+node "bar.2015-01-03" "bar\n2015-01-03" unfilled box black
+node "baz.2015-01-01" "baz\n2015-01-01" unfilled box black
+node "baz.2015-01-02" "baz\n2015-01-02" unfilled box black
+node "baz.2015-01-03" "baz\n2015-01-03" unfilled box black
+node "foo.2015-01-01" "foo\n2015-01-01" unfilled box black
+node "foo.2015-01-02" "foo\n2015-01-02" unfilled box black
+node "foo.2015-01-03" "foo\n2015-01-03" unfilled box black
+node "start.2015-01-01" "start\n2015-01-01" filled box black
+node "stop.2015-01-03" "stop\n2015-01-03" filled box black
+stop
diff --git a/tests/graphing/07-stop-at-final-point/suite.rc b/tests/graphing/07-stop-at-final-point/suite.rc
new file mode 100644
index 0000000..188d15a
--- /dev/null
+++ b/tests/graphing/07-stop-at-final-point/suite.rc
@@ -0,0 +1,18 @@
+#!Jinja2
+[cylc]
+    cycle point format = %Y-%m-%d
+[scheduling]
+    initial cycle point = 2015-01-01
+    final cycle point   = 2015-01-03
+    [[dependencies]]
+      [[[R1]]]
+         graph = start => foo
+      [[[P1D]]]
+         graph = foo[-P1D] => foo => bar & baz
+      [[[R1/P0D]]]
+        graph = baz => stop
+[visualization]
+    {{STOP_CRITERION | default('')}}
+    [[node attributes]]
+        start = "style=filled", "fillcolor=slategray"
+        stop = "style=filled", "fillcolor=red"
diff --git a/tests/hold-release/12-hold-then-retry/suite.rc b/tests/hold-release/12-hold-then-retry/suite.rc
index 8cdb53f..074b1aa 100644
--- a/tests/hold-release/12-hold-then-retry/suite.rc
+++ b/tests/hold-release/12-hold-then-retry/suite.rc
@@ -21,7 +21,7 @@ T_ST_FILE="$(dirname "$0")/../../t-submit-retry-able/NN/job.status"
 atrm "$(awk -F= '$1 == "CYLC_BATCH_SYS_JOB_ID" {print $2}' "${T_ST_FILE}")"
 # Hold the suite
 cylc hold "${CYLC_SUITE_NAME}"
-timeout 15 my-log-grepper 'Command succeeded: hold suite now()'
+timeout 15 my-log-grepper 'Command succeeded: hold_suite'
 # Poll t-submit-retry-able, should return submit-fail
 cylc poll "${CYLC_SUITE_NAME}" 't-submit-retry-able' '1'
 # Allow t-retry-able to continue
diff --git a/tests/cylc-scan/00-simple.t b/tests/hold-release/17-hold-after-point.t
similarity index 86%
copy from tests/cylc-scan/00-simple.t
copy to tests/hold-release/17-hold-after-point.t
index 987f6b3..baa54a1 100644
--- a/tests/cylc-scan/00-simple.t
+++ b/tests/hold-release/17-hold-after-point.t
@@ -15,17 +15,18 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test cylc scan is picking up running suite
+# Test defining a hold after point in a suite.rc file
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
 #-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE simple
+install_suite $TEST_NAME_BASE hold-after-point
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-validate
+TEST_NAME=$TEST_NAME_BASE-val
 run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+suite_run_ok $TEST_NAME cylc run --reference-test \
+    --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/hold-release/hold-after-point/reference.log b/tests/hold-release/hold-after-point/reference.log
new file mode 100644
index 0000000..78dff1a
--- /dev/null
+++ b/tests/hold-release/hold-after-point/reference.log
@@ -0,0 +1,36 @@
+2015-06-03T11:33:34Z INFO - port:7766
+2015-06-03T11:33:34Z INFO - Suite starting at 2015-06-03T11:33:34Z
+2015-06-03T11:33:34Z INFO - Run mode: live
+2015-06-03T11:33:34Z INFO - Initial point: 20100101T0000Z
+2015-06-03T11:33:34Z INFO - Final point: 20100110T0000Z
+2015-06-03T11:33:34Z INFO - Cold Start 20100101T0000Z
+2015-06-03T11:33:34Z INFO - Setting suite hold cycle point: 20100102T00Z
+2015-06-03T11:33:34Z INFO - [stopper.20100101T0000Z] -job(01) initiate job-submit
+2015-06-03T11:33:34Z INFO - [foo.20100101T0000Z] -job(01) initiate job-submit
+2015-06-03T11:33:34Z INFO - [stopper.20100101T0000Z] -triggered off []
+2015-06-03T11:33:34Z INFO - [foo.20100101T0000Z] -triggered off []
+2015-06-03T11:33:35Z INFO - 13512
+
+2015-06-03T11:33:35Z INFO - [stopper.20100101T0000Z] -submit_method_id=13512
+2015-06-03T11:33:35Z INFO - [stopper.20100101T0000Z] -submission succeeded
+2015-06-03T11:33:35Z INFO - 13522
+
+2015-06-03T11:33:35Z INFO - [foo.20100101T0000Z] -submit_method_id=13522
+2015-06-03T11:33:35Z INFO - [foo.20100101T0000Z] -submission succeeded
+2015-06-03T11:33:36Z INFO - [stopper.20100101T0000Z] -(current:submitted)> stopper.20100101T0000Z started at 2015-06-03T11:33:35Z
+2015-06-03T11:33:36Z INFO - [foo.20100101T0000Z] -(current:submitted)> foo.20100101T0000Z started at 2015-06-03T11:33:35Z
+2015-06-03T11:33:41Z INFO - [foo.20100101T0000Z] -(current:running)> foo.20100101T0000Z succeeded at 2015-06-03T11:33:41Z
+2015-06-03T11:33:42Z INFO - [foo.20100102T0000Z] -job(01) initiate job-submit
+2015-06-03T11:33:42Z INFO - [foo.20100102T0000Z] -triggered off ['foo.20100101T0000Z']
+2015-06-03T11:33:43Z INFO - 13852
+
+2015-06-03T11:33:43Z INFO - [foo.20100102T0000Z] -submit_method_id=13852
+2015-06-03T11:33:43Z INFO - [foo.20100102T0000Z] -submission succeeded
+2015-06-03T11:33:43Z INFO - [foo.20100103T0000Z] -holding (beyond suite hold point) 20100102T00Z
+2015-06-03T11:33:43Z INFO - [foo.20100103T0000Z] -waiting => held
+2015-06-03T11:33:44Z INFO - [foo.20100102T0000Z] -(current:submitted)> foo.20100102T0000Z started at 2015-06-03T11:33:43Z
+2015-06-03T11:33:50Z INFO - [foo.20100102T0000Z] -(current:running)> foo.20100102T0000Z succeeded at 2015-06-03T11:33:49Z
+2015-06-03T11:34:06Z INFO - Client command set_stop_cleanly
+2015-06-03T11:34:06Z INFO - Command succeeded: set_stop_cleanly(False)
+2015-06-03T11:34:07Z INFO - [stopper.20100101T0000Z] -(current:running)> stopper.20100101T0000Z succeeded at 2015-06-03T11:34:06Z
+2015-06-03T11:34:08Z INFO - Suite shutting down at 2015-06-03T11:34:08Z
diff --git a/tests/hold-release/hold-after-point/suite.rc b/tests/hold-release/hold-after-point/suite.rc
new file mode 100644
index 0000000..45b230f
--- /dev/null
+++ b/tests/hold-release/hold-after-point/suite.rc
@@ -0,0 +1,29 @@
+
+title = "cylc hold after point suite.rc test"
+
+description = """Define a hold after point in the suite.rc"""
+
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        live mode suite timeout = PT1M
+
+[scheduling]
+    initial cycle time  = 20100101T00Z
+    final cycle time    = 20100110T00Z
+    hold after point    = 20100102T00Z
+    [[dependencies]]
+        [[[R1]]]
+            graph = """
+                stopper
+            """
+        [[[T00]]]
+            graph = foo[-P1D] => foo
+[runtime]
+    [[stopper]]
+        script = sleep 30; cylc stop $CYLC_SUITE_NAME
+    [[foo]]
+        script = true
+[runtime]
+    [[foo]]
+        command scripting = sleep 5
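
The "hold after point" setting above holds tasks beyond 20100102T00Z from start-up; the same behaviour can also be driven at run time. A minimal sketch using the hold and release commands (suite name as registered; the release step is an assumption about normal usage, not part of this test):

# Hold the running suite, then resume normal scheduling.
cylc hold "$CYLC_SUITE_NAME"
cylc release "$CYLC_SUITE_NAME"
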
diff --git a/tests/inheritance/00-namespace-list.t b/tests/inheritance/00-namespace-list.t
index 632e5a5..1eb4805 100755
--- a/tests/inheritance/00-namespace-list.t
+++ b/tests/inheritance/00-namespace-list.t
@@ -30,19 +30,19 @@ TEST_NAME=$TEST_NAME_BASE-get-config
 cylc get-config --sparse -i runtime $SUITE_NAME > runtime.out
 cmp_ok runtime.out <<'__DONE__'
 [[root]]
+[[FAMILY]]
 [[m1]]
    inherit = FAMILY
    [[[environment]]]
       FOO = foo
-[[m3]]
-   inherit = FAMILY
-   [[[environment]]]
-      FOO = foo
-[[FAMILY]]
 [[m2]]
    inherit = FAMILY
    [[[environment]]]
       FOO = bar
+[[m3]]
+   inherit = FAMILY
+   [[[environment]]]
+      FOO = foo
 __DONE__
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/inheritance/01-circular.t b/tests/inheritance/01-circular.t
index 1ac3001..4b87e4a 100644
--- a/tests/inheritance/01-circular.t
+++ b/tests/inheritance/01-circular.t
@@ -27,7 +27,7 @@ run_fail $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-cmp
 cmp_ok $TEST_NAME_BASE-validate.stderr <<__ERR__
-ERROR: circular [runtime] inheritance?
+'ERROR: circular [runtime] inheritance?'
 __ERR__
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/integer-cycling/00-satellite/suite.rc b/tests/integer-cycling/00-satellite/suite.rc
index 07fc2dc..ea95987 100644
--- a/tests/integer-cycling/00-satellite/suite.rc
+++ b/tests/integer-cycling/00-satellite/suite.rc
@@ -19,7 +19,7 @@ with previous cycles if the data comes in quickly."""
 [cylc]
     [[reference test]]
         required run mode = live
-        live mode suite timeout = PT40S
+        live mode suite timeout = PT1M
 
 [scheduling]
     cycling mode = integer
diff --git a/tests/cylc-scan/00-simple.t b/tests/jinja2/07-filters.t
similarity index 87%
copy from tests/cylc-scan/00-simple.t
copy to tests/jinja2/07-filters.t
index 987f6b3..1df9c5e 100644
--- a/tests/cylc-scan/00-simple.t
+++ b/tests/jinja2/07-filters.t
@@ -15,17 +15,17 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test cylc scan is picking up running suite
+# Test Jinja2 expansion with custom filters from the suite Jinja2Filters directory.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
 #-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE simple
+install_suite $TEST_NAME_BASE filters
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+TEST_NAME=$TEST_NAME_BASE-check-expansion
+cmp_ok $TEST_DIR/$SUITE_NAME/suite.rc.processed $TEST_DIR/$SUITE_NAME/suite.rc-expanded
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/jinja2/filters/Jinja2Filters/hello.py b/tests/jinja2/filters/Jinja2Filters/hello.py
new file mode 100644
index 0000000..1804be3
--- /dev/null
+++ b/tests/jinja2/filters/Jinja2Filters/hello.py
@@ -0,0 +1,2 @@
+def hello(foo):
+    return 'Hello from a filter!'
diff --git a/tests/jinja2/filters/Jinja2Filters/truly.py b/tests/jinja2/filters/Jinja2Filters/truly.py
new file mode 100644
index 0000000..782a4a0
--- /dev/null
+++ b/tests/jinja2/filters/Jinja2Filters/truly.py
@@ -0,0 +1,2 @@
+def truly(bar):
+    return 'true'
diff --git a/tests/jinja2/filters/suite.rc b/tests/jinja2/filters/suite.rc
new file mode 100644
index 0000000..6c3452e
--- /dev/null
+++ b/tests/jinja2/filters/suite.rc
@@ -0,0 +1,10 @@
+#!jinja2
+# {{ ""|hello }}
+[scheduling]
+    [[dependencies]]
+        graph = foo => bar
+[runtime]
+    [[foo]]
+        pre-command scripting = {{ true|truly }}
+        command scripting = {{ false|truly }}
+    [[bar]]
diff --git a/tests/jinja2/filters/suite.rc-expanded b/tests/jinja2/filters/suite.rc-expanded
new file mode 100644
index 0000000..ec0cd53
--- /dev/null
+++ b/tests/jinja2/filters/suite.rc-expanded
@@ -0,0 +1,9 @@
+# Hello from a filter!
+[scheduling]
+    [[dependencies]]
+        graph = foo => bar
+[runtime]
+    [[foo]]
+        pre-command scripting = true
+        command scripting = true
+    [[bar]]
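
The filter test above works because any Python module placed in a Jinja2Filters directory alongside the suite.rc is loaded and a function of the matching name becomes available as a Jinja2 filter, as hello.py and truly.py demonstrate. A minimal sketch of adding one more, purely hypothetical, filter (SUITE_DEF_DIR stands for the suite definition directory):

# Hypothetical extra filter: module name and function name match,
# following the hello.py and truly.py files added by this patch.
mkdir -p "$SUITE_DEF_DIR/Jinja2Filters"
cat >"$SUITE_DEF_DIR/Jinja2Filters/shout.py" <<'__PY__'
def shout(text):
    return str(text).upper()
__PY__
# In suite.rc:  {{ "hello"|shout }}  would then expand to  HELLO
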
diff --git a/tests/jinja2/include/suite.rc-expanded b/tests/jinja2/include/suite.rc-expanded
index 38094b5..ff44629 100644
--- a/tests/jinja2/include/suite.rc-expanded
+++ b/tests/jinja2/include/suite.rc-expanded
@@ -21,4 +21,4 @@
         script = echo I am $TITLE 3
     [[member_4]]
         inherit = FAM
-        script = echo I am $TITLE 4
\ No newline at end of file
+        script = echo I am $TITLE 4
diff --git a/tests/jinja2/simple/suite.rc-expanded b/tests/jinja2/simple/suite.rc-expanded
index 38094b5..ff44629 100644
--- a/tests/jinja2/simple/suite.rc-expanded
+++ b/tests/jinja2/simple/suite.rc-expanded
@@ -21,4 +21,4 @@
         script = echo I am $TITLE 3
     [[member_4]]
         inherit = FAM
-        script = echo I am $TITLE 4
\ No newline at end of file
+        script = echo I am $TITLE 4
diff --git a/tests/job-kill/01-remote.t b/tests/job-kill/01-remote.t
index 1cc953d..06e1097 100755
--- a/tests/job-kill/01-remote.t
+++ b/tests/job-kill/01-remote.t
@@ -18,9 +18,10 @@
 # Test killing of jobs on a remote host.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-export CYLC_TEST_HOST=$(cylc get-global-config -i '[test battery]remote host')
+export CYLC_TEST_HOST=$( \
+    cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
 if [[ -z $CYLC_TEST_HOST ]]; then
-    skip_all '[test battery]remote host: not defined'
+    skip_all '"[test battery]remote host": not defined'
 fi
 N_TESTS=3
 set_test_number $N_TESTS
diff --git a/tests/job-kill/01-remote/suite.rc b/tests/job-kill/01-remote/suite.rc
index 2bc1f5f..b1aba78 100644
--- a/tests/job-kill/01-remote/suite.rc
+++ b/tests/job-kill/01-remote/suite.rc
@@ -19,8 +19,6 @@ t2:start=>stop
 {% endif %}
     [[t1]]
         inherit=T
-        [[[job submission]]]
-            method=at
     [[t2]]
         inherit=T
     [[stop]]
diff --git a/tests/job-kill/02-loadleveler.t b/tests/job-kill/02-loadleveler.t
index 0f02a1e..f40f3fe 100755
--- a/tests/job-kill/02-loadleveler.t
+++ b/tests/job-kill/02-loadleveler.t
@@ -19,10 +19,11 @@
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 BATCH_SYS_NAME="${TEST_NAME_BASE##??-}"
-export CYLC_TEST_BATCH_TASK_HOST=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME]host")
-export CYLC_TEST_BATCH_SITE_DIRECTIVES=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME][directives]")
+RC_PREF="[test battery][batch systems][$BATCH_SYS_NAME]"
+export CYLC_TEST_BATCH_TASK_HOST=$( \
+    cylc get-global-config -i "${RC_PREF}host" 2>'/dev/null')
+export CYLC_TEST_BATCH_SITE_DIRECTIVES=$( \
+    cylc get-global-config -i "${RC_PREF}[directives]" 2>'/dev/null')
 if [[ -z "${CYLC_TEST_BATCH_TASK_HOST}" || "${CYLC_TEST_BATCH_TASK_HOST}" == None ]]
 then
     skip_all "\"[test battery][batch systems][$BATCH_SYS_NAME]host\" not defined"
@@ -30,7 +31,7 @@ fi
 # check the host is reachable
 if ! ssh -n ${SSH_OPTS} "${CYLC_TEST_BATCH_TASK_HOST}" true 1>/dev/null 2>&1
 then
-    skip_all "Host "$CYLC_TEST_BATCH_TASK_HOST" unreachable"
+    skip_all "Host \"$CYLC_TEST_BATCH_TASK_HOST\" unreachable"
 fi
 set_test_number 2
 
diff --git a/tests/job-kill/02-loadleveler/suite.rc b/tests/job-kill/02-loadleveler/suite.rc
index a3da41a..f4e9ae3 100644
--- a/tests/job-kill/02-loadleveler/suite.rc
+++ b/tests/job-kill/02-loadleveler/suite.rc
@@ -27,6 +27,6 @@
 {% endif %}
     [[stop]]
         script="""
-cylc kill $CYLC_SUITE_REG_NAME t1 1 || true
+cylc kill $CYLC_SUITE_REG_NAME t1 1
 cylc stop $CYLC_SUITE_REG_NAME
 """
diff --git a/tests/job-kill/03-slurm/suite.rc b/tests/job-kill/03-slurm/suite.rc
index 4a60d93..29e5d24 100644
--- a/tests/job-kill/03-slurm/suite.rc
+++ b/tests/job-kill/03-slurm/suite.rc
@@ -29,6 +29,6 @@
 {% endif %}
     [[stop]]
         script="""
-cylc kill $CYLC_SUITE_REG_NAME t1 1 || true
+cylc kill $CYLC_SUITE_REG_NAME t1 1
 cylc stop $CYLC_SUITE_REG_NAME
 """
diff --git a/tests/job-kill/04-pbs/suite.rc b/tests/job-kill/04-pbs/suite.rc
index 7134df8..9cf178d 100644
--- a/tests/job-kill/04-pbs/suite.rc
+++ b/tests/job-kill/04-pbs/suite.rc
@@ -24,6 +24,6 @@
 {% endif %}
     [[stop]]
         script="""
-cylc kill $CYLC_SUITE_REG_NAME t1 1 || true
+cylc kill $CYLC_SUITE_REG_NAME t1 1
 cylc stop $CYLC_SUITE_REG_NAME
 """
diff --git a/tests/job-poll/00-late/suite.rc b/tests/job-poll/00-late/suite.rc
index a030723..8c6a07f 100644
--- a/tests/job-poll/00-late/suite.rc
+++ b/tests/job-poll/00-late/suite.rc
@@ -14,7 +14,7 @@ description = "if not, the test will fail due to an unexpected task failure."
         script="""
 sleep 2
 # report task succeeded (but don't exit):
-cylc succeeded
+cylc task message succeeded
 sleep 2
 # fake a "polled-as-failed" result:
 # (real poll messages have "at <time>" or "at unknown-time" appended,
diff --git a/tests/job-submission/02-job-nn-remote-host.t b/tests/job-submission/02-job-nn-remote-host.t
index 0f87fcb..c5a40d3 100755
--- a/tests/job-submission/02-job-nn-remote-host.t
+++ b/tests/job-submission/02-job-nn-remote-host.t
@@ -19,9 +19,9 @@
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 export CYLC_TEST_HOST=$( \
-    cylc get-global-config -i '[test battery]remote host')
+    cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
 if [[ -z "$CYLC_TEST_HOST" ]]; then
-    skip_all '[test battery]remote host: not defined'
+    skip_all '"[test battery]remote host": not defined'
 fi
 set_test_number 2
 #-------------------------------------------------------------------------------
diff --git a/tests/job-submission/03-job-nn-remote-host-with-shared-fs.t b/tests/job-submission/03-job-nn-remote-host-with-shared-fs.t
index 9666808..60a90c1 100755
--- a/tests/job-submission/03-job-nn-remote-host-with-shared-fs.t
+++ b/tests/job-submission/03-job-nn-remote-host-with-shared-fs.t
@@ -19,9 +19,10 @@
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 export CYLC_TEST_HOST=$( \
-    cylc get-global-config -i '[test battery]remote host with shared fs')
+    cylc get-global-config -i '[test battery]remote host with shared fs' \
+    2>'/dev/null')
 if [[ -z "$CYLC_TEST_HOST" ]]; then
-    skip_all '[test battery]remote host with shared fs: not defined'
+    skip_all '"[test battery]remote host with shared fs": not defined'
 fi
 set_test_number 2
 #-------------------------------------------------------------------------------
diff --git a/tests/job-submission/05-activity-log.t b/tests/job-submission/05-activity-log.t
index 6544823..1f6ab48 100755
--- a/tests/job-submission/05-activity-log.t
+++ b/tests/job-submission/05-activity-log.t
@@ -29,11 +29,14 @@ suite_run_ok "${TEST_NAME_BASE}-run" \
 SUITE_RUN_DIR="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}"
 T1_ACTIVITY_LOG="${SUITE_RUN_DIR}/log/job/1/t1/NN/job-activity.log"
 
-grep_ok 'SUBMIT-OUT:' "${T1_ACTIVITY_LOG}"
-grep_ok 'KILL-ERR:' "${T1_ACTIVITY_LOG}"
-grep_ok 'OSError: \[Errno 3\] No such process' "${T1_ACTIVITY_LOG}"
-grep_ok 'POLL-OUT: polled t1\.1 failed at unknown-time' "${T1_ACTIVITY_LOG}"
-grep_ok "EVENT-OUT: failed ${SUITE_NAME} t1\\.1 job failed" "${T1_ACTIVITY_LOG}"
+grep_ok '\[job-submit ret_code\] 0' "${T1_ACTIVITY_LOG}"
+grep_ok '\[job-kill ret_code\] 1' "${T1_ACTIVITY_LOG}"
+grep_ok '\[job-kill out\] [^|]*\|1/t1/01\|1' "${T1_ACTIVITY_LOG}"
+grep_ok '\[job-poll out\] [^|]*\|1/t1/01\|background\|[^|]*\|1\|\|\|\|[^|]*\|' \
+    "${T1_ACTIVITY_LOG}"
+grep_ok \
+    "\\[(('event-handler-00', 'failed'), 1) out\\] failed ${SUITE_NAME} t1\\.1 job failed" \
+    "${T1_ACTIVITY_LOG}"
 #-------------------------------------------------------------------------------
 purge_suite "${SUITE_NAME}"
 exit
diff --git a/tests/job-submission/06-garbage/suite.rc b/tests/job-submission/06-garbage/suite.rc
index e980d81..b173764 100644
--- a/tests/job-submission/06-garbage/suite.rc
+++ b/tests/job-submission/06-garbage/suite.rc
@@ -16,7 +16,7 @@
     [[t2]]
         script = """
 grep -q -F \
-    'OSError: [Errno 2] No such file or directory: '"'"'bad-bad-bad-submit'"'" \
+    '1/t1/01|[STDERR] [Errno 2] No such file or directory: '"'"'bad-bad-bad-submit'"'" \
     "${CYLC_SUITE_LOG_DIR}/log"
 cylc shutdown "${CYLC_SUITE_NAME}"
 """
diff --git a/tests/job-submission/07-multi.t b/tests/job-submission/07-multi.t
new file mode 100755
index 0000000..f374163
--- /dev/null
+++ b/tests/job-submission/07-multi.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test job submission, multiple jobs per host.
+. "$(dirname "$0")/test_header"
+CYLC_TEST_HOST="$( \
+    cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')"
+if [[ -z "${CYLC_TEST_HOST}" ]]; then
+    skip_all '"[test battery]remote host": not defined'
+fi
+set_test_number 3
+
+install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+set -eu
+SSH='ssh -oBatchMode=yes -oConnectTimeout=5'
+${SSH} "${CYLC_TEST_HOST}" \
+    "mkdir -p '.cylc/${SUITE_NAME}/' && cat >'.cylc/${SUITE_NAME}/passphrase'" \
+    <"${TEST_DIR}/${SUITE_NAME}/passphrase"
+set +eu
+
+run_ok "${TEST_NAME_BASE}-validate" \
+    cylc validate "${SUITE_NAME}" -s "CYLC_TEST_HOST=${CYLC_TEST_HOST}"
+suite_run_ok "${TEST_NAME_BASE}-run" \
+    cylc run --debug --reference-test -s "CYLC_TEST_HOST=${CYLC_TEST_HOST}" \
+    "${SUITE_NAME}"
+
+RUN_DIR="$(cylc get-global-config --print-run-dir)/${SUITE_NAME}"
+LOG="${RUN_DIR}/log/suite/log"
+sed -n 's/^.*\(cylc jobs-submit\)/\1/p' "${LOG}" | sort >'edited-suite-log'
+
+sort >'edited-suite-log-ref' <<__LOG__
+cylc jobs-submit --debug -- ${RUN_DIR}/log/job 20200101T0000Z/t0/01 20200101T0000Z/t1/01 20200101T0000Z/t2/01 20200101T0000Z/t3/01
+cylc jobs-submit --debug -- ${RUN_DIR}/log/job 20210101T0000Z/t0/01 20210101T0000Z/t1/01 20210101T0000Z/t2/01 20210101T0000Z/t3/01
+cylc jobs-submit --debug -- ${RUN_DIR}/log/job 20220101T0000Z/t0/01 20220101T0000Z/t1/01 20220101T0000Z/t2/01 20220101T0000Z/t3/01
+cylc jobs-submit --debug -- ${RUN_DIR}/log/job 20230101T0000Z/t0/01 20230101T0000Z/t1/01 20230101T0000Z/t2/01 20230101T0000Z/t3/01
+cylc jobs-submit --debug -- ${RUN_DIR}/log/job 20240101T0000Z/t0/01 20240101T0000Z/t1/01 20240101T0000Z/t2/01 20240101T0000Z/t3/01
+cylc jobs-submit --debug -- ${RUN_DIR}/log/job 20250101T0000Z/t0/01 20250101T0000Z/t1/01 20250101T0000Z/t2/01 20250101T0000Z/t3/01
+cylc jobs-submit --debug --host=${CYLC_TEST_HOST} --remote-mode -- '\$HOME/cylc-run/${SUITE_NAME}/log/job' 20200101T0000Z/t4/01 20200101T0000Z/t5/01 20200101T0000Z/t6/01
+cylc jobs-submit --debug --host=${CYLC_TEST_HOST} --remote-mode -- '\$HOME/cylc-run/${SUITE_NAME}/log/job' 20210101T0000Z/t4/01 20210101T0000Z/t5/01 20210101T0000Z/t6/01
+cylc jobs-submit --debug --host=${CYLC_TEST_HOST} --remote-mode -- '\$HOME/cylc-run/${SUITE_NAME}/log/job' 20220101T0000Z/t4/01 20220101T0000Z/t5/01 20220101T0000Z/t6/01
+cylc jobs-submit --debug --host=${CYLC_TEST_HOST} --remote-mode -- '\$HOME/cylc-run/${SUITE_NAME}/log/job' 20230101T0000Z/t4/01 20230101T0000Z/t5/01 20230101T0000Z/t6/01
+cylc jobs-submit --debug --host=${CYLC_TEST_HOST} --remote-mode -- '\$HOME/cylc-run/${SUITE_NAME}/log/job' 20240101T0000Z/t4/01 20240101T0000Z/t5/01 20240101T0000Z/t6/01
+cylc jobs-submit --debug --host=${CYLC_TEST_HOST} --remote-mode -- '\$HOME/cylc-run/${SUITE_NAME}/log/job' 20250101T0000Z/t4/01 20250101T0000Z/t5/01 20250101T0000Z/t6/01
+__LOG__
+cmp_ok 'edited-suite-log' 'edited-suite-log-ref'
+
+${SSH} "${CYLC_TEST_HOST}" \
+    "rm -rf '.cylc/${SUITE_NAME}' 'cylc-run/${SUITE_NAME}'"
+purge_suite "${SUITE_NAME}"
+exit
diff --git a/tests/job-submission/07-multi/reference.log b/tests/job-submission/07-multi/reference.log
new file mode 100644
index 0000000..454f1c8
--- /dev/null
+++ b/tests/job-submission/07-multi/reference.log
@@ -0,0 +1,46 @@
+2015-09-02T13:21:42Z INFO - Run mode: live
+2015-09-02T13:21:42Z INFO - Initial point: 20200101T0000Z
+2015-09-02T13:21:42Z INFO - Final point: 20250101T0000Z
+2015-09-02T13:21:42Z INFO - Cold Start 20200101T0000Z
+2015-09-02T13:21:42Z INFO - [t3.20200101T0000Z] -triggered off []
+2015-09-02T13:21:42Z INFO - [t2.20200101T0000Z] -triggered off []
+2015-09-02T13:21:42Z INFO - [t1.20200101T0000Z] -triggered off []
+2015-09-02T13:21:42Z INFO - [t6.20200101T0000Z] -triggered off []
+2015-09-02T13:21:43Z INFO - [t4.20200101T0000Z] -triggered off []
+2015-09-02T13:21:43Z INFO - [t5.20200101T0000Z] -triggered off []
+2015-09-02T13:21:43Z INFO - [t0.20200101T0000Z] -triggered off []
+2015-09-02T13:21:47Z INFO - [t5.20210101T0000Z] -triggered off ['t0.20200101T0000Z', 't1.20200101T0000Z', 't2.20200101T0000Z', 't3.20200101T0000Z', 't4.20200101T0000Z', 't5.20200101T0000Z', 't6.20200101T0000Z']
+2015-09-02T13:21:47Z INFO - [t1.20210101T0000Z] -triggered off ['t0.20200101T0000Z', 't1.20200101T0000Z', 't2.20200101T0000Z', 't3.20200101T0000Z', 't4.20200101T0000Z', 't5.20200101T0000Z', 't6.20200101T0000Z']
+2015-09-02T13:21:47Z INFO - [t0.20210101T0000Z] -triggered off ['t0.20200101T0000Z', 't1.20200101T0000Z', 't2.20200101T0000Z', 't3.20200101T0000Z', 't4.20200101T0000Z', 't5.20200101T0000Z', 't6.20200101T0000Z']
+2015-09-02T13:21:47Z INFO - [t2.20210101T0000Z] -triggered off ['t0.20200101T0000Z', 't1.20200101T0000Z', 't2.20200101T0000Z', 't3.20200101T0000Z', 't4.20200101T0000Z', 't5.20200101T0000Z', 't6.20200101T0000Z']
+2015-09-02T13:21:47Z INFO - [t4.20210101T0000Z] -triggered off ['t0.20200101T0000Z', 't1.20200101T0000Z', 't2.20200101T0000Z', 't3.20200101T0000Z', 't4.20200101T0000Z', 't5.20200101T0000Z', 't6.20200101T0000Z']
+2015-09-02T13:21:47Z INFO - [t3.20210101T0000Z] -triggered off ['t0.20200101T0000Z', 't1.20200101T0000Z', 't2.20200101T0000Z', 't3.20200101T0000Z', 't4.20200101T0000Z', 't5.20200101T0000Z', 't6.20200101T0000Z']
+2015-09-02T13:21:47Z INFO - [t6.20210101T0000Z] -triggered off ['t0.20200101T0000Z', 't1.20200101T0000Z', 't2.20200101T0000Z', 't3.20200101T0000Z', 't4.20200101T0000Z', 't5.20200101T0000Z', 't6.20200101T0000Z']
+2015-09-02T13:21:52Z INFO - [t5.20220101T0000Z] -triggered off ['t0.20210101T0000Z', 't1.20210101T0000Z', 't2.20210101T0000Z', 't3.20210101T0000Z', 't4.20210101T0000Z', 't5.20210101T0000Z', 't6.20210101T0000Z']
+2015-09-02T13:21:52Z INFO - [t2.20220101T0000Z] -triggered off ['t0.20210101T0000Z', 't1.20210101T0000Z', 't2.20210101T0000Z', 't3.20210101T0000Z', 't4.20210101T0000Z', 't5.20210101T0000Z', 't6.20210101T0000Z']
+2015-09-02T13:21:52Z INFO - [t3.20220101T0000Z] -triggered off ['t0.20210101T0000Z', 't1.20210101T0000Z', 't2.20210101T0000Z', 't3.20210101T0000Z', 't4.20210101T0000Z', 't5.20210101T0000Z', 't6.20210101T0000Z']
+2015-09-02T13:21:52Z INFO - [t0.20220101T0000Z] -triggered off ['t0.20210101T0000Z', 't1.20210101T0000Z', 't2.20210101T0000Z', 't3.20210101T0000Z', 't4.20210101T0000Z', 't5.20210101T0000Z', 't6.20210101T0000Z']
+2015-09-02T13:21:52Z INFO - [t4.20220101T0000Z] -triggered off ['t0.20210101T0000Z', 't1.20210101T0000Z', 't2.20210101T0000Z', 't3.20210101T0000Z', 't4.20210101T0000Z', 't5.20210101T0000Z', 't6.20210101T0000Z']
+2015-09-02T13:21:52Z INFO - [t1.20220101T0000Z] -triggered off ['t0.20210101T0000Z', 't1.20210101T0000Z', 't2.20210101T0000Z', 't3.20210101T0000Z', 't4.20210101T0000Z', 't5.20210101T0000Z', 't6.20210101T0000Z']
+2015-09-02T13:21:52Z INFO - [t6.20220101T0000Z] -triggered off ['t0.20210101T0000Z', 't1.20210101T0000Z', 't2.20210101T0000Z', 't3.20210101T0000Z', 't4.20210101T0000Z', 't5.20210101T0000Z', 't6.20210101T0000Z']
+2015-09-02T13:21:56Z INFO - [t2.20230101T0000Z] -triggered off ['t0.20220101T0000Z', 't1.20220101T0000Z', 't2.20220101T0000Z', 't3.20220101T0000Z', 't4.20220101T0000Z', 't5.20220101T0000Z', 't6.20220101T0000Z']
+2015-09-02T13:21:56Z INFO - [t4.20230101T0000Z] -triggered off ['t0.20220101T0000Z', 't1.20220101T0000Z', 't2.20220101T0000Z', 't3.20220101T0000Z', 't4.20220101T0000Z', 't5.20220101T0000Z', 't6.20220101T0000Z']
+2015-09-02T13:21:57Z INFO - [t5.20230101T0000Z] -triggered off ['t0.20220101T0000Z', 't1.20220101T0000Z', 't2.20220101T0000Z', 't3.20220101T0000Z', 't4.20220101T0000Z', 't5.20220101T0000Z', 't6.20220101T0000Z']
+2015-09-02T13:21:57Z INFO - [t1.20230101T0000Z] -triggered off ['t0.20220101T0000Z', 't1.20220101T0000Z', 't2.20220101T0000Z', 't3.20220101T0000Z', 't4.20220101T0000Z', 't5.20220101T0000Z', 't6.20220101T0000Z']
+2015-09-02T13:21:57Z INFO - [t6.20230101T0000Z] -triggered off ['t0.20220101T0000Z', 't1.20220101T0000Z', 't2.20220101T0000Z', 't3.20220101T0000Z', 't4.20220101T0000Z', 't5.20220101T0000Z', 't6.20220101T0000Z']
+2015-09-02T13:21:57Z INFO - [t3.20230101T0000Z] -triggered off ['t0.20220101T0000Z', 't1.20220101T0000Z', 't2.20220101T0000Z', 't3.20220101T0000Z', 't4.20220101T0000Z', 't5.20220101T0000Z', 't6.20220101T0000Z']
+2015-09-02T13:21:57Z INFO - [t0.20230101T0000Z] -triggered off ['t0.20220101T0000Z', 't1.20220101T0000Z', 't2.20220101T0000Z', 't3.20220101T0000Z', 't4.20220101T0000Z', 't5.20220101T0000Z', 't6.20220101T0000Z']
+2015-09-02T13:22:01Z INFO - [t5.20240101T0000Z] -triggered off ['t0.20230101T0000Z', 't1.20230101T0000Z', 't2.20230101T0000Z', 't3.20230101T0000Z', 't4.20230101T0000Z', 't5.20230101T0000Z', 't6.20230101T0000Z']
+2015-09-02T13:22:01Z INFO - [t0.20240101T0000Z] -triggered off ['t0.20230101T0000Z', 't1.20230101T0000Z', 't2.20230101T0000Z', 't3.20230101T0000Z', 't4.20230101T0000Z', 't5.20230101T0000Z', 't6.20230101T0000Z']
+2015-09-02T13:22:01Z INFO - [t2.20240101T0000Z] -triggered off ['t0.20230101T0000Z', 't1.20230101T0000Z', 't2.20230101T0000Z', 't3.20230101T0000Z', 't4.20230101T0000Z', 't5.20230101T0000Z', 't6.20230101T0000Z']
+2015-09-02T13:22:01Z INFO - [t3.20240101T0000Z] -triggered off ['t0.20230101T0000Z', 't1.20230101T0000Z', 't2.20230101T0000Z', 't3.20230101T0000Z', 't4.20230101T0000Z', 't5.20230101T0000Z', 't6.20230101T0000Z']
+2015-09-02T13:22:01Z INFO - [t6.20240101T0000Z] -triggered off ['t0.20230101T0000Z', 't1.20230101T0000Z', 't2.20230101T0000Z', 't3.20230101T0000Z', 't4.20230101T0000Z', 't5.20230101T0000Z', 't6.20230101T0000Z']
+2015-09-02T13:22:01Z INFO - [t1.20240101T0000Z] -triggered off ['t0.20230101T0000Z', 't1.20230101T0000Z', 't2.20230101T0000Z', 't3.20230101T0000Z', 't4.20230101T0000Z', 't5.20230101T0000Z', 't6.20230101T0000Z']
+2015-09-02T13:22:01Z INFO - [t4.20240101T0000Z] -triggered off ['t0.20230101T0000Z', 't1.20230101T0000Z', 't2.20230101T0000Z', 't3.20230101T0000Z', 't4.20230101T0000Z', 't5.20230101T0000Z', 't6.20230101T0000Z']
+2015-09-02T13:22:06Z INFO - [t4.20250101T0000Z] -triggered off ['t0.20240101T0000Z', 't1.20240101T0000Z', 't2.20240101T0000Z', 't3.20240101T0000Z', 't4.20240101T0000Z', 't5.20240101T0000Z', 't6.20240101T0000Z']
+2015-09-02T13:22:06Z INFO - [t6.20250101T0000Z] -triggered off ['t0.20240101T0000Z', 't1.20240101T0000Z', 't2.20240101T0000Z', 't3.20240101T0000Z', 't4.20240101T0000Z', 't5.20240101T0000Z', 't6.20240101T0000Z']
+2015-09-02T13:22:06Z INFO - [t2.20250101T0000Z] -triggered off ['t0.20240101T0000Z', 't1.20240101T0000Z', 't2.20240101T0000Z', 't3.20240101T0000Z', 't4.20240101T0000Z', 't5.20240101T0000Z', 't6.20240101T0000Z']
+2015-09-02T13:22:06Z INFO - [t3.20250101T0000Z] -triggered off ['t0.20240101T0000Z', 't1.20240101T0000Z', 't2.20240101T0000Z', 't3.20240101T0000Z', 't4.20240101T0000Z', 't5.20240101T0000Z', 't6.20240101T0000Z']
+2015-09-02T13:22:06Z INFO - [t1.20250101T0000Z] -triggered off ['t0.20240101T0000Z', 't1.20240101T0000Z', 't2.20240101T0000Z', 't3.20240101T0000Z', 't4.20240101T0000Z', 't5.20240101T0000Z', 't6.20240101T0000Z']
+2015-09-02T13:22:06Z INFO - [t0.20250101T0000Z] -triggered off ['t0.20240101T0000Z', 't1.20240101T0000Z', 't2.20240101T0000Z', 't3.20240101T0000Z', 't4.20240101T0000Z', 't5.20240101T0000Z', 't6.20240101T0000Z']
+2015-09-02T13:22:06Z INFO - [t5.20250101T0000Z] -triggered off ['t0.20240101T0000Z', 't1.20240101T0000Z', 't2.20240101T0000Z', 't3.20240101T0000Z', 't4.20240101T0000Z', 't5.20240101T0000Z', 't6.20240101T0000Z']
diff --git a/tests/job-submission/07-multi/suite.rc b/tests/job-submission/07-multi/suite.rc
new file mode 100644
index 0000000..a9acfad
--- /dev/null
+++ b/tests/job-submission/07-multi/suite.rc
@@ -0,0 +1,33 @@
+#!Jinja2
+[cylc]
+    UTC mode = True
+   [[reference test]]
+       required run mode = live
+       live mode suite timeout = PT3M
+
+[scheduling]
+    initial cycle point=2020
+    final cycle point=2025
+    [[dependencies]]
+        [[[P1Y]]]
+            graph="""
+T[-P1Y]:succeed-all => T
+"""
+[runtime]
+    [[T]]
+        script = true
+#        script=sleep 120
+#        [[[remote]]]
+#            host=xcfl00
+#        [[[job submission]]]
+#            method = pbs
+#        [[[directives]]]
+#            -m = n
+#            -q = shared
+#            -r = n
+    [[t0,t1,t2,t3]]
+        inherit = T
+    [[t4,t5,t6]]
+        inherit = T
+        [[[remote]]]
+            host = {{CYLC_TEST_HOST}}
diff --git a/tests/jobscript/00-torture.t b/tests/jobscript/00-torture.t
index cf170ad..6dc0787 100644
--- a/tests/jobscript/00-torture.t
+++ b/tests/jobscript/00-torture.t
@@ -32,7 +32,6 @@ CYLC_CONF_PATH= suite_run_ok $TEST_NAME \
 TEST_NAME=$TEST_NAME_BASE-foo-jobscript-match
 CYLC_CONF_PATH= run_ok $TEST_NAME cylc jobscript $SUITE_NAME foo.1
 sed 's/\(export CYLC_.*=\).*/\1/g' $TEST_NAME.stdout >jobfile
-echo "" >> jobfile
 sed 's/##suitename##/'$SUITE_NAME'/' \
     $TEST_SOURCE_DIR/$TEST_NAME_BASE/foo.ref-jobfile >reffile
 cmp_ok jobfile reffile
diff --git a/tests/jobscript/00-torture/foo.ref-jobfile b/tests/jobscript/00-torture/foo.ref-jobfile
index 415651f..db34013 100644
--- a/tests/jobscript/00-torture/foo.ref-jobfile
+++ b/tests/jobscript/00-torture/foo.ref-jobfile
@@ -3,6 +3,7 @@
 # ++++ THIS IS A CYLC TASK JOB SCRIPT ++++
 # Suite: ##suitename##
 # Task: foo.1
+# Job log directory: 1/foo/01
 # Job submit method: background
 
 echo "JOB SCRIPT STARTING"
@@ -33,13 +34,10 @@ TRAP_FAIL_SIGNAL() {
     for S in ${VACATION_SIGNALS:-} $FAIL_SIGNALS; do
         trap "" $S
     done
-    if [[ -n ${CYLC_TASK_LOG_ROOT:-} ]]; then
-        {
-            echo "CYLC_JOB_EXIT=$SIGNAL"
-            date -u +'CYLC_JOB_EXIT_TIME=%FT%H:%M:%SZ'
-        } >>$CYLC_TASK_LOG_ROOT.status
+    if [[ -n "${CYLC_TASK_MESSAGE_STARTED_PID:-}" ]]; then
+        wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
     fi
-    cylc task failed "Task job script received signal $@"
+    cylc task message -p 'CRITICAL' "Task job script received signal $SIGNAL" 'failed'
     exit 1
 }
 for S in $FAIL_SIGNALS; do
@@ -94,6 +92,7 @@ export CYLC_TASK_SUBMIT_NUMBER=
 export CYLC_TASK_TRY_NUMBER=
 export CYLC_TASK_WORK_DIR=
 export CYLC_TASK_WORK_PATH=
+export CYLC_JOB_PID=
 
 # ACCESS TO THE SUITE BIN DIRECTORY:
 export PATH=$CYLC_SUITE_DEF_PATH/bin:$PATH
@@ -107,11 +106,8 @@ E_FIV="$( foo.sh )"
 export E_ONE E_TWO E_THR E_FOU E_FIV
 
 # SEND TASK STARTED MESSAGE:
-{
-    echo "CYLC_JOB_PID=$$"
-    date -u +'CYLC_JOB_INIT_TIME=%FT%H:%M:%SZ'
-} >>$CYLC_TASK_LOG_ROOT.status
-cylc task started
+cylc task message 'started' &
+CYLC_TASK_MESSAGE_STARTED_PID=$!
 
 # SHARE DIRECTORY CREATE:
 mkdir -p $CYLC_SUITE_SHARE_DIR || true
@@ -193,13 +189,10 @@ cd
 rmdir $CYLC_TASK_WORK_DIR 2>/dev/null || true
 
 # SEND TASK SUCCEEDED MESSAGE:
-{
-    echo 'CYLC_JOB_EXIT=SUCCEEDED'
-    date -u +'CYLC_JOB_EXIT_TIME=%FT%H:%M:%SZ'
-} >>$CYLC_TASK_LOG_ROOT.status
-cylc task succeeded
+wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
+cylc task message 'succeeded'
 
 echo 'JOB SCRIPT EXITING (TASK SUCCEEDED)'
 trap '' EXIT
 
-#EOF
+#EOF: 1/foo/01
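
The reference job file diff above captures the new messaging pattern: the 'started' message is now sent in the background with its PID recorded, and the script waits on that PID before reporting success or a trapped signal, so messages reach the suite in order. A minimal sketch of the same pattern outside a generated job script (illustrative only; the task body is a placeholder, and only commands shown in the diff are used):

    cylc task message 'started' &
    CYLC_TASK_MESSAGE_STARTED_PID=$!
    # ... task body ...
    # wait for the backgrounded 'started' message before the final message
    wait "${CYLC_TASK_MESSAGE_STARTED_PID}" 2>/dev/null || true
    cylc task message 'succeeded'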
diff --git a/tests/lib/bash/test_header b/tests/lib/bash/test_header
index 49ca7ce..3e09d96 100644
--- a/tests/lib/bash/test_header
+++ b/tests/lib/bash/test_header
@@ -58,6 +58,12 @@
 #         Test that FILE does not exist
 #     init_suite SUITE_NAME
 #         Create a suite called '__cylc__test__${SUITE_NAME}__' in $TEST_DIR.
+#     mock_smtpd_init
+#         Start a mock SMTP server daemon for testing. Write host:port setting
+#         to the variable TEST_SMTPD_HOST. Write pid of daemon to
+#         TEST_SMTPD_PID. Write log to TEST_SMTPD_LOG.
+#     mock_smtpd_kill
+#         Kill the mock SMTP server daemon process.
 #     purge_suite SUITE_NAME
 #         Tidy up test directories for SUITE_NAME.
 #     poll COMMAND
@@ -88,6 +94,9 @@ function FINALLY() {
         ssh -oBatchMode=yes -oConnectTimeout=5 "${TEST_RHOST_CYLC_DIR%%:*}" \
             "rm -fr ${TEST_RHOST_CYLC_DIR#*:}"
     fi
+    if [[ -n "${TEST_SMTPD_PID:-}" ]]; then
+        kill "${TEST_SMTPD_PID}"
+    fi
     if (($FAILURES > 0)); then
         echo -e "\n    stdout and stderr stored in: $TEST_LOG_DIR" >&2
         if $SUITE_RUN_FAILS; then
@@ -180,9 +189,11 @@ function cmp_ok() {
     local FILE_CONTROL=${2:--}
     local TEST_NAME=$(basename $FILE_TEST)-cmp-ok
     local DIFF_CMD=${CYLC_TEST_DIFF_CMD:-'diff -u'}
-    if ${DIFF_CMD} "$FILE_TEST" "$FILE_CONTROL" 1>$TEST_NAME.stderr 2>&1; then
+    if ${DIFF_CMD} "$FILE_TEST" "$FILE_CONTROL" >"${TEST_NAME}.stderr" 2>&1;then
         ok $TEST_NAME
         return
+    else
+        cat "${TEST_NAME}.stderr" >&2
     fi
     mkdir -p $TEST_LOG_DIR
     cp $TEST_NAME.stderr $TEST_LOG_DIR/$TEST_NAME.stderr
@@ -342,6 +353,41 @@ print mkdtemp(dir=os.path.expanduser("~"), prefix="cylc-")
 __PYTHON__
 }
 
+function mock_smtpd_init() {  # Logic borrowed from Rose
+    local SMTPD_PORT=
+    for SMTPD_PORT in 8025 8125 8225 8325 8425 8525 8625 8725 8825 8925; do 
+        local SMTPD_HOST="localhost:${SMTPD_PORT}"
+        local SMTPD_LOG="${TEST_DIR}/smtpd.log"
+        python -m 'smtpd' -c 'DebuggingServer' -d -n "${SMTPD_HOST}" \
+            1>"${SMTPD_LOG}" 2>&1 &
+        local SMTPD_PID="$!"
+        while ! grep -q 'DebuggingServer started' "${SMTPD_LOG}" 2>/dev/null; do
+            if ps "${SMTPD_PID}" 1>/dev/null 2>&1; then
+                sleep 1
+            else
+                rm -f "${SMTPD_LOG}"
+                unset SMTPD_HOST SMTPD_LOG SMTPD_PID
+                break
+            fi
+        done
+        if [[ -n "${SMTPD_PID:-}" ]]; then
+            TEST_SMTPD_HOST="${SMTPD_HOST}"
+            TEST_SMTPD_PID="${SMTPD_PID}"
+            TEST_SMTPD_LOG="${SMTPD_LOG}"
+            break
+        fi
+    done
+}
+
+function mock_smtpd_kill() {  # Logic borrowed from Rose
+    if [[ -n "${TEST_SMTPD_PID:-}" ]] && ps "${TEST_SMTPD_PID}" >/dev/null 2>&1
+    then
+        kill "${TEST_SMTPD_PID}"
+        wait "${TEST_SMTPD_PID}" 2>/dev/null || true
+        unset TEST_SMTPD_HOST TEST_SMTPD_LOG TEST_SMTPD_PID
+    fi
+}
+
 CYLC_DIR=${CYLC_DIR:-$(cd $(dirname $BASH_SOURCE)/../../.. && pwd)}
 PATH=$CYLC_DIR/bin:$PATH
 
diff --git a/tests/cyclers/34-implicit-back-compat.t b/tests/logging/00-client.t
old mode 100644
new mode 100755
similarity index 50%
copy from tests/cyclers/34-implicit-back-compat.t
copy to tests/logging/00-client.t
index 790ec2c..15c884a
--- a/tests/cyclers/34-implicit-back-compat.t
+++ b/tests/logging/00-client.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,22 +15,41 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Check that implicit cycling is ok in cylc-5 back compat mode.
+# Test logging of client connections and commands.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-set_test_number 3
-#-------------------------------------------------------------------------------
+set_test_number 4
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-cmp
-cmp_ok $TEST_NAME_BASE-validate.stderr <<__ERR__
-WARNING, foo: not explicitly defined in dependency graphs (deprecated)
-__ERR__
-#-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+suite_run_ok $TEST_NAME cylc run --no-detach --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+# Test logging of client commands invoked by task foo.
+UUID=$(cylc cat-log $SUITE_NAME | grep '\[client-connect].*cylc-hold' | awk '{print $7}')
+cylc cat-log $SUITE_NAME | grep "\[client-.* $UUID" | sed -e 's/^.* - //' > log1.txt
+USER_AT_HOST=${USER}@$(hostname -f)
+cmp_ok log1.txt << __END__
+[client-connect] ${USER_AT_HOST}:cylc-hold privilege='full-control' $UUID
+[client-command] hold_suite ${USER_AT_HOST}:cylc-hold $UUID
+[client-connect] ${USER_AT_HOST}:cylc-show privilege='full-control' $UUID
+[client-command] get_suite_info ${USER_AT_HOST}:cylc-show $UUID
+[client-connect] ${USER_AT_HOST}:cylc-broadcast privilege='full-control' $UUID
+[client-command] broadcast_get ${USER_AT_HOST}:cylc-broadcast $UUID
+[client-connect] ${USER_AT_HOST}:cylc-release privilege='full-control' $UUID
+[client-command] release_suite ${USER_AT_HOST}:cylc-release $UUID
+__END__
+#-------------------------------------------------------------------------------
+# Test logging of task messaging connections.
+cylc cat-log $SUITE_NAME | grep "\[client-.*cylc-message" | awk '{print $4,$5,$6}' > log2.txt
+USER_AT_HOST=${USER}@$(hostname -f)
+cmp_ok log2.txt << __END__
+[client-connect] ${USER_AT_HOST}:cylc-message privilege='full-control'
+[client-command] task_message ${USER_AT_HOST}:cylc-message
+[client-connect] ${USER_AT_HOST}:cylc-message privilege='full-control'
+[client-command] task_message ${USER_AT_HOST}:cylc-message
+__END__
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/logging/00-client/suite.rc b/tests/logging/00-client/suite.rc
new file mode 100644
index 0000000..55aad81
--- /dev/null
+++ b/tests/logging/00-client/suite.rc
@@ -0,0 +1,17 @@
+title = Generate log messages for various suite connection types.
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+    [[foo]]
+        script = """
+sleep 5
+# COMMAND INTERFACE:
+UUID=$(cylc hold --print-uuid $CYLC_SUITE_NAME 2>&1 > /dev/null)
+# INFO INTERFACE:
+cylc show --set-uuid=$UUID $CYLC_SUITE_NAME
+# BROADCAST INTERFACE:
+cylc broadcast --set-uuid=$UUID -d $CYLC_SUITE_NAME
+# COMMAND INTERFACE:
+cylc release --set-uuid=$UUID $CYLC_SUITE_NAME
+sleep 5"""
diff --git a/tests/purge/test_header b/tests/logging/test_header
similarity index 100%
copy from tests/purge/test_header
copy to tests/logging/test_header
diff --git a/tests/message-triggers/01-new/reference.log b/tests/message-triggers/01-new/reference.log
index b35f818..16ab80a 100644
--- a/tests/message-triggers/01-new/reference.log
+++ b/tests/message-triggers/01-new/reference.log
@@ -1,62 +1,13 @@
-2014-08-20T23:28:04Z INFO - port:7766
-2014-08-20T23:28:04Z INFO - Suite starting at 2014-08-20T23:28:04Z
-2014-08-20T23:28:04Z INFO - Log event clock: real time
 2014-08-20T23:28:04Z INFO - Run mode: live
 2014-08-20T23:28:04Z INFO - Initial point: 20140801T0000Z
 2014-08-20T23:28:04Z INFO - Final point: 20141201T0000Z
 2014-08-20T23:28:04Z INFO - Cold Start 20140801T0000Z
 2014-08-20T23:28:04Z INFO - [foo.20140801T0000Z] -triggered off []
 2014-08-20T23:28:04Z INFO - [baz.20140801T0000Z] -triggered off []
-2014-08-20T23:28:05Z INFO - [foo.20140801T0000Z] -submit_method_id=4397
-2014-08-20T23:28:05Z INFO - [foo.20140801T0000Z] -submission succeeded
-2014-08-20T23:28:05Z INFO - [baz.20140801T0000Z] -submit_method_id=4400
-2014-08-20T23:28:05Z INFO - [baz.20140801T0000Z] -submission succeeded
-2014-08-20T23:28:05Z INFO - [foo.20140801T0000Z] -(current:submitted)> foo.20140801T0000Z started at 2014-08-20T23:28:04Z
-2014-08-20T23:28:05Z INFO - [baz.20140801T0000Z] -(current:submitted)> baz.20140801T0000Z started at 2014-08-20T23:28:04Z
 2014-08-20T23:28:06Z INFO - [foo.20141001T0000Z] -triggered off []
-2014-08-20T23:28:07Z INFO - [foo.20141001T0000Z] -submit_method_id=4470
-2014-08-20T23:28:07Z INFO - [foo.20141001T0000Z] -submission succeeded
-2014-08-20T23:28:07Z INFO - [foo.20141001T0000Z] -(current:submitted)> foo.20141001T0000Z started at 2014-08-20T23:28:06Z
-2014-08-20T23:28:07Z INFO - [foo.20140801T0000Z] -(current:running)> file 1 for 20140801T0000Z done at 2014-08-20T23:28:06Z
 2014-08-20T23:28:08Z INFO - [foo.20141201T0000Z] -triggered off []
 2014-08-20T23:28:08Z INFO - [bar.20140801T0000Z] -triggered off ['foo.20140801T0000Z']
-2014-08-20T23:28:09Z INFO - [foo.20141201T0000Z] -submit_method_id=4513
-2014-08-20T23:28:09Z INFO - [foo.20141201T0000Z] -submission succeeded
-2014-08-20T23:28:09Z INFO - [bar.20140801T0000Z] -submit_method_id=4516
-2014-08-20T23:28:09Z INFO - [bar.20140801T0000Z] -submission succeeded
-2014-08-20T23:28:09Z INFO - [foo.20150201T0000Z] -holding (beyond suite stop point) 20141201T0000Z
-2014-08-20T23:28:09Z INFO - [foo.20141001T0000Z] -(current:running)> file 1 for 20141001T0000Z done at 2014-08-20T23:28:08Z
-2014-08-20T23:28:09Z INFO - [foo.20140801T0000Z] -(current:running)> file 2 for 20141001T0000Z done at 2014-08-20T23:28:09Z
-2014-08-20T23:28:09Z INFO - [foo.20141201T0000Z] -(current:submitted)> foo.20141201T0000Z started at 2014-08-20T23:28:08Z
-2014-08-20T23:28:09Z INFO - [bar.20140801T0000Z] -(current:submitted)> bar.20140801T0000Z started at 2014-08-20T23:28:08Z
 2014-08-20T23:28:10Z INFO - [baz.20141001T0000Z] -triggered off ['foo.20140801T0000Z']
 2014-08-20T23:28:10Z INFO - [bar.20141001T0000Z] -triggered off ['foo.20141001T0000Z']
-2014-08-20T23:28:11Z INFO - [baz.20141001T0000Z] -submit_method_id=4617
-2014-08-20T23:28:11Z INFO - [baz.20141001T0000Z] -submission succeeded
-2014-08-20T23:28:11Z INFO - [bar.20141001T0000Z] -submit_method_id=4620
-2014-08-20T23:28:11Z INFO - [bar.20141001T0000Z] -submission succeeded
-2014-08-20T23:28:11Z INFO - [foo.20141001T0000Z] -(current:running)> file 2 for 20141201T0000Z done at 2014-08-20T23:28:11Z
-2014-08-20T23:28:11Z INFO - [foo.20140801T0000Z] -(current:running)> foo.20140801T0000Z succeeded at 2014-08-20T23:28:11Z
-2014-08-20T23:28:11Z INFO - [foo.20141201T0000Z] -(current:running)> file 1 for 20141201T0000Z done at 2014-08-20T23:28:11Z
-2014-08-20T23:28:11Z INFO - [baz.20141001T0000Z] -(current:submitted)> baz.20141001T0000Z started at 2014-08-20T23:28:10Z
-2014-08-20T23:28:11Z INFO - [bar.20141001T0000Z] -(current:submitted)> bar.20141001T0000Z started at 2014-08-20T23:28:10Z
 2014-08-20T23:28:12Z INFO - [baz.20141201T0000Z] -triggered off ['foo.20141001T0000Z']
 2014-08-20T23:28:12Z INFO - [bar.20141201T0000Z] -triggered off ['foo.20141201T0000Z']
-2014-08-20T23:28:13Z INFO - [baz.20141201T0000Z] -submit_method_id=4742
-2014-08-20T23:28:13Z INFO - [baz.20141201T0000Z] -submission succeeded
-2014-08-20T23:28:13Z INFO - [bar.20141201T0000Z] -submit_method_id=4745
-2014-08-20T23:28:13Z INFO - [bar.20141201T0000Z] -submission succeeded
-2014-08-20T23:28:13Z INFO - [baz.20150201T0000Z] -holding (beyond suite stop point) 20141201T0000Z
-2014-08-20T23:28:13Z INFO - [bar.20150201T0000Z] -holding (beyond suite stop point) 20141201T0000Z
-2014-08-20T23:28:13Z INFO - [foo.20141001T0000Z] -(current:running)> foo.20141001T0000Z succeeded at 2014-08-20T23:28:13Z
-2014-08-20T23:28:13Z INFO - [foo.20141201T0000Z] -(current:running)> file 2 for 20150201T0000Z done at 2014-08-20T23:28:13Z
-2014-08-20T23:28:13Z INFO - [baz.20141201T0000Z] -(current:submitted)> baz.20141201T0000Z started at 2014-08-20T23:28:12Z
-2014-08-20T23:28:13Z INFO - [bar.20141201T0000Z] -(current:submitted)> bar.20141201T0000Z started at 2014-08-20T23:28:12Z
-2014-08-20T23:28:15Z INFO - [foo.20141201T0000Z] -(current:running)> foo.20141201T0000Z succeeded at 2014-08-20T23:28:15Z
-2014-08-20T23:28:15Z INFO - [baz.20140801T0000Z] -(current:running)> baz.20140801T0000Z succeeded at 2014-08-20T23:28:14Z
-2014-08-20T23:28:16Z INFO - [baz.20141201T0000Z] -(current:running)> baz.20141201T0000Z succeeded at 2014-08-20T23:28:16Z
-2014-08-20T23:28:18Z INFO - [baz.20141001T0000Z] -(current:running)> baz.20141001T0000Z succeeded at 2014-08-20T23:28:18Z
-2014-08-20T23:28:19Z INFO - [bar.20140801T0000Z] -(current:running)> bar.20140801T0000Z succeeded at 2014-08-20T23:28:19Z
-2014-08-20T23:28:21Z INFO - [bar.20141001T0000Z] -(current:running)> bar.20141001T0000Z succeeded at 2014-08-20T23:28:21Z
-2014-08-20T23:28:21Z INFO - [bar.20141201T0000Z] -(current:running)> bar.20141201T0000Z succeeded at 2014-08-20T23:28:21Z
-2014-08-20T23:28:22Z INFO - Suite shutting down at 2014-08-20T23:28:22Z
diff --git a/tests/message-triggers/01-new/suite.rc b/tests/message-triggers/01-new/suite.rc
index 21a0be4..fa65675 100644
--- a/tests/message-triggers/01-new/suite.rc
+++ b/tests/message-triggers/01-new/suite.rc
@@ -1,4 +1,4 @@
-title = "test suite for cylc-6 message triggers"
+title = test suite for cylc-6 message triggers
 [cylc]
     UTC mode = True
     [[reference test]]
@@ -10,20 +10,21 @@ title = "test suite for cylc-6 message triggers"
     [[dependencies]]
         [[[P2M]]]
            graph = """
-            foo:x => bar
-            foo[-P2M]:y => baz
-                  """
+foo:x => bar
+foo[-P2M]:y => baz
+"""
+
 [runtime]
+    [[bar, baz]]
+        script = true
     [[foo]]
         script = """
 echo HELLO
-sleep 2 
-TARGET_POINT=$CYLC_TASK_CYCLE_POINT
-cylc message "file 1 for $TARGET_POINT done"
-sleep 2
-TARGET_POINT=$(cylc cycle-point --offset P2M)
-cylc message "file 2 for $TARGET_POINT done"
-sleep 2
+MESSAGE_X="file 1 for $CYLC_TASK_CYCLE_POINT done"
+MESSAGE_Y="file 2 for $(cylc cycle-point --offset P2M) done"
+cylc message "${MESSAGE_X}" "${MESSAGE_Y}"
+grep -q "CYLC_MESSAGE=.*|NORMAL|${MESSAGE_X}" "$0.status"
+grep -q "CYLC_MESSAGE=.*|NORMAL|${MESSAGE_Y}" "$0.status"
 """
         [[[outputs]]]
             x = "file 1 for [] done"
diff --git a/tests/cylc-poll/01-task-failed.t b/tests/message-triggers/02-alternate.t
similarity index 94%
copy from tests/cylc-poll/01-task-failed.t
copy to tests/message-triggers/02-alternate.t
index c8dc18b..516c05a 100644
--- a/tests/cylc-poll/01-task-failed.t
+++ b/tests/message-triggers/02-alternate.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test that polling a failed task sets the task state correctly
+# Test alternate message triggers (task finish with incomplete outputs; #1551).
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
diff --git a/tests/message-triggers/02-alternate/reference.log b/tests/message-triggers/02-alternate/reference.log
new file mode 100644
index 0000000..14120ce
--- /dev/null
+++ b/tests/message-triggers/02-alternate/reference.log
@@ -0,0 +1,23 @@
+2015-07-24T01:13:30Z INFO - port:7766
+2015-07-24T01:13:30Z INFO - Suite starting at 2015-07-24T01:13:30Z
+2015-07-24T01:13:30Z INFO - Run mode: live
+2015-07-24T01:13:30Z INFO - Initial point: 1
+2015-07-24T01:13:30Z INFO - Final point: 1
+2015-07-24T01:13:30Z INFO - Cold Start 1
+2015-07-24T01:13:30Z INFO - [foo.1] -job(01) initiate job-submit
+2015-07-24T01:13:30Z INFO - [foo.1] -triggered off []
+2015-07-24T01:13:31Z INFO - [foo.1] -submit_method_id=7377
+2015-07-24T01:13:31Z INFO - [foo.1] -submission succeeded
+2015-07-24T01:13:31Z INFO - [foo.1] -(current:submitted)> foo.1 started at 2015-07-24T01:13:31Z
+2015-07-24T01:13:31Z INFO - [foo.1] -(current:running)> message one for 1 at 2015-07-24T01:13:31Z
+2015-07-24T01:13:31Z INFO - [foo.1] -(current:running)> foo.1 succeeded at 2015-07-24T01:13:31Z
+2015-07-24T01:13:31Z WARNING - [foo.1] -Succeeded with unreported outputs:
+  message two for 1
+2015-07-24T01:13:33Z INFO - [run_me.1] -job(01) initiate job-submit
+2015-07-24T01:13:33Z INFO - [run_me.1] -triggered off ['foo.1']
+2015-07-24T01:13:33Z INFO - [dont_run_me.1] -suiciding
+2015-07-24T01:13:34Z INFO - [run_me.1] -submit_method_id=7464
+2015-07-24T01:13:34Z INFO - [run_me.1] -submission succeeded
+2015-07-24T01:13:34Z INFO - [run_me.1] -(current:submitted)> run_me.1 started at 2015-07-24T01:13:33Z
+2015-07-24T01:13:34Z INFO - [run_me.1] -(current:running)> run_me.1 succeeded at 2015-07-24T01:13:34Z
+2015-07-24T01:13:35Z INFO - Suite shutting down at 2015-07-24T01:13:35Z
diff --git a/tests/message-triggers/02-alternate/suite.rc b/tests/message-triggers/02-alternate/suite.rc
new file mode 100644
index 0000000..07519e9
--- /dev/null
+++ b/tests/message-triggers/02-alternate/suite.rc
@@ -0,0 +1,20 @@
+title = Test suite for alternate message triggers.
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        required run mode = live
+        live mode suite timeout = PT30S
+[scheduling]
+    [[dependencies]]
+           graph = """foo:x => run_me & !dont_run_me
+                      foo:y => dont_run_me & !run_me"""
+[runtime]
+    [[run_me]]
+        script = /bin/true
+    [[dont_run_me]]
+        script = /bin/false
+    [[foo]]
+        script = cylc message "message one for $CYLC_TASK_CYCLE_POINT"
+        [[[outputs]]]
+            x = "message one for []"
+            y = "message two for []"
diff --git a/tests/cyclers/29-0000_rollunder.t b/tests/message-triggers/03-placeholder.t
similarity index 87%
copy from tests/cyclers/29-0000_rollunder.t
copy to tests/message-triggers/03-placeholder.t
index 732f8ea..83136bf 100644
--- a/tests/cyclers/29-0000_rollunder.t
+++ b/tests/message-triggers/03-placeholder.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,16 +15,15 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test intercycle dependencies.
+# Test validation fails message outputs with no cycle offset placeholder.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
 #-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE 0000_rollunder
+install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_fail $TEST_NAME cylc validate $SUITE_NAME
-grep_ok "Cannot dump TimePoint year: -1 not in bounds 0 to 9999." \
-    $TEST_NAME.stderr
+grep_ok 'ERROR: bad message output string' $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/message-triggers/03-placeholder/suite.rc b/tests/message-triggers/03-placeholder/suite.rc
new file mode 100644
index 0000000..db16b27
--- /dev/null
+++ b/tests/message-triggers/03-placeholder/suite.rc
@@ -0,0 +1,12 @@
+[scheduling]
+    cycling mode = integer
+    initial cycle point = 1
+    [[dependencies]]
+        [[[P1]]]
+            graph = foo:x => bar
+[runtime]
+    [[foo]]
+        script = cylc message "hello to $CYLC_TASK_CYCLE_POINT"
+        [[[outputs]]]
+            x = "hello to Bob"
+            # (should be "hello to []")
diff --git a/tests/events/00-suite.t b/tests/pep8/00-bin-lib.t
old mode 100644
new mode 100755
similarity index 69%
copy from tests/events/00-suite.t
copy to tests/pep8/00-bin-lib.t
index d9dcf9b..624594c
--- a/tests/events/00-suite.t
+++ b/tests/pep8/00-bin-lib.t
@@ -15,16 +15,19 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Test compliance with PEP8.
 . "$(dirname "$0")/test_header"
-set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
 
-run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+if ! pep8 --version 1>'/dev/null' 2>&1; then
+    skip_all '"pep8" command not available'
+fi
+
+set_test_number 3
+
+run_ok "${TEST_NAME_BASE}" pep8 --ignore=E402 \
+    "${CYLC_DIR}/lib/cylc" \
+    $(grep -l '#!.*\<python\>' "${CYLC_DIR}/bin/"*)
+cmp_ok "${TEST_NAME_BASE}.stdout" <'/dev/null'
+cmp_ok "${TEST_NAME_BASE}.stderr" <'/dev/null'
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
 exit
diff --git a/tests/purge/test_header b/tests/pep8/test_header
similarity index 100%
rename from tests/purge/test_header
rename to tests/pep8/test_header
diff --git a/tests/purge/00-purge.t b/tests/purge/00-purge.t
deleted file mode 100644
index 68d6c84..0000000
--- a/tests/purge/00-purge.t
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-# 
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#-------------------------------------------------------------------------------
-# Test cylc purge from a suite
-. $(dirname $0)/test_header
-#-------------------------------------------------------------------------------
-set_test_number 2
-#-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE purge
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
-#-------------------------------------------------------------------------------
-purge_suite $SUITE_NAME
diff --git a/tests/purge/purge/bin/A.sh b/tests/purge/purge/bin/A.sh
deleted file mode 100755
index be5fe9f..0000000
--- a/tests/purge/purge/bin/A.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# CHECK INPUT FILES EXIST
-ONE=$INPUT_DIR/obs-${CYLC_TASK_CYCLE_TIME}.nc
-TWO=$RUNNING_DIR/A-${CYLC_TASK_CYCLE_TIME}.restart
-for PRE in $ONE $TWO; do
-    if [[ ! -f $PRE ]]; then
-        echo "ERROR, file not found $PRE" >&2
-        exit 1
-    fi
-done
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-sleep $TASK_EXE_SECONDS
-
-# generate a restart file for the next three cycles
-touch $RUNNING_DIR/A-$(cylc cycle-point --offset-hours=6 ).restart
-touch $RUNNING_DIR/A-$(cylc cycle-point --offset-hours=12).restart
-touch $RUNNING_DIR/A-$(cylc cycle-point --offset-hours=18).restart
-
-# model outputs
-touch $OUTPUT_DIR/surface-winds-${CYLC_TASK_CYCLE_TIME}.nc
-touch $OUTPUT_DIR/precipitation-${CYLC_TASK_CYCLE_TIME}.nc
-
-echo "Goodbye"
diff --git a/tests/purge/purge/bin/B.sh b/tests/purge/purge/bin/B.sh
deleted file mode 100755
index e30f06e..0000000
--- a/tests/purge/purge/bin/B.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# CHECK INPUT FILES EXIST
-ONE=$INPUT_DIR/surface-winds-${CYLC_TASK_CYCLE_TIME}.nc
-TWO=$RUNNING_DIR/B-${CYLC_TASK_CYCLE_TIME}.restart
-for PRE in $ONE $TWO; do
-    if [[ ! -f $PRE ]]; then
-        echo "ERROR, file not found $PRE" >&2
-        exit 1
-    fi
-done
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-
-sleep $TASK_EXE_SECONDS
-
-# generate a restart file for the next and T+24 cycles
-touch $RUNNING_DIR/B-$(cylc cycle-point --offset-hours=6 ).restart
-touch $RUNNING_DIR/B-$(cylc cycle-point --offset-hours=24).restart
-
-# model outputs
-touch $OUTPUT_DIR/sea-state-${CYLC_TASK_CYCLE_TIME}.nc
-
-echo "Goodbye"
diff --git a/tests/purge/purge/bin/C.sh b/tests/purge/purge/bin/C.sh
deleted file mode 100755
index 170b167..0000000
--- a/tests/purge/purge/bin/C.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# CHECK INPUT FILES EXIST
-ONE=$INPUT_DIR/precipitation-${CYLC_TASK_CYCLE_TIME}.nc
-TWO=$RUNNING_DIR/C-${CYLC_TASK_CYCLE_TIME}.restart
-for PRE in $ONE $TWO; do
-    if [[ ! -f $PRE ]]; then
-        echo "ERROR, file not found $PRE" >&2
-        exit 1
-    fi
-done
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-
-sleep $TASK_EXE_SECONDS
-
-# generate a restart file for the next and T+24 cycles
-touch $RUNNING_DIR/C-$(cylc cycle-point --offset-hours=6 ).restart
-touch $RUNNING_DIR/C-$(cylc cycle-point --offset-hours=24).restart
-
-# model outputs
-touch $OUTPUT_DIR/river-flow-${CYLC_TASK_CYCLE_TIME}.nc
-
-echo "Goodbye"
diff --git a/tests/purge/purge/bin/ColdA.sh b/tests/purge/purge/bin/ColdA.sh
deleted file mode 100755
index 45ac6ce..0000000
--- a/tests/purge/purge/bin/ColdA.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-sleep $TASK_EXE_SECONDS
-
-touch $RUNNING_DIR/A-${CYLC_TASK_CYCLE_TIME}.restart
diff --git a/tests/purge/purge/bin/ColdB.sh b/tests/purge/purge/bin/ColdB.sh
deleted file mode 100755
index 4e29b4a..0000000
--- a/tests/purge/purge/bin/ColdB.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-sleep $TASK_EXE_SECONDS
-
-touch $RUNNING_DIR/B-${CYLC_TASK_CYCLE_TIME}.restart
diff --git a/tests/purge/purge/bin/ColdC.sh b/tests/purge/purge/bin/ColdC.sh
deleted file mode 100755
index 5f4c99d..0000000
--- a/tests/purge/purge/bin/ColdC.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-sleep $TASK_EXE_SECONDS
-
-touch $RUNNING_DIR/C-${CYLC_TASK_CYCLE_TIME}.restart
diff --git a/tests/purge/purge/bin/D.sh b/tests/purge/purge/bin/D.sh
deleted file mode 100755
index ff7f607..0000000
--- a/tests/purge/purge/bin/D.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# CHECK INPUT FILES EXIST
-ONE=$INPUT_DIR/sea-state-${CYLC_TASK_CYCLE_TIME}.nc
-TWO=$INPUT_DIR/river-flow-${CYLC_TASK_CYCLE_TIME}.nc
-for PRE in $ONE $TWO; do
-    if [[ ! -f $PRE ]]; then
-        echo "ERROR, file not found $PRE" >&2
-        exit 1
-    fi
-done
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-
-sleep $TASK_EXE_SECONDS
-
-# generate outputs
-touch $OUTPUT_DIR/combined.products
diff --git a/tests/purge/purge/bin/E.sh b/tests/purge/purge/bin/E.sh
deleted file mode 100755
index 50c1171..0000000
--- a/tests/purge/purge/bin/E.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# CHECK INPUT FILES EXIST
-PRE=$INPUT_DIR/sea-state-${CYLC_TASK_CYCLE_TIME}.nc
-if [[ ! -f $PRE ]]; then
-    echo "ERROR, file not found $PRE" >&2
-    exit 1
-fi
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-
-sleep $TASK_EXE_SECONDS
-
-# generate outputs
-touch $OUTPUT_DIR/sea-state.products
diff --git a/tests/purge/purge/bin/F.sh b/tests/purge/purge/bin/F.sh
deleted file mode 100755
index 9b5cfc5..0000000
--- a/tests/purge/purge/bin/F.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-# CHECK INPUT FILES EXIST
-PRE=$INPUT_DIR/river-flow-${CYLC_TASK_CYCLE_TIME}.nc
-if [[ ! -f $PRE ]]; then
-    echo "ERROR, file not found $PRE" >&2
-    exit 1
-fi
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-sleep $TASK_EXE_SECONDS
-
-# generate outputs
-touch $OUTPUT_DIR/river-flow-products-${CYLC_TASK_CYCLE_TIME}.nc
diff --git a/tests/purge/purge/bin/X.sh b/tests/purge/purge/bin/X.sh
deleted file mode 100755
index 2d0cfc9..0000000
--- a/tests/purge/purge/bin/X.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-sleep $TASK_EXE_SECONDS
-
-touch $OUTPUT_DIR/obs-${CYLC_TASK_CYCLE_TIME}.nc
diff --git a/tests/purge/purge/bin/clean-workspace.sh b/tests/purge/purge/bin/clean-workspace.sh
deleted file mode 100755
index cfca137..0000000
--- a/tests/purge/purge/bin/clean-workspace.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/bash
-
-set -eu
-
-usage="USAGE: clean-workspace.sh PATH"
-
-if [[ $# != 1 ]]; then
-    echo $usage >&2
-    exit 1
-fi
-
-echo "Hello from $CYLC_TASK_NAME at $CYLC_TASK_CYCLE_TIME in $CYLC_SUITE_REG_NAME"
-sleep $TASK_EXE_SECONDS
-
-if [[ $# == 1 ]]; then
-    WORKSPACE=$1
-else
-    echo "No workspace specified for cleaning"
-    exit 1
-fi
-
-echo "Cleaning $WORKSPACE ..."
-
-rm -rf $WORKSPACE
-mkdir -p $WORKSPACE
-
-echo "Done"
diff --git a/tests/purge/purge/reference.log b/tests/purge/purge/reference.log
deleted file mode 100644
index fab644c..0000000
--- a/tests/purge/purge/reference.log
+++ /dev/null
@@ -1,179 +0,0 @@
-2014/01/07 17:36:00 INFO - Thread-2 start (Event Handlers)
-2014/01/07 17:36:00 INFO - port:7766
-2014/01/07 17:36:00 INFO - Suite starting at 2014-01-07 17:36:00.197836
-2014/01/07 17:36:00 INFO - Log event clock: real time
-2014/01/07 17:36:00 INFO - Run mode: live
-2014/01/07 17:36:00 INFO - Initial point: 2010010106
-2014/01/07 17:36:00 INFO - Final point: None
-2014/01/07 17:36:00 INFO - Thread-3 start (Poll & Kill Commands)
-2014/01/07 17:36:00 INFO - Thread-5 start (Request Handling)
-2014/01/07 17:36:00 INFO - Thread-4 start (Job Submission)
-2014/01/07 17:36:00 INFO - Cold Start 2010010106
-2014/01/07 17:36:00 INFO - [prep.2010010106] -triggered off []
-2014/01/07 17:36:01 INFO - [prep.2010010106] -(current:ready)> prep.2010010106 submitting now
-2014/01/07 17:36:02 INFO - [prep.2010010106] -(current:ready)> prep.2010010106 submission succeeded
-2014/01/07 17:36:02 INFO - [prep.2010010106] -(current:submitted)> prep.2010010106 submit_method_id=20291
-2014/01/07 17:36:02 INFO - [prep.2010010106] -(current:submitted)> prep.2010010106 started at 2014-01-07T17:36:01
-2014/01/07 17:36:02 INFO - [prep.2010010106] -(current:running)> Setting stop cycle 2010010206 at 2014-01-07T17:36:01
-2014/01/07 17:36:03 INFO - [prep.2010010106] -(current:running)> prep.2010010106 succeeded at 2014-01-07T17:36:02
-2014/01/07 17:36:03 INFO - Setting stop cycle time: 2010010206
-2014/01/07 17:36:03 INFO - Command succeeded: stop after point(2010010206)
-2014/01/07 17:36:03 INFO - setting runahead limit to 30
-2014/01/07 17:36:03 INFO - Command succeeded: set runahead(30)
-2014/01/07 17:36:04 INFO - [ColdB.2010010106] -triggered off ['prep.2010010106']
-2014/01/07 17:36:04 INFO - [X.2010010106] -triggered off ['prep.2010010106']
-2014/01/07 17:36:04 INFO - [ColdC.2010010106] -triggered off ['prep.2010010106']
-2014/01/07 17:36:04 INFO - [ColdA.2010010106] -triggered off ['prep.2010010106']
-2014/01/07 17:36:05 INFO - [ColdB.2010010106] -(current:ready)> ColdB.2010010106 submitting now
-2014/01/07 17:36:05 INFO - [ColdB.2010010106] -(current:ready)> ColdB.2010010106 started at 2014-01-07T17:36:04
-2014/01/07 17:36:05 INFO - [ColdB.2010010106] -(current:running)> ColdB.2010010106 submission succeeded
-2014/01/07 17:36:05 INFO - [ColdB.2010010106] -(current:running)> ColdB.2010010106 submit_method_id=20375
-2014/01/07 17:36:05 INFO - [X.2010010106] -(current:ready)> X.2010010106 submitting now
-2014/01/07 17:36:05 INFO - [X.2010010106] -(current:ready)> X.2010010106 started at 2014-01-07T17:36:04
-2014/01/07 17:36:05 INFO - [ColdC.2010010106] -(current:ready)> ColdC.2010010106 submitting now
-2014/01/07 17:36:05 INFO - [ColdC.2010010106] -(current:ready)> ColdC.2010010106 submission succeeded
-2014/01/07 17:36:05 INFO - [ColdC.2010010106] -(current:submitted)> ColdC.2010010106 submit_method_id=20434
-2014/01/07 17:36:05 INFO - [ColdC.2010010106] -(current:submitted)> ColdC.2010010106 started at 2014-01-07T17:36:05
-2014/01/07 17:36:05 INFO - [ColdA.2010010106] -(current:ready)> ColdA.2010010106 submitting now
-2014/01/07 17:36:06 INFO - [ColdB.2010010106] -(current:running)> ColdB.2010010106 succeeded at 2014-01-07T17:36:05
-2014/01/07 17:36:06 INFO - [X.2010010106] -(current:running)> X.2010010106 submission succeeded
-2014/01/07 17:36:06 INFO - [X.2010010106] -(current:running)> X.2010010106 submit_method_id=20400
-2014/01/07 17:36:06 INFO - [X.2010010106] -(current:running)> X.2010010106 succeeded at 2014-01-07T17:36:06
-2014/01/07 17:36:06 INFO - [ColdC.2010010106] -(current:running)> ColdC.2010010106 succeeded at 2014-01-07T17:36:06
-2014/01/07 17:36:06 INFO - [ColdA.2010010106] -(current:ready)> ColdA.2010010106 started at 2014-01-07T17:36:05
-2014/01/07 17:36:06 INFO - [ColdA.2010010106] -(current:running)> ColdA.2010010106 succeeded at 2014-01-07T17:36:06
-2014/01/07 17:36:06 WARNING - [ColdA.2010010106] -Assuming non-reported outputs were completed:
-ColdA.2010010106 submitted
-2014/01/07 17:36:07 INFO - [A.2010010106] -triggered off ['ColdA.2010010106', 'X.2010010106']
-2014/01/07 17:36:07 INFO - [X.2010010112] -triggered off ['prep.2010010106']
-2014/01/07 17:36:07 INFO - [ColdA.2010010106] -(current:succeeded)> ColdA.2010010106 submission succeeded
-2014/01/07 17:36:07 INFO - [ColdA.2010010106] -(current:succeeded)> ColdA.2010010106 submit_method_id=20480
-2014/01/07 17:36:09 INFO - [A.2010010106] -(current:ready)> A.2010010106 submitting now
-2014/01/07 17:36:09 INFO - [A.2010010106] -(current:ready)> A.2010010106 submission succeeded
-2014/01/07 17:36:09 INFO - [A.2010010106] -(current:submitted)> A.2010010106 submit_method_id=20635
-2014/01/07 17:36:09 INFO - [X.2010010112] -(current:ready)> X.2010010112 submitting now
-2014/01/07 17:36:10 INFO - [A.2010010106] -(current:submitted)> A.2010010106 started at 2014-01-07T17:36:09
-2014/01/07 17:36:10 INFO - [A.2010010106] -(current:running)> A.2010010106 succeeded at 2014-01-07T17:36:10
-2014/01/07 17:36:10 INFO - [X.2010010112] -(current:ready)> X.2010010112 started at 2014-01-07T17:36:09
-2014/01/07 17:36:10 CRITICAL - [X.2010010112] -(current:running)> Task job script received signal ERR at 2014-01-07T17:36:09
-2014/01/07 17:36:10 CRITICAL - [X.2010010112] -(current:running)> X.2010010112 failed at 2014-01-07T17:36:09
-2014/01/07 17:36:10 INFO - [X.2010010112] -(current:failed)> X.2010010112 submission succeeded
-2014/01/07 17:36:10 WARNING - [X.2010010112] -rejecting a message received while in the failed state:
-2014/01/07 17:36:10 WARNING - [X.2010010112] -  X.2010010112 submission succeeded
-2014/01/07 17:36:10 INFO - [X.2010010112] -(current:failed)> X.2010010112 submit_method_id=20659
-2014/01/07 17:36:10 WARNING - [X.2010010112] -rejecting a message received while in the failed state:
-2014/01/07 17:36:10 WARNING - [X.2010010112] -  X.2010010112 submit_method_id=20659
-2014/01/07 17:36:11 INFO - [C.2010010106] -triggered off ['A.2010010106', 'ColdC.2010010106']
-2014/01/07 17:36:11 INFO - [B.2010010106] -triggered off ['A.2010010106', 'ColdB.2010010106']
-2014/01/07 17:36:12 INFO - [X.2010010118] -triggered off ['prep.2010010106']
-2014/01/07 17:36:12 INFO - [C.2010010106] -(current:ready)> C.2010010106 submitting now
-2014/01/07 17:36:13 INFO - [C.2010010106] -(current:ready)> C.2010010106 submission succeeded
-2014/01/07 17:36:13 INFO - [C.2010010106] -(current:submitted)> C.2010010106 submit_method_id=20797
-2014/01/07 17:36:13 INFO - [C.2010010106] -(current:submitted)> C.2010010106 started at 2014-01-07T17:36:12
-2014/01/07 17:36:13 INFO - [B.2010010106] -(current:ready)> B.2010010106 submitting now
-2014/01/07 17:36:13 INFO - [B.2010010106] -(current:ready)> B.2010010106 started at 2014-01-07T17:36:12
-2014/01/07 17:36:14 INFO - [C.2010010106] -(current:running)> C.2010010106 succeeded at 2014-01-07T17:36:13
-2014/01/07 17:36:14 INFO - [B.2010010106] -(current:running)> B.2010010106 submission succeeded
-2014/01/07 17:36:14 INFO - [B.2010010106] -(current:running)> B.2010010106 submit_method_id=20821
-2014/01/07 17:36:14 INFO - [B.2010010106] -(current:running)> B.2010010106 succeeded at 2014-01-07T17:36:13
-2014/01/07 17:36:15 INFO - [E.2010010106] -triggered off ['B.2010010106']
-2014/01/07 17:36:15 INFO - [D.2010010106] -triggered off ['B.2010010106', 'C.2010010106']
-2014/01/07 17:36:15 INFO - [F.2010010106] -triggered off ['C.2010010106']
-2014/01/07 17:36:16 INFO - [E.2010010106] -(current:ready)> E.2010010106 submitting now
-2014/01/07 17:36:16 INFO - [E.2010010106] -(current:ready)> E.2010010106 started at 2014-01-07T17:36:16
-2014/01/07 17:36:16 INFO - [D.2010010106] -(current:ready)> D.2010010106 submitting now
-2014/01/07 17:36:16 INFO - [D.2010010106] -(current:ready)> D.2010010106 submission succeeded
-2014/01/07 17:36:16 INFO - [D.2010010106] -(current:submitted)> D.2010010106 submit_method_id=21038
-2014/01/07 17:36:16 INFO - [F.2010010106] -(current:ready)> F.2010010106 submitting now
-2014/01/07 17:36:16 INFO - [X.2010010118] -(current:ready)> X.2010010118 submitting now
-2014/01/07 17:36:16 INFO - [X.2010010118] -(current:ready)> X.2010010118 started at 2014-01-07T17:36:15
-2014/01/07 17:36:16 INFO - [X.2010010118] -(current:running)> X.2010010118 submission succeeded
-2014/01/07 17:36:16 INFO - [X.2010010118] -(current:running)> X.2010010118 submit_method_id=20978
-2014/01/07 17:36:17 INFO - [E.2010010106] -(current:running)> E.2010010106 submission succeeded
-2014/01/07 17:36:17 INFO - [E.2010010106] -(current:running)> E.2010010106 submit_method_id=21003
-2014/01/07 17:36:17 INFO - [E.2010010106] -(current:running)> E.2010010106 succeeded at 2014-01-07T17:36:17
-2014/01/07 17:36:17 INFO - [D.2010010106] -(current:submitted)> D.2010010106 started at 2014-01-07T17:36:16
-2014/01/07 17:36:17 INFO - [F.2010010106] -(current:ready)> F.2010010106 started at 2014-01-07T17:36:16
-2014/01/07 17:36:17 INFO - [X.2010010118] -(current:running)> X.2010010118 succeeded at 2014-01-07T17:36:17
-2014/01/07 17:36:18 INFO - [X.2010010200] -triggered off ['prep.2010010106']
-2014/01/07 17:36:18 INFO - [D.2010010106] -(current:running)> D.2010010106 succeeded at 2014-01-07T17:36:17
-2014/01/07 17:36:18 INFO - [F.2010010106] -(current:running)> F.2010010106 succeeded at 2014-01-07T17:36:17
-2014/01/07 17:36:18 WARNING - [F.2010010106] -Assuming non-reported outputs were completed:
-F.2010010106 submitted
-2014/01/07 17:36:18 INFO - [F.2010010106] -(current:succeeded)> F.2010010106 submission succeeded
-2014/01/07 17:36:18 INFO - [F.2010010106] -(current:succeeded)> F.2010010106 submit_method_id=21079
-2014/01/07 17:36:20 INFO - [recover.2010010112] -triggered off ['A.2010010106', 'B.2010010106', 'C.2010010106', 'D.2010010106', 'E.2010010106', 'F.2010010106', 'X.2010010112']
-2014/01/07 17:36:20 INFO - [X.2010010200] -(current:ready)> X.2010010200 submitting now
-2014/01/07 17:36:20 INFO - [X.2010010200] -(current:ready)> X.2010010200 submission succeeded
-2014/01/07 17:36:20 INFO - [X.2010010200] -(current:submitted)> X.2010010200 submit_method_id=21265
-2014/01/07 17:36:20 INFO - [X.2010010200] -(current:submitted)> X.2010010200 started at 2014-01-07T17:36:20
-2014/01/07 17:36:21 INFO - [X.2010010200] -(current:running)> X.2010010200 succeeded at 2014-01-07T17:36:20
-2014/01/07 17:36:22 INFO - [X.2010010206] -triggered off ['prep.2010010106']
-2014/01/07 17:36:22 INFO - [recover.2010010112] -(current:ready)> recover.2010010112 submitting now
-2014/01/07 17:36:22 INFO - [recover.2010010112] -(current:ready)> recover.2010010112 submission succeeded
-2014/01/07 17:36:22 INFO - [recover.2010010112] -(current:submitted)> recover.2010010112 submit_method_id=21338
-2014/01/07 17:36:23 INFO - [recover.2010010112] -(current:submitted)> recover.2010010112 started at 2014-01-07T17:36:22
-2014/01/07 17:36:23 INFO - pre-insertion state dump: /home/oliverh/cylc-run/purge/state/state.2014:1:7:17:36:23
-2014/01/07 17:36:23 INFO - Command succeeded: insert task(ColdA,2010010206,False,None)
-2014/01/07 17:36:23 INFO - pre-purge state dump: /home/oliverh/cylc-run/purge/state/state.2014:1:7:17:36:23
-2014/01/07 17:36:23 INFO - Command succeeded: purge tree(X.2010010112,2010010200)
-2014/01/07 17:36:24 INFO - [ColdA.2010010206] -triggered off []
-2014/01/07 17:36:24 INFO - [X.2010010206] -(current:ready)> X.2010010206 submitting now
-2014/01/07 17:36:24 INFO - [X.2010010206] -(current:ready)> X.2010010206 submission succeeded
-2014/01/07 17:36:24 INFO - [X.2010010206] -(current:submitted)> X.2010010206 submit_method_id=21415
-2014/01/07 17:36:25 INFO - [X.2010010206] -(current:submitted)> X.2010010206 started at 2014-01-07T17:36:24
-2014/01/07 17:36:25 INFO - [X.2010010206] -(current:running)> X.2010010206 succeeded at 2014-01-07T17:36:25
-2014/01/07 17:36:26 INFO - [ColdA.2010010206] -(current:ready)> ColdA.2010010206 submitting now
-2014/01/07 17:36:27 INFO - [ColdA.2010010206] -(current:ready)> ColdA.2010010206 submission succeeded
-2014/01/07 17:36:27 INFO - [ColdA.2010010206] -(current:submitted)> ColdA.2010010206 submit_method_id=21488
-2014/01/07 17:36:27 INFO - [ColdA.2010010206] -(current:submitted)> ColdA.2010010206 started at 2014-01-07T17:36:26
-2014/01/07 17:36:27 INFO - [ColdA.2010010206] -(current:running)> ColdA.2010010206 succeeded at 2014-01-07T17:36:27
-2014/01/07 17:36:28 INFO - [A.2010010206] -triggered off ['A.2010010200', 'ColdA.2010010206', 'X.2010010206']
-2014/01/07 17:36:29 INFO - [A.2010010206] -(current:ready)> A.2010010206 submitting now
-2014/01/07 17:36:29 INFO - [A.2010010206] -(current:ready)> A.2010010206 submission succeeded
-2014/01/07 17:36:29 INFO - [A.2010010206] -(current:submitted)> A.2010010206 submit_method_id=21553
-2014/01/07 17:36:29 INFO - [A.2010010206] -(current:submitted)> A.2010010206 started at 2014-01-07T17:36:28
-2014/01/07 17:36:29 INFO - [A.2010010206] -(current:running)> A.2010010206 succeeded at 2014-01-07T17:36:29
-2014/01/07 17:36:30 INFO - pre-trigger state dump: /home/oliverh/cylc-run/purge/state/state.2014:1:7:17:36:30
-2014/01/07 17:36:30 INFO - Command succeeded: trigger task(B,2010010206,False)
-2014/01/07 17:36:30 INFO - pre-trigger state dump: /home/oliverh/cylc-run/purge/state/state.2014:1:7:17:36:30
-2014/01/07 17:36:30 INFO - Command succeeded: trigger task(C,2010010206,False)
-2014/01/07 17:36:31 INFO - [C.2010010206] -triggered off ['A.2010010206', 'C.2010010200']
-2014/01/07 17:36:31 INFO - [B.2010010206] -triggered off ['A.2010010206', 'B.2010010200']
-2014/01/07 17:36:32 INFO - [C.2010010206] -(current:ready)> C.2010010206 submitting now
-2014/01/07 17:36:32 INFO - [C.2010010206] -(current:ready)> C.2010010206 submission succeeded
-2014/01/07 17:36:32 INFO - [C.2010010206] -(current:submitted)> C.2010010206 submit_method_id=21686
-2014/01/07 17:36:32 INFO - [C.2010010206] -(current:submitted)> C.2010010206 started at 2014-01-07T17:36:32
-2014/01/07 17:36:32 INFO - [B.2010010206] -(current:ready)> B.2010010206 submitting now
-2014/01/07 17:36:32 INFO - [B.2010010206] -(current:ready)> B.2010010206 started at 2014-01-07T17:36:32
-2014/01/07 17:36:33 INFO - [C.2010010206] -(current:running)> C.2010010206 succeeded at 2014-01-07T17:36:33
-2014/01/07 17:36:33 INFO - [B.2010010206] -(current:running)> B.2010010206 succeeded at 2014-01-07T17:36:33
-2014/01/07 17:36:33 WARNING - [B.2010010206] -Assuming non-reported outputs were completed:
-B.2010010206 submitted
-2014/01/07 17:36:33 INFO - [B.2010010206] -(current:succeeded)> B.2010010206 submission succeeded
-2014/01/07 17:36:33 INFO - [B.2010010206] -(current:succeeded)> B.2010010206 submit_method_id=21711
-2014/01/07 17:36:34 INFO - [E.2010010206] -triggered off ['B.2010010206']
-2014/01/07 17:36:34 INFO - [D.2010010206] -triggered off ['B.2010010206', 'C.2010010206']
-2014/01/07 17:36:34 INFO - [F.2010010206] -triggered off ['C.2010010206']
-2014/01/07 17:36:35 INFO - [E.2010010206] -(current:ready)> E.2010010206 submitting now
-2014/01/07 17:36:35 INFO - [E.2010010206] -(current:ready)> E.2010010206 started at 2014-01-07T17:36:35
-2014/01/07 17:36:35 INFO - [E.2010010206] -(current:running)> E.2010010206 submission succeeded
-2014/01/07 17:36:35 INFO - [E.2010010206] -(current:running)> E.2010010206 submit_method_id=21868
-2014/01/07 17:36:35 INFO - [D.2010010206] -(current:ready)> D.2010010206 submitting now
-2014/01/07 17:36:35 INFO - [D.2010010206] -(current:ready)> D.2010010206 started at 2014-01-07T17:36:35
-2014/01/07 17:36:35 INFO - [F.2010010206] -(current:ready)> F.2010010206 submitting now
-2014/01/07 17:36:35 INFO - [F.2010010206] -(current:ready)> F.2010010206 submission succeeded
-2014/01/07 17:36:35 INFO - [F.2010010206] -(current:submitted)> F.2010010206 submit_method_id=21935
-2014/01/07 17:36:36 INFO - [E.2010010206] -(current:running)> E.2010010206 succeeded at 2014-01-07T17:36:36
-2014/01/07 17:36:36 INFO - [D.2010010206] -(current:running)> D.2010010206 succeeded at 2014-01-07T17:36:36
-2014/01/07 17:36:36 WARNING - [D.2010010206] -Assuming non-reported outputs were completed:
-D.2010010206 submitted
-2014/01/07 17:36:36 INFO - [D.2010010206] -(current:succeeded)> D.2010010206 submission succeeded
-2014/01/07 17:36:36 INFO - [D.2010010206] -(current:succeeded)> D.2010010206 submit_method_id=21894
-2014/01/07 17:36:36 INFO - [F.2010010206] -(current:submitted)> F.2010010206 started at 2014-01-07T17:36:35
-2014/01/07 17:36:36 INFO - [F.2010010206] -(current:running)> F.2010010206 succeeded at 2014-01-07T17:36:36
-2014/01/07 17:36:37 INFO - Stopping: 
-  + all cycling tasks have spawned past the final cycle 2010010206
-2014/01/07 17:36:38 INFO - Thread-2 exit (Event Handlers)
-2014/01/07 17:36:38 INFO - Thread-3 exit (Poll & Kill Commands)
-2014/01/07 17:36:38 INFO - Thread-4 exit (Job Submission)
diff --git a/tests/purge/purge/suite.rc b/tests/purge/purge/suite.rc
deleted file mode 100644
index 1baaa6c..0000000
--- a/tests/purge/purge/suite.rc
+++ /dev/null
@@ -1,135 +0,0 @@
-title = "Old Cylc Admin Test Suite"
-
-# TODO: this ancient suite has been updated for cylc-6+ cycling, but it could
-# do with a more general rewrite (e.g. to use the suite share and work dirs in
-# the modern way, for shared workspaces).
-
-#        B[-PT6H]-> B -> E
-#                / \
-#  X & A[-PT6H]-> A   -> D
-#                \ /
-#        C[-PT6H]-> C -> F
-#
-
-[cylc]
-    cycle point format = %Y%m%d%H # (for old-style reference log)
-    [[reference test]]
-        required run mode = live
-        expected task failures = X.2010010112
-        live mode suite timeout = PT1.5M
-
-[scheduling]
-    # This test suite only needs a start time;
-    # the first task sets a stop time.
-    initial cycle time = 20100101T06
-
-    [[special tasks]]
-        cold-start       = ColdA, ColdB, ColdC
-        clock-triggered  = X(PT1H)
-
-    [[dependencies]]
-        [[[R1]]]
-            graph = "prep => X & ColdA & ColdB & ColdC"
-        [[[T00,T06,T12,T18]]]
-            graph = """
-      prep[^] => X => A => B => E
-      A => C => F
-      B & C => D
-
-      Warm[-PT6H]:succeed-all & Post[-PT6H]:succeed-all & X:fail => recover
-      Warm:succeed-all & Post:succeed-all => !recover
-
-      ColdA | A[-PT6H] => A
-      ColdB | B[-PT6H] => B
-      ColdC | C[-PT6H] => C
-                    """
-[runtime]
-    [[root]]
-        script = """
-mkdir -p $INPUT_DIR $OUTPUT_DIR
-${CYLC_TASK_NAME}.sh"""
-        [[[environment]]]
-            #TASK_EXE_SECONDS = $( cylc rnd 1 15 )
-            TASK_EXE_SECONDS = 0 # fast
-            WORKSPACE  = $CYLC_SUITE_SHARE_DIR
-            RUNNING    = $WORKSPACE/running
-            INPUT_DIR  = $WORKSPACE
-            OUTPUT_DIR = $WORKSPACE
-
-    [[Model]]
-        description = "Model task"
-        pre-script = mkdir -p $RUNNING_DIR
-        [[[environment]]]
-            RUNNING_DIR = $RUNNING/$CYLC_TASK_NAME
-
-    [[Cold]]
-        inherit = Model
-        description = "Model cold start task"
-      [[[environment]]]
-            # Needs the corresponding model running dir
-            RUNNING_DIR = "$RUNNING/${CYLC_TASK_NAME#Cold}"
-
-    [[Warm]]
-        inherit = Model
-        description = "Model warm start task"
-
-    [[Post]]
-        description = "Post processing task"
-
-    [[prep]]
-        description = "Clean out the suite workspace for a new run"
-        script = """
-clean-workspace.sh $WORKSPACE
-STOP=$( cylc cycle-point --offset-hours=24 )
-cylc message "Setting stop cycle $STOP"
-cylc stop -f $CYLC_SUITE_REG_NAME $STOP
-# set a runahead limit big enough to cross the purge gap
-cylc set-runahead -f $CYLC_SUITE_REG_NAME 30"""
-
-    [[X]]
-        description = "Retrieve real time data for model A"
-        pre-script = """
-#sleep 10
-CYCLE2=$( cylc cycle-point --offset-hours=6 $CYLC_SUITE_INITIAL_CYCLE_TIME )
-if (( CYLC_TASK_CYCLE_TIME == CYCLE2 )); then
-    echo "This task ABORTS in the 2nd cycle!"
-    /bin/false
-fi"""
-
-    [[A,B,C]]
-        inherit = Warm
-
-    [[D,E,F]]
-        inherit = Post
-
-    [[ColdA,ColdB,ColdC]]
-        inherit = Cold
-
-    [[recover]]
-        script = """
-# insert ColdA:
-AT=$( cylc cycle-point --offset-hours=18 )
-cylc insert -f $CYLC_SUITE_REG_NAME ColdA ${AT}
-# purge tasks that cannot run:
-TO=$( cylc cycle-point --offset-hours=12 )
-cylc purge -f $CYLC_SUITE_REG_NAME X.$CYLC_TASK_CYCLE_TIME ${TO}
-# when the new A task is finished, trigger B and C manually
-# (they write out restart files to T+24)
-cylc suite-state $CYLC_SUITE_REG_NAME -t A -c $AT -S succeeded --interval=1 --max-polls=30
-cylc trigger $CYLC_SUITE_REG_NAME B $AT
-cylc trigger $CYLC_SUITE_REG_NAME C $AT
-echo DONE"""
-
-[visualization]
-    default node attributes = "style=filled"
-    [[node attributes]]
-        Model = "shape=septagon"
-        Post   = "style=", "shape=rectangle"
-        Cold   = "shape=egg", "fillcolor=slateblue"
-        X   = "shape=box", "fillcolor=lawngreen"
-        A      = "fillcolor=red"
-        B      = "fillcolor=magenta3"
-        C      = "fillcolor=orange"
-        D      = "style=bold", "color=green4"
-        E      = "style=bold", "color=blue"
-        F      = "style=bold", "color=red"
diff --git a/tests/registration/00-simple.t b/tests/registration/00-simple.t
old mode 100644
new mode 100755
diff --git a/tests/graph-equivalence/04-multiline_and2.t b/tests/registration/02-corrupted.t
old mode 100644
new mode 100755
similarity index 53%
copy from tests/graph-equivalence/04-multiline_and2.t
copy to tests/registration/02-corrupted.t
index d62a905..5f83d9b
--- a/tests/graph-equivalence/04-multiline_and2.t
+++ b/tests/registration/02-corrupted.t
@@ -14,28 +14,32 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#-------------------------------------------------------------------------------
-# Test graph = """a => c
-#                 b => c"""
-# gives the same result as
-#      graph = "a & b => c"
-
+#------------------------------------------------------------------------------
+# Check a corrupted suite registration doesn't prevent access to other reg's.
 . $(dirname $0)/test_header
-#-------------------------------------------------------------------------------
+#------------------------------------------------------------------------------
 set_test_number 3
-#-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE multiline_and2
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-check-c
-cylc run $SUITE_NAME --hold
-cylc show $SUITE_NAME c.1 | sed -n "/prerequisites/,/outputs/p" > c-prereqs
-cmp_ok $TEST_SOURCE_DIR/multiline_and_refs/c-ref c-prereqs
-cylc shutdown $SUITE_NAME --now -f
-#-------------------------------------------------------------------------------
-purge_suite $SUITE_NAME
+#------------------------------------------------------------------------------
+mkdir $TEST_DIR/REGDB
+mkdir $TEST_DIR/suite
+cat > $TEST_DIR/suite/suite.rc <<__END__
+[scheduling]
+   [[dependencies]]
+       graph = foo
+__END__
+# Register some suites.
+cylc reg --db=$TEST_DIR/REGDB my.suite.1 $TEST_DIR/suite
+cylc reg --db=$TEST_DIR/REGDB my.suite.2 $TEST_DIR/suite
+cylc reg --db=$TEST_DIR/REGDB my.suite.3 $TEST_DIR/suite
+# Make a corrupted registration file.
+touch $TEST_DIR/REGDB/junk
+
+#------------------------------------------------------------------------------
+TEST_NAME=${TEST_NAME_BASE}-print
+run_ok $TEST_NAME cylc db print --db=$TEST_DIR/REGDB
+cmp_ok <(sort $TEST_NAME.stdout) - << __OUT__
+my.suite.1 | No title provided | $TEST_DIR/suite
+my.suite.2 | No title provided | $TEST_DIR/suite
+my.suite.3 | No title provided | $TEST_DIR/suite
+__OUT__
+grep_ok "ERROR, junk suite registration corrupted?" $TEST_NAME.stderr
diff --git a/tests/reload/11-garbage.t b/tests/reload/11-garbage.t
index 7d24d49..d236266 100644
--- a/tests/reload/11-garbage.t
+++ b/tests/reload/11-garbage.t
@@ -29,7 +29,7 @@ TEST_NAME=$TEST_NAME_BASE-run
 suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-check-fail
-run_ok $TEST_NAME grep 'Command failed: reload suite' \
+run_ok $TEST_NAME grep 'Command failed: reload_suite' \
 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/log/suite/log
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
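
The reload test now greps for the renamed command identifier ('reload_suite'
rather than 'reload suite') in the suite log. A sketch of the same check run
by hand, with the run directory resolved as in the test:

    LOG="$(cylc get-global-config --print-run-dir)/$SUITE_NAME/log/suite/log"
    grep 'Command failed: reload_suite' "$LOG"
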
diff --git a/tests/remote/00-basic.t b/tests/remote/00-basic.t
index 394d435..4355d82 100644
--- a/tests/remote/00-basic.t
+++ b/tests/remote/00-basic.t
@@ -32,13 +32,13 @@ TEST_NAME=$TEST_NAME_BASE-userathost
 SUITE_RUN_DIR=$(cylc get-global-config --print-run-dir)/$SUITE_NAME
 echo $CYLC_TEST_TASK_OWNER@$CYLC_TEST_TASK_HOST > userathost
 cmp_ok userathost - <<__OUT__
-$(sqlite3 $SUITE_RUN_DIR/cylc-suite.db "select host from task_states where name='foo'")
+$(sqlite3 $SUITE_RUN_DIR/cylc-suite.db "select host from task_states where name=='foo'")
 __OUT__
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-hostonly
 echo $CYLC_TEST_TASK_HOST > hostonly
 cmp_ok hostonly - <<__OUT__
-$(sqlite3 $SUITE_RUN_DIR/cylc-suite.db "select host from task_states where name='bar'")
+$(sqlite3 $SUITE_RUN_DIR/cylc-suite.db "select host from task_states where name=='bar'")
 __OUT__
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
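
The remote test change swaps '=' for '==' in the WHERE clause; SQLite accepts
both as the equality operator, so the query behaves the same either way. For
reference, this is the one-liner the test uses to check which host a task ran
on (sketch; $SUITE_RUN_DIR as defined in the test above):

    sqlite3 "$SUITE_RUN_DIR/cylc-suite.db" \
        "select host from task_states where name=='foo'"
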
diff --git a/tests/restart/01-broadcast.t b/tests/restart/01-broadcast.t
index ca369f4..b04bf17 100644
--- a/tests/restart/01-broadcast.t
+++ b/tests/restart/01-broadcast.t
@@ -20,18 +20,10 @@ if [[ -z ${TEST_DIR:-} ]]; then
     . $(dirname $0)/test_header
 fi
 #-------------------------------------------------------------------------------
-set_test_number 14
+set_test_number 10
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE broadcast
-TEST_SUITE_RUN_OPTIONS=
-SUITE_TIMEOUT=240
-if [[ -n ${CYLC_LL_TEST_TASK_HOST:-} && ${CYLC_LL_TEST_TASK_HOST:-} != 'None' ]]; then
-    ssh $CYLC_LL_TEST_TASK_HOST mkdir -p .cylc/$SUITE_NAME/
-    scp $TEST_DIR/$SUITE_NAME/passphrase $CYLC_LL_TEST_TASK_HOST:.cylc/$SUITE_NAME/passphrase
-    export CYLC_LL_TEST_SITE_DIRECTIVES CYLC_LL_TEST_TASK_HOST
-    TEST_SUITE_RUN_OPTIONS="--set=USE_LOADLEVELER=true"
-    SUITE_TIMEOUT=900
-fi
+cp "$TEST_SOURCE_DIR/lib/suite-runtime-restart.rc" "$TEST_DIR/$SUITE_NAME/"
 export TEST_DIR
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
@@ -39,34 +31,22 @@ run_ok $TEST_NAME cylc validate $SUITE_NAME
 cmp_ok "$TEST_NAME.stderr" </dev/null
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --debug $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
-# Sleep until penultimate task (the suite stops and starts, so port files alone
-# won't help)
-TEST_NAME=$TEST_NAME_BASE-monitor
-START_TIME=$(date +%s)
-export START_TIME SUITE_NAME SUITE_TIMEOUT
-run_ok $TEST_NAME bash <<'__SCRIPT__'
-while [[ -e $HOME/.cylc/ports/$SUITE_NAME || ! -e $TEST_DIR/suite-stopping ]]; do
-    if [[ $(date +%s) > $(( START_TIME + SUITE_TIMEOUT )) ]]; then
-        echo "[ERROR] Suite Timeout - shutting down..." >&2
-        cylc shutdown --now --kill $SUITE_NAME &
-        exit 1
-    fi
-    sleep 1
-done
-__SCRIPT__
-cmp_ok "$TEST_NAME.stderr" </dev/null
+suite_run_ok $TEST_NAME cylc run --no-detach $SUITE_NAME
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-restart-run
+suite_run_ok $TEST_NAME cylc restart --no-detach $SUITE_NAME
+#-------------------------------------------------------------------------------
 state_dir=$(cylc get-global-config --print-run-dir)/$SUITE_NAME/state/
 cp $state_dir/state $TEST_DIR/
-for state_file in $(ls $TEST_DIR/state*); do
+for state_file in $(ls $TEST_DIR/*state*); do
     sed -i "/^time : /d" $state_file
 done
-cmp_ok $TEST_DIR/state-pre-restart-2013092300 <<'__STATE__'
+cmp_ok $TEST_DIR/pre-restart-state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
-S'2013092300'
+S'20130923T0000Z'
 p2
 (dp3
 S'broadcast_task'
@@ -75,102 +55,44 @@ p4
 S'environment'
 p6
 (dp7
-S'MY_TIME'
+S'MY_VALUE'
 p8
-S'2013092300'
+S'something'
 p9
 ssss.
 Begin task states
-broadcast_task.2013092300 : status=waiting, spawned=false
-force_restart.2013092300 : status=running, spawned=true
-force_restart.2013092306 : status=waiting, spawned=false
-output_states.2013092300 : status=waiting, spawned=false
-send_a_broadcast_task.2013092300 : status=succeeded, spawned=true
-send_a_broadcast_task.2013092306 : status=waiting, spawned=false
-tidy.2013092300 : status=waiting, spawned=false
+broadcast_task.20130923T0000Z : status=waiting, spawned=false
+finish.20130923T0000Z : status=waiting, spawned=false
+output_states.20130923T0000Z : status=waiting, spawned=false
+send_a_broadcast_task.20130923T0000Z : status=succeeded, spawned=true
 __STATE__
-grep_ok "broadcast_task|2013092300|0|1|waiting" $TEST_DIR/states-db-pre-restart-2013092300
-grep_ok "send_a_broadcast_task|2013092300|1|1|succeeded" $TEST_DIR/states-db-pre-restart-2013092300
-contains_ok $TEST_DIR/states-db-post-restart-2013092300 <<'__DB_DUMP__'
-broadcast_task|2013092300|0|1|waiting
-force_restart|2013092300|1|1|succeeded
-output_states|2013092300|1|1|running
-send_a_broadcast_task|2013092300|1|1|succeeded
-tidy|2013092300|0|1|waiting
+grep_ok "broadcast_task|20130923T0000Z|0|1|waiting" \
+    $TEST_DIR/pre-restart-db
+grep_ok "send_a_broadcast_task|20130923T0000Z|1|1|succeeded" \
+    $TEST_DIR/pre-restart-db
+contains_ok $TEST_DIR/post-restart-db <<'__DB_DUMP__'
+broadcast_task|20130923T0000Z|0|1|waiting
+finish|20130923T0000Z|0|1|waiting
+send_a_broadcast_task|20130923T0000Z|1|1|succeeded
+shutdown|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
-cmp_ok $TEST_DIR/state-pre-restart-2013092306 <<'__STATE__'
-run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
-(dp1
-S'2013092300'
-p2
-(dp3
-S'broadcast_task'
-p4
-(dp5
-S'environment'
-p6
-(dp7
-S'MY_TIME'
-p8
-S'2013092300'
-p9
-ssssS'2013092306'
-p10
-(dp11
-S'broadcast_task'
-p12
-(dp13
-S'environment'
-p14
-(dp15
-S'MY_TIME'
-p16
-S'2013092306'
-p17
-ssss.
-Begin task states
-broadcast_task.2013092306 : status=waiting, spawned=false
-force_restart.2013092306 : status=running, spawned=true
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092306 : status=waiting, spawned=false
-send_a_broadcast_task.2013092306 : status=succeeded, spawned=true
-send_a_broadcast_task.2013092312 : status=held, spawned=false
-tidy.2013092300 : status=succeeded, spawned=true
-tidy.2013092306 : status=waiting, spawned=false
-__STATE__
-contains_ok $TEST_DIR/states-db-pre-restart-2013092306 <<'__DB_DUMP__'
-broadcast_task|2013092300|1|1|succeeded
-broadcast_task|2013092306|0|1|waiting
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|running
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|0|1|waiting
-send_a_broadcast_task|2013092300|1|1|succeeded
-send_a_broadcast_task|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
-__DB_DUMP__
-
-contains_ok $TEST_DIR/states-db-post-restart-2013092306 <<'__DB_DUMP__'
-broadcast_task|2013092300|1|1|succeeded
-broadcast_task|2013092306|0|1|waiting
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|running
-send_a_broadcast_task|2013092300|1|1|succeeded
-send_a_broadcast_task|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
+sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
+ "select name, cycle, submit_num, try_num, status
+  from task_states
+  order by name, cycle;" > $TEST_DIR/db
+contains_ok $TEST_DIR/db <<'__DB_DUMP__'
+broadcast_task|20130923T0000Z|1|1|succeeded
+finish|20130923T0000Z|1|1|succeeded
+output_states|20130923T0000Z|1|1|succeeded
+send_a_broadcast_task|20130923T0000Z|1|1|succeeded
+shutdown|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
 cmp_ok $TEST_DIR/state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
-S'2013092306'
+S'20130923T0000Z'
 p2
 (dp3
 S'broadcast_task'
@@ -179,37 +101,17 @@ p4
 S'environment'
 p6
 (dp7
-S'MY_TIME'
+S'MY_VALUE'
 p8
-S'2013092306'
+S'something'
 p9
 ssss.
 Begin task states
-broadcast_task.2013092312 : status=held, spawned=false
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092312 : status=held, spawned=false
-send_a_broadcast_task.2013092312 : status=held, spawned=false
-tidy.2013092306 : status=succeeded, spawned=true
-tidy.2013092312 : status=held, spawned=false
+broadcast_task.20130923T0000Z : status=succeeded, spawned=true
+finish.20130923T0000Z : status=succeeded, spawned=true
+output_states.20130923T0000Z : status=succeeded, spawned=true
+send_a_broadcast_task.20130923T0000Z : status=succeeded, spawned=true
+shutdown.20130923T0000Z : status=succeeded, spawned=true
 __STATE__
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
- "select name, cycle, submit_num, try_num, status
-  from task_states
-  order by name, cycle;" > $TEST_DIR/states-db
-contains_ok $TEST_DIR/states-db <<'__DB_DUMP__'
-broadcast_task|2013092300|1|1|succeeded
-broadcast_task|2013092306|1|1|succeeded
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|succeeded
-send_a_broadcast_task|2013092300|1|1|succeeded
-send_a_broadcast_task|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|1|1|succeeded
-__DB_DUMP__
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
-if [[ -n ${CYLC_LL_TEST_TASK_HOST:-} && ${CYLC_LL_TEST_TASK_HOST:-} != 'None' && -n $SUITE_NAME ]]; then
-    ssh $CYLC_LL_TEST_TASK_HOST rm -rf .cylc/$SUITE_NAME
-fi
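
The rewritten restart tests above (and the ones that follow) all share one
pattern: run the suite in the foreground until it shuts itself down, restart it
in the foreground, then compare the final state file and a sqlite3 dump of the
task_states table. A sketch, with names resolved as in the tests (01-broadcast
uses --no-detach; the later restart tests use --debug):

    cylc run --no-detach "$SUITE_NAME"        # first run, stops itself
    cylc restart --no-detach "$SUITE_NAME"    # picks up from the state file
    RUN_DIR="$(cylc get-global-config --print-run-dir)/$SUITE_NAME"
    sqlite3 "$RUN_DIR/cylc-suite.db" \
        "select name, cycle, submit_num, try_num, status
         from task_states order by name, cycle;"
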
diff --git a/tests/restart/02-failed.t b/tests/restart/02-failed.t
index 6476fdf..a5210ba 100644
--- a/tests/restart/02-failed.t
+++ b/tests/restart/02-failed.t
@@ -20,131 +20,65 @@ if [[ -z ${TEST_DIR:-} ]]; then
     . $(dirname $0)/test_header
 fi
 #-------------------------------------------------------------------------------
-set_test_number 12
+set_test_number 9
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE failed
-TEST_SUITE_RUN_OPTIONS=
-SUITE_TIMEOUT=240
-if [[ -n ${CYLC_LL_TEST_TASK_HOST:-} && ${CYLC_LL_TEST_TASK_HOST:-} != 'None' ]]; then
-    ssh $CYLC_LL_TEST_TASK_HOST mkdir -p .cylc/$SUITE_NAME/
-    scp $TEST_DIR/$SUITE_NAME/passphrase $CYLC_LL_TEST_TASK_HOST:.cylc/$SUITE_NAME/passphrase
-    export CYLC_LL_TEST_SITE_DIRECTIVES CYLC_LL_TEST_TASK_HOST
-    TEST_SUITE_RUN_OPTIONS="--set=USE_LOADLEVELER=true"
-    SUITE_TIMEOUT=900
-fi
+cp "$TEST_SOURCE_DIR/lib/suite-runtime-restart.rc" "$TEST_DIR/$SUITE_NAME/"
 export TEST_DIR
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_ok $TEST_NAME cylc validate $SUITE_NAME
+cmp_ok "$TEST_NAME.stderr" </dev/null
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --debug $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
-# Sleep until penultimate task (the suite stops and starts, so port files alone
-# won't help)
-TEST_NAME=$TEST_NAME_BASE-monitor
-START_TIME=$(date +%s)
-export START_TIME SUITE_NAME SUITE_TIMEOUT
-run_ok $TEST_NAME bash <<'__SCRIPT__'
-while [[ -e $HOME/.cylc/ports/$SUITE_NAME || ! -e $TEST_DIR/suite-stopping ]]; do
-    if [[ $(date +%s) > $(( START_TIME + SUITE_TIMEOUT )) ]]; then
-        echo "[ERROR] Suite Timeout - shutting down..." >&2
-        cylc shutdown --now --kill $SUITE_NAME &
-        exit 1
-    fi
-    sleep 1
-done
-__SCRIPT__
-cmp_ok "$TEST_NAME.stderr" </dev/null
+suite_run_ok $TEST_NAME cylc run --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-restart-run
+suite_run_ok $TEST_NAME cylc restart --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
 state_dir=$(cylc get-global-config --print-run-dir)/$SUITE_NAME/state/
 cp $state_dir/state $TEST_DIR/
-for state_file in $(ls $TEST_DIR/state*); do
+for state_file in $(ls $TEST_DIR/*state*); do
     sed -i "/^time : /d" $state_file
 done
-cmp_ok $TEST_DIR/state-pre-restart-2013092300 <<'__STATE__'
-run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
-(dp1
-.
-Begin task states
-failed_task.2013092300 : status=failed, spawned=true
-failed_task.2013092306 : status=waiting, spawned=false
-force_restart.2013092300 : status=running, spawned=true
-force_restart.2013092306 : status=waiting, spawned=false
-output_states.2013092300 : status=waiting, spawned=false
-tidy.2013092300 : status=waiting, spawned=false
-__STATE__
-grep_ok "failed_task|2013092300|1|1|failed" $TEST_DIR/states-db-pre-restart-2013092300
-contains_ok $TEST_DIR/states-db-post-restart-2013092300 <<'__DB_DUMP__'
-failed_task|2013092300|1|1|failed
-force_restart|2013092300|1|1|succeeded
-output_states|2013092300|1|1|running
-tidy|2013092300|0|1|waiting
-__DB_DUMP__
-cmp_ok $TEST_DIR/state-pre-restart-2013092306 <<'__STATE__'
+cmp_ok $TEST_DIR/pre-restart-state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-failed_task.2013092306 : status=failed, spawned=true
-failed_task.2013092312 : status=held, spawned=false
-force_restart.2013092306 : status=running, spawned=true
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092306 : status=waiting, spawned=false
-tidy.2013092300 : status=succeeded, spawned=true
-tidy.2013092306 : status=waiting, spawned=false
+failed_task.20130923T0000Z : status=failed, spawned=true
+finish.20130923T0000Z : status=waiting, spawned=false
+output_states.20130923T0000Z : status=waiting, spawned=false
 __STATE__
-contains_ok $TEST_DIR/states-db-pre-restart-2013092306 <<'__DB_DUMP__'
-failed_task|2013092300|1|1|failed
-failed_task|2013092306|1|1|failed
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|running
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|0|1|waiting
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
+grep_ok "failed_task|20130923T0000Z|1|1|failed" \
+    $TEST_DIR/pre-restart-db
+contains_ok $TEST_DIR/post-restart-db <<'__DB_DUMP__'
+failed_task|20130923T0000Z|1|1|failed
+finish|20130923T0000Z|0|1|waiting
+shutdown|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
-contains_ok $TEST_DIR/states-db-post-restart-2013092306 <<'__DB_DUMP__'
-failed_task|2013092300|1|1|failed
-failed_task|2013092306|1|1|failed
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|running
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
+sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
+ "select name, cycle, submit_num, try_num, status
+  from task_states
+  order by name, cycle;" > $TEST_DIR/db
+contains_ok $TEST_DIR/db <<'__DB_DUMP__'
+failed_task|20130923T0000Z|1|1|failed
+finish|20130923T0000Z|1|1|succeeded
+output_states|20130923T0000Z|1|1|succeeded
+shutdown|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
 cmp_ok $TEST_DIR/state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-failed_task.2013092312 : status=held, spawned=false
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092312 : status=held, spawned=false
-tidy.2013092306 : status=succeeded, spawned=true
-tidy.2013092312 : status=held, spawned=false
+finish.20130923T0000Z : status=succeeded, spawned=true
+output_states.20130923T0000Z : status=succeeded, spawned=true
+shutdown.20130923T0000Z : status=succeeded, spawned=true
 __STATE__
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
- "select name, cycle, submit_num, try_num, status
-  from task_states
-  order by name, cycle;" > $TEST_DIR/states-db
-contains_ok $TEST_DIR/states-db <<'__DB_DUMP__'
-failed_task|2013092300|1|1|failed
-failed_task|2013092306|1|1|failed
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|1|1|succeeded
-__DB_DUMP__
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
-if [[ -n ${CYLC_LL_TEST_TASK_HOST:-} && ${CYLC_LL_TEST_TASK_HOST:-} != 'None' && -n $SUITE_NAME ]]; then
-    ssh $CYLC_LL_TEST_TASK_HOST rm -rf .cylc/$SUITE_NAME
-fi
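
The "(dp1 ... ssss." block compared in these state files is a protocol-0
Python pickle text dump of the suite's broadcast settings mapping (empty in
02-failed, hence the bare "(dp1" and "."; a nested dict in 01-broadcast). The
1-based memo ids appear to match cPickle's output. A sketch reproducing the
01-broadcast shape, assuming Python 2 with cPickle available:

    python -c "import cPickle; print cPickle.dumps(
        {'20130923T0000Z': {'broadcast_task':
            {'environment': {'MY_VALUE': 'something'}}}}, 0)"
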
diff --git a/tests/restart/03-retrying.t b/tests/restart/03-retrying.t
index 745ef4f..86838a5 100644
--- a/tests/restart/03-retrying.t
+++ b/tests/restart/03-retrying.t
@@ -15,150 +15,71 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test restarting a simple suite with a retrying task
+# Test restarting a simple suite with a task in a retrying state.
 if [[ -z ${TEST_DIR:-} ]]; then
     . $(dirname $0)/test_header
 fi
 #-------------------------------------------------------------------------------
-set_test_number 14
+set_test_number 9
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE retrying
-TEST_SUITE_RUN_OPTIONS=
-SUITE_TIMEOUT=240
-if [[ -n ${CYLC_TEST_BATCH_TASK_HOST:-} && ${CYLC_TEST_BATCH_TASK_HOST:-} != 'None' ]]
-then
-    ssh ${SSH_OPTS} -n "${CYLC_TEST_BATCH_TASK_HOST}" \
-        "mkdir -p '.cylc/${SUITE_NAME}/'"
-    scp ${SSH_OPTS} "${TEST_DIR}/${SUITE_NAME}/passphrase" \
-        "${CYLC_TEST_BATCH_TASK_HOST}:.cylc/${SUITE_NAME}/passphrase"
-    export CYLC_TEST_BATCH_SITE_DIRECTIVES CYLC_TEST_BATCH_TASK_HOST
-    TEST_SUITE_RUN_OPTIONS="--set=BATCH_SYS_NAME=$BATCH_SYS_NAME"
-    SUITE_TIMEOUT=900
-fi
+cp "$TEST_SOURCE_DIR/lib/suite-runtime-restart.rc" "$TEST_DIR/$SUITE_NAME/"
 export TEST_DIR
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
+run_ok $TEST_NAME cylc validate $SUITE_NAME
 cmp_ok "$TEST_NAME.stderr" </dev/null
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --debug $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
-# Sleep until penultimate task (the suite stops and starts, so port files alone
-# won't help)
-TEST_NAME=$TEST_NAME_BASE-monitor
-START_TIME=$(date +%s)
-export START_TIME SUITE_NAME SUITE_TIMEOUT
-run_ok $TEST_NAME bash <<'__SCRIPT__'
-while [[ -e $HOME/.cylc/ports/$SUITE_NAME || ! -e $TEST_DIR/suite-stopping ]]; do
-    if [[ $(date +%s) > $(( START_TIME + SUITE_TIMEOUT )) ]]; then
-        echo "[ERROR] Suite Timeout - shutting down..." >&2
-        cylc shutdown --now --kill $SUITE_NAME &
-        exit 1
-    fi
-    sleep 1
-done
-__SCRIPT__
-cmp_ok "$TEST_NAME.stderr" </dev/null
+suite_run_ok $TEST_NAME cylc run --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-restart-run
+suite_run_ok $TEST_NAME cylc restart --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
 state_dir=$(cylc get-global-config --print-run-dir)/$SUITE_NAME/state/
 cp $state_dir/state $TEST_DIR/
-for state_file in $(ls $TEST_DIR/state*); do
+for state_file in $(ls $TEST_DIR/*state*); do
     sed -i "/^time : /d" $state_file
 done
-cmp_ok $TEST_DIR/state-pre-restart-2013092300 <<'__STATE__'
-run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
-(dp1
-.
-Begin task states
-force_restart.2013092300 : status=running, spawned=true
-force_restart.2013092306 : status=waiting, spawned=false
-output_states.2013092300 : status=waiting, spawned=false
-retrying_task.2013092300 : status=retrying, spawned=true
-retrying_task.2013092306 : status=waiting, spawned=false
-tidy.2013092300 : status=waiting, spawned=false
-__STATE__
-grep_ok "retrying_task|2013092300|1|2|retrying" $TEST_DIR/states-db-pre-restart-2013092300
-contains_ok $TEST_DIR/states-db-post-restart-2013092300 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-output_states|2013092300|1|1|running
-retrying_task|2013092300|2|2|retrying
-tidy|2013092300|0|1|waiting
-__DB_DUMP__
-contains_ok $TEST_DIR/states-db-tidy-2013092300 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-retrying_task|2013092300|4|3|succeeded
-tidy|2013092300|1|1|running
-__DB_DUMP__
-cmp_ok $TEST_DIR/state-pre-restart-2013092306 <<'__STATE__'
+cmp_ok $TEST_DIR/pre-restart-state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092306 : status=running, spawned=true
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092306 : status=waiting, spawned=false
-retrying_task.2013092306 : status=retrying, spawned=true
-retrying_task.2013092312 : status=held, spawned=false
-tidy.2013092300 : status=succeeded, spawned=true
-tidy.2013092306 : status=waiting, spawned=false
+finish.20130923T0000Z : status=waiting, spawned=false
+output_states.20130923T0000Z : status=waiting, spawned=false
+retrying_task.20130923T0000Z : status=retrying, spawned=true
 __STATE__
-contains_ok $TEST_DIR/states-db-pre-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|running
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|0|1|waiting
-retrying_task|2013092300|4|3|succeeded
-retrying_task|2013092306|1|2|retrying
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
+grep_ok "retrying_task|20130923T0000Z|1|2|retrying" \
+    $TEST_DIR/pre-restart-db
+contains_ok $TEST_DIR/post-restart-db <<'__DB_DUMP__'
+finish|20130923T0000Z|0|1|waiting
+retrying_task|20130923T0000Z|2|2|retrying
+shutdown|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
-contains_ok $TEST_DIR/states-db-post-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|running
-retrying_task|2013092300|4|3|succeeded
-retrying_task|2013092306|2|2|retrying
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
+sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
+ "select name, cycle, submit_num, try_num, status
+  from task_states
+  order by name, cycle;" > $TEST_DIR/db
+contains_ok $TEST_DIR/db <<'__DB_DUMP__'
+finish|20130923T0000Z|1|1|succeeded
+output_states|20130923T0000Z|1|1|succeeded
+retrying_task|20130923T0000Z|4|3|succeeded
+shutdown|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
 cmp_ok $TEST_DIR/state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092312 : status=held, spawned=false
-retrying_task.2013092312 : status=held, spawned=false
-tidy.2013092306 : status=succeeded, spawned=true
-tidy.2013092312 : status=held, spawned=false
+finish.20130923T0000Z : status=succeeded, spawned=true
+output_states.20130923T0000Z : status=succeeded, spawned=true
+retrying_task.20130923T0000Z : status=succeeded, spawned=true
+shutdown.20130923T0000Z : status=succeeded, spawned=true
 __STATE__
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
- "select name, cycle, submit_num, try_num, status
-  from task_states
-  order by name, cycle;" > $TEST_DIR/states-db
-contains_ok $TEST_DIR/states-db <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|succeeded
-retrying_task|2013092300|4|3|succeeded
-retrying_task|2013092306|4|3|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|1|1|succeeded
-__DB_DUMP__
 #-------------------------------------------------------------------------------
-purge_suite "${SUITE_NAME}"
-if [[ -n "${CYLC_TEST_BATCH_TASK_HOST:-}" && \
-    "${CYLC_TEST_BATCH_TASK_HOST:-}" != 'None' ]]
-then
-    ssh -n ${SSH_OPTS} "${CYLC_TEST_BATCH_TASK_HOST}" \
-        "rm -fr .cylc/${SUITE_NAME} cylc-run/${SUITE_NAME}"
-fi
-exit
+purge_suite "$SUITE_NAME"
diff --git a/tests/restart/04-running.t b/tests/restart/04-running.t
index e17015f..b187a43 100644
--- a/tests/restart/04-running.t
+++ b/tests/restart/04-running.t
@@ -15,144 +15,72 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test restarting a simple suite with a running task
+# Test restarting a simple suite with a task still running (orphaned)
 if [[ -z ${TEST_DIR:-} ]]; then
     . $(dirname $0)/test_header
 fi
 #-------------------------------------------------------------------------------
-set_test_number 13
+set_test_number 9
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE running
-TEST_SUITE_RUN_OPTIONS=
-SUITE_TIMEOUT=240
-if [[ -n ${CYLC_TEST_BATCH_TASK_HOST:-} && ${CYLC_TEST_BATCH_TASK_HOST:-} != 'None' ]]
-then
-    ssh ${SSH_OPTS} -n "${CYLC_TEST_BATCH_TASK_HOST}" \
-        "mkdir -p '.cylc/${SUITE_NAME}/'"
-    scp ${SSH_OPTS} "${TEST_DIR}/${SUITE_NAME}/passphrase" \
-        "${CYLC_TEST_BATCH_TASK_HOST}:.cylc/${SUITE_NAME}/passphrase"
-    export CYLC_TEST_BATCH_SITE_DIRECTIVES CYLC_TEST_BATCH_TASK_HOST
-    TEST_SUITE_RUN_OPTIONS="--set=BATCH_SYS_NAME=$BATCH_SYS_NAME"
-    SUITE_TIMEOUT=900
-fi
+cp "$TEST_SOURCE_DIR/lib/suite-runtime-restart.rc" "$TEST_DIR/$SUITE_NAME/"
 export TEST_DIR
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
+run_ok $TEST_NAME cylc validate $SUITE_NAME
 cmp_ok "$TEST_NAME.stderr" </dev/null
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --debug $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
-# Sleep until penultimate task (the suite stops and starts, so port files alone
-# won't help)
-TEST_NAME=$TEST_NAME_BASE-monitor
-START_TIME=$(date +%s)
-export START_TIME SUITE_NAME SUITE_TIMEOUT
-run_ok $TEST_NAME bash <<'__SCRIPT__'
-while [[ -e $HOME/.cylc/ports/$SUITE_NAME || ! -e $TEST_DIR/suite-stopping ]]; do
-    if [[ $(date +%s) > $(( START_TIME + SUITE_TIMEOUT )) ]]; then
-        echo "[ERROR] Suite Timeout - shutting down..." >&2
-        cylc shutdown --now --kill $SUITE_NAME &
-        exit 1
-    fi
-    sleep 1
-done
-__SCRIPT__
-cmp_ok "$TEST_NAME.stderr" </dev/null
+suite_run_ok $TEST_NAME cylc run --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+sleep 10
+TEST_NAME=$TEST_NAME_BASE-restart-run
+suite_run_ok $TEST_NAME cylc restart --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
 state_dir=$(cylc get-global-config --print-run-dir)/$SUITE_NAME/state/
 cp $state_dir/state $TEST_DIR/
-for state_file in $(ls $TEST_DIR/state*); do
+for state_file in $(ls $TEST_DIR/*state*); do
     sed -i "/^time : /d" $state_file
 done
-cmp_ok $TEST_DIR/state-pre-restart-2013092300 <<'__STATE__'
+cmp_ok $TEST_DIR/pre-restart-state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092300 : status=running, spawned=true
-force_restart.2013092306 : status=waiting, spawned=false
-output_states.2013092300 : status=waiting, spawned=false
-running_task.2013092300 : status=running, spawned=true
-running_task.2013092306 : status=waiting, spawned=false
-tidy.2013092300 : status=waiting, spawned=false
+finish.20130923T0000Z : status=waiting, spawned=false
+output_states.20130923T0000Z : status=waiting, spawned=false
+running_task.20130923T0000Z : status=running, spawned=true
 __STATE__
-grep_ok "running_task|2013092300|1|1|running" $TEST_DIR/states-db-pre-restart-2013092300
-contains_ok $TEST_DIR/states-db-post-restart-2013092300 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-running_task|2013092300|1|1|running
-tidy|2013092300|0|1|waiting
+grep_ok "running_task|20130923T0000Z|1|1|running" \
+    $TEST_DIR/pre-restart-db
+contains_ok $TEST_DIR/post-restart-db <<'__DB_DUMP__'
+finish|20130923T0000Z|0|1|waiting
+running_task|20130923T0000Z|1|1|running
+shutdown|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
-cmp_ok $TEST_DIR/state-pre-restart-2013092306 <<'__STATE__'
-run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
-(dp1
-.
-Begin task states
-force_restart.2013092306 : status=running, spawned=true
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092306 : status=waiting, spawned=false
-running_task.2013092306 : status=running, spawned=true
-running_task.2013092312 : status=held, spawned=false
-tidy.2013092300 : status=succeeded, spawned=true
-tidy.2013092306 : status=waiting, spawned=false
-__STATE__
-contains_ok $TEST_DIR/states-db-pre-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|running
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|0|1|waiting
-running_task|2013092300|1|1|succeeded
-running_task|2013092306|1|1|running
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
-__DB_DUMP__
-
-contains_ok $TEST_DIR/states-db-post-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|2|1|running
-running_task|2013092300|1|1|succeeded
-running_task|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
+sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
+ "select name, cycle, submit_num, try_num, status
+  from task_states
+  order by name, cycle;" > $TEST_DIR/db
+contains_ok $TEST_DIR/db <<'__DB_DUMP__'
+finish|20130923T0000Z|1|1|succeeded
+output_states|20130923T0000Z|1|1|succeeded
+running_task|20130923T0000Z|1|1|succeeded
+shutdown|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
 cmp_ok $TEST_DIR/state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092312 : status=held, spawned=false
-running_task.2013092312 : status=held, spawned=false
-tidy.2013092306 : status=succeeded, spawned=true
-tidy.2013092312 : status=held, spawned=false
+finish.20130923T0000Z : status=succeeded, spawned=true
+output_states.20130923T0000Z : status=succeeded, spawned=true
+running_task.20130923T0000Z : status=succeeded, spawned=true
+shutdown.20130923T0000Z : status=succeeded, spawned=true
 __STATE__
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
- "select name, cycle, submit_num, try_num, status
-  from task_states
-  order by name, cycle;" > $TEST_DIR/states-db
-contains_ok $TEST_DIR/states-db <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|2|1|succeeded
-running_task|2013092300|1|1|succeeded
-running_task|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|1|1|succeeded
-__DB_DUMP__
 #-------------------------------------------------------------------------------
-purge_suite "${SUITE_NAME}"
-if [[ -n "${CYLC_TEST_BATCH_TASK_HOST:-}" && \
-    "${CYLC_TEST_BATCH_TASK_HOST:-}" != 'None' ]]
-then
-    ssh -n ${SSH_OPTS} "${CYLC_TEST_BATCH_TASK_HOST}" \
-        "rm -fr .cylc/${SUITE_NAME} cylc-run/${SUITE_NAME}"
-fi
-exit
+purge_suite "$SUITE_NAME"
diff --git a/tests/restart/05-submit-failed.t b/tests/restart/05-submit-failed.t
index b32c441..86f3f1c 100644
--- a/tests/restart/05-submit-failed.t
+++ b/tests/restart/05-submit-failed.t
@@ -20,131 +20,71 @@ if [[ -z ${TEST_DIR:-} ]]; then
     . $(dirname $0)/test_header
 fi
 #-------------------------------------------------------------------------------
-set_test_number 13
+set_test_number 9
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE submit-failed
-TEST_SUITE_RUN_OPTIONS=
-SUITE_TIMEOUT=240
-if [[ -n ${CYLC_TEST_BATCH_TASK_HOST:-} && ${CYLC_TEST_BATCH_TASK_HOST:-} != 'None' ]]; then
-    ssh $CYLC_TEST_BATCH_TASK_HOST mkdir -p .cylc/$SUITE_NAME/
-    scp $TEST_DIR/$SUITE_NAME/passphrase $CYLC_TEST_BATCH_TASK_HOST:.cylc/$SUITE_NAME/passphrase
-    export CYLC_TEST_BATCH_SITE_DIRECTIVES CYLC_TEST_BATCH_TASK_HOST
-    TEST_SUITE_RUN_OPTIONS="--set=BATCH_SYS_NAME=$BATCH_SYS_NAME"
-    SUITE_TIMEOUT=900
-fi
+cp "$TEST_SOURCE_DIR/lib/suite-runtime-restart.rc" "$TEST_DIR/$SUITE_NAME/"
 export TEST_DIR
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
+run_ok $TEST_NAME cylc validate $SUITE_NAME
 cmp_ok "$TEST_NAME.stderr" </dev/null
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --debug -v $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
-# Sleep until penultimate task (the suite stops and starts, so port files alone
-# won't help)
-TEST_NAME=$TEST_NAME_BASE-monitor
-START_TIME=$(date +%s)
-export START_TIME SUITE_NAME SUITE_TIMEOUT
-run_ok $TEST_NAME bash <<'__SCRIPT__'
-while [[ -e $HOME/.cylc/ports/$SUITE_NAME || ! -e $TEST_DIR/suite-stopping ]]; do
-    if [[ $(date +%s) > $(( START_TIME + SUITE_TIMEOUT )) ]]; then
-        echo "[ERROR] Suite Timeout - shutting down..." >&2
-        cylc shutdown --now --kill $SUITE_NAME &
-        exit 1
-    fi
-    sleep 1
-done
-__SCRIPT__
-cmp_ok "$TEST_NAME.stderr" </dev/null
+suite_run_ok $TEST_NAME cylc run --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-run
+suite_run_ok $TEST_NAME cylc restart --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
 state_dir=$(cylc get-global-config --print-run-dir)/$SUITE_NAME/state/
 cp $state_dir/state $TEST_DIR/
-for state_file in $(ls $TEST_DIR/state*); do
+for state_file in $(ls $TEST_DIR/*state*); do
     sed -i "/^time : /d" $state_file
 done
-cmp_ok $TEST_DIR/state-pre-restart-2013092300 <<'__STATE__'
+cmp_ok $TEST_DIR/pre-restart-state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092300 : status=running, spawned=true
-force_restart.2013092306 : status=waiting, spawned=false
-output_states.2013092300 : status=waiting, spawned=false
-submit_fail_task.2013092300 : status=submit-failed, spawned=false
-tidy.2013092300 : status=waiting, spawned=false
+finish.20130923T0000Z : status=waiting, spawned=false
+output_states.20130923T0000Z : status=waiting, spawned=false
+submit_failed_task.20130923T0000Z : status=submit-failed, spawned=false
 __STATE__
-grep_ok "submit_fail_task|2013092300|1|1|submit-failed" $TEST_DIR/states-db-pre-restart-2013092300
-contains_ok $TEST_DIR/states-db-post-restart-2013092300 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-output_states|2013092300|1|1|running
-submit_fail_task|2013092300|1|1|submit-failed
-tidy|2013092300|0|1|waiting
+grep_ok "submit_failed_task|20130923T0000Z|1|1|submit-failed" \
+    $TEST_DIR/pre-restart-db
+contains_ok $TEST_DIR/post-restart-db <<'__DB_DUMP__'
+finish|20130923T0000Z|0|1|waiting
+shutdown|20130923T0000Z|1|1|succeeded
+submit_failed_task|20130923T0000Z|1|1|submit-failed
 __DB_DUMP__
-cmp_ok $TEST_DIR/state-pre-restart-2013092306 <<'__STATE__'
-run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
-(dp1
-.
-Begin task states
-force_restart.2013092306 : status=running, spawned=true
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092306 : status=waiting, spawned=false
-submit_fail_task.2013092306 : status=submit-failed, spawned=false
-tidy.2013092300 : status=succeeded, spawned=true
-tidy.2013092306 : status=waiting, spawned=false
-__STATE__
-contains_ok $TEST_DIR/states-db-pre-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|running
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|0|1|waiting
-submit_fail_task|2013092300|1|1|submit-failed
-submit_fail_task|2013092306|1|1|submit-failed
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
-__DB_DUMP__
-
-contains_ok $TEST_DIR/states-db-post-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|running
-submit_fail_task|2013092300|1|1|submit-failed
-submit_fail_task|2013092306|1|1|submit-failed
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
+sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
+ "select name, cycle, submit_num, try_num, status
+  from task_states
+  order by name, cycle;" > $TEST_DIR/db
+contains_ok $TEST_DIR/db <<'__DB_DUMP__'
+finish|20130923T0000Z|1|1|succeeded
+output_states|20130923T0000Z|1|1|succeeded
+shutdown|20130923T0000Z|1|1|succeeded
+submit_failed_task|20130923T0000Z|1|1|submit-failed
 __DB_DUMP__
 cmp_ok $TEST_DIR/state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092312 : status=held, spawned=false
-submit_fail_task.2013092312 : status=held, spawned=false
-tidy.2013092306 : status=succeeded, spawned=true
-tidy.2013092312 : status=held, spawned=false
+finish.20130923T0000Z : status=succeeded, spawned=true
+output_states.20130923T0000Z : status=succeeded, spawned=true
+shutdown.20130923T0000Z : status=succeeded, spawned=true
 __STATE__
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
- "select name, cycle, submit_num, try_num, status
-  from task_states
-  order by name, cycle;" > $TEST_DIR/states-db
-contains_ok $TEST_DIR/states-db <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|succeeded
-submit_fail_task|2013092300|1|1|submit-failed
-submit_fail_task|2013092306|1|1|submit-failed
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|1|1|succeeded
-__DB_DUMP__
 #-------------------------------------------------------------------------------
-purge_suite $SUITE_NAME
-if [[ -n ${CYLC_TEST_BATCH_TASK_HOST:-} && ${CYLC_TEST_BATCH_TASK_HOST:-} != 'None' && -n $SUITE_NAME ]]; then
-    ssh $CYLC_TEST_BATCH_TASK_HOST rm -rf .cylc/$SUITE_NAME
+purge_suite "$SUITE_NAME"
+if [[ -n "${CYLC_TEST_BATCH_TASK_HOST:-}" && \
+    "${CYLC_TEST_BATCH_TASK_HOST:-}" != 'None' ]]
+then
+    ssh -n ${SSH_OPTS} "${CYLC_TEST_BATCH_TASK_HOST}" \
+        "rm -fr .cylc/${SUITE_NAME} cylc-run/${SUITE_NAME}"
 fi
diff --git a/tests/restart/06-succeeded.t b/tests/restart/06-succeeded.t
index ee2c124..1efbb45 100644
--- a/tests/restart/06-succeeded.t
+++ b/tests/restart/06-succeeded.t
@@ -20,18 +20,10 @@ if [[ -z ${TEST_DIR:-} ]]; then
     . $(dirname $0)/test_header
 fi
 #-------------------------------------------------------------------------------
-set_test_number 13
+set_test_number 9
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE succeeded
-TEST_SUITE_RUN_OPTIONS=
-SUITE_TIMEOUT=240
-if [[ -n ${CYLC_LL_TEST_TASK_HOST:-} && ${CYLC_LL_TEST_TASK_HOST:-} != 'None' ]]; then
-    ssh $CYLC_LL_TEST_TASK_HOST mkdir -p .cylc/$SUITE_NAME/
-    scp $TEST_DIR/$SUITE_NAME/passphrase $CYLC_LL_TEST_TASK_HOST:.cylc/$SUITE_NAME/passphrase
-    export CYLC_LL_TEST_SITE_DIRECTIVES CYLC_LL_TEST_TASK_HOST
-    TEST_SUITE_RUN_OPTIONS="--set=USE_LOADLEVELER=true"
-    SUITE_TIMEOUT=900
-fi
+cp "$TEST_SOURCE_DIR/lib/suite-runtime-restart.rc" "$TEST_DIR/$SUITE_NAME/"
 export TEST_DIR
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
@@ -39,114 +31,55 @@ run_ok $TEST_NAME cylc validate $SUITE_NAME
 cmp_ok "$TEST_NAME.stderr" </dev/null
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --debug $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
-# Sleep until penultimate task (the suite stops and starts, so port files alone
-# won't help)
-TEST_NAME=$TEST_NAME_BASE-monitor
-START_TIME=$(date +%s)
-export START_TIME SUITE_NAME SUITE_TIMEOUT
-run_ok $TEST_NAME bash <<'__SCRIPT__'
-while [[ -e $HOME/.cylc/ports/$SUITE_NAME || ! -e $TEST_DIR/suite-stopping ]]; do
-    if [[ $(date +%s) > $(( START_TIME + SUITE_TIMEOUT )) ]]; then
-        echo "[ERROR] Suite Timeout - shutting down..." >&2
-        cylc shutdown --now --kill $SUITE_NAME &
-        exit 1
-    fi
-    sleep 1
-done
-__SCRIPT__
-cmp_ok "$TEST_NAME.stderr" </dev/null
+suite_run_ok $TEST_NAME cylc run --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-restart-run
+suite_run_ok $TEST_NAME cylc restart --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
 state_dir=$(cylc get-global-config --print-run-dir)/$SUITE_NAME/state/
 cp $state_dir/state $TEST_DIR/
-for state_file in $(ls $TEST_DIR/state*); do
+for state_file in $(ls $TEST_DIR/*state*); do
     sed -i "/^time : /d" $state_file
 done
-cmp_ok $TEST_DIR/state-pre-restart-2013092300 <<'__STATE__'
+cmp_ok $TEST_DIR/pre-restart-state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092300 : status=running, spawned=true
-force_restart.2013092306 : status=waiting, spawned=false
-output_states.2013092300 : status=waiting, spawned=false
-succeed_task.2013092300 : status=succeeded, spawned=true
-succeed_task.2013092306 : status=waiting, spawned=false
-tidy.2013092300 : status=waiting, spawned=false
+finish.20130923T0000Z : status=waiting, spawned=false
+output_states.20130923T0000Z : status=waiting, spawned=false
+succeeded_task.20130923T0000Z : status=succeeded, spawned=true
 __STATE__
-grep_ok "succeed_task|2013092300|1|1|succeeded" $TEST_DIR/states-db-pre-restart-2013092300
-contains_ok $TEST_DIR/states-db-post-restart-2013092300 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-output_states|2013092300|1|1|running
-succeed_task|2013092300|1|1|succeeded
-tidy|2013092300|0|1|waiting
+grep_ok "succeeded_task|20130923T0000Z|1|1|succeeded" \
+    $TEST_DIR/pre-restart-db
+contains_ok $TEST_DIR/post-restart-db <<'__DB_DUMP__'
+finish|20130923T0000Z|0|1|waiting
+shutdown|20130923T0000Z|1|1|succeeded
+succeeded_task|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
-cmp_ok $TEST_DIR/state-pre-restart-2013092306 <<'__STATE__'
-run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
-(dp1
-.
-Begin task states
-force_restart.2013092306 : status=running, spawned=true
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092306 : status=waiting, spawned=false
-succeed_task.2013092306 : status=succeeded, spawned=true
-succeed_task.2013092312 : status=held, spawned=false
-tidy.2013092300 : status=succeeded, spawned=true
-tidy.2013092306 : status=waiting, spawned=false
-__STATE__
-contains_ok $TEST_DIR/states-db-pre-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|running
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|0|1|waiting
-succeed_task|2013092300|1|1|succeeded
-succeed_task|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
-__DB_DUMP__
-
-contains_ok $TEST_DIR/states-db-post-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|running
-succeed_task|2013092300|1|1|succeeded
-succeed_task|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
+sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
+ "select name, cycle, submit_num, try_num, status
+  from task_states
+  order by name, cycle;" > $TEST_DIR/db
+contains_ok $TEST_DIR/db <<'__DB_DUMP__'
+finish|20130923T0000Z|1|1|succeeded
+output_states|20130923T0000Z|1|1|succeeded
+shutdown|20130923T0000Z|1|1|succeeded
+succeeded_task|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
 cmp_ok $TEST_DIR/state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092312 : status=held, spawned=false
-succeed_task.2013092312 : status=held, spawned=false
-tidy.2013092306 : status=succeeded, spawned=true
-tidy.2013092312 : status=held, spawned=false
+finish.20130923T0000Z : status=succeeded, spawned=true
+output_states.20130923T0000Z : status=succeeded, spawned=true
+shutdown.20130923T0000Z : status=succeeded, spawned=true
+succeeded_task.20130923T0000Z : status=succeeded, spawned=true
 __STATE__
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
- "select name, cycle, submit_num, try_num, status
-  from task_states
-  order by name, cycle;" > $TEST_DIR/states-db
-contains_ok $TEST_DIR/states-db <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|succeeded
-succeed_task|2013092300|1|1|succeeded
-succeed_task|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|1|1|succeeded
-__DB_DUMP__
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
-if [[ -n ${CYLC_LL_TEST_TASK_HOST:-} && ${CYLC_LL_TEST_TASK_HOST:-} != 'None' && -n $SUITE_NAME ]]; then
-    ssh $CYLC_LL_TEST_TASK_HOST rm -rf .cylc/$SUITE_NAME
-fi
diff --git a/tests/restart/07-waiting.t b/tests/restart/07-waiting.t
index 658c6fa..d703c0a 100644
--- a/tests/restart/07-waiting.t
+++ b/tests/restart/07-waiting.t
@@ -20,19 +20,10 @@ if [[ -z ${TEST_DIR:-} ]]; then
     . $(dirname $0)/test_header
 fi
 #-------------------------------------------------------------------------------
-set_test_number 13
+set_test_number 9
 #-------------------------------------------------------------------------------
 install_suite $TEST_NAME_BASE waiting
-TEST_SUITE_RUN_OPTIONS=
-SUITE_TIMEOUT=240
-if [[ -n ${CYLC_LL_TEST_TASK_HOST:-} && ${CYLC_LL_TEST_TASK_HOST:-} != 'None' ]]; then
-    ssh $CYLC_LL_TEST_TASK_HOST mkdir -p .cylc/$SUITE_NAME/
-    scp $TEST_DIR/$SUITE_NAME/passphrase \
-        $CYLC_LL_TEST_TASK_HOST:.cylc/$SUITE_NAME/passphrase
-    export CYLC_LL_TEST_SITE_DIRECTIVES CYLC_LL_TEST_TASK_HOST
-    TEST_SUITE_RUN_OPTIONS="--set=USE_LOADLEVELER=true"
-    SUITE_TIMEOUT=900
-fi
+cp "$TEST_SOURCE_DIR/lib/suite-runtime-restart.rc" "$TEST_DIR/$SUITE_NAME/"
 export TEST_DIR
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
@@ -40,111 +31,55 @@ run_ok $TEST_NAME cylc validate $SUITE_NAME
 cmp_ok "$TEST_NAME.stderr" </dev/null
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --debug $TEST_SUITE_RUN_OPTIONS $SUITE_NAME
-# Sleep until penultimate task (the suite stops and starts, so port files alone
-# won't help)
-TEST_NAME=$TEST_NAME_BASE-monitor
-START_TIME=$(date +%s)
-export START_TIME SUITE_NAME SUITE_TIMEOUT
-run_ok $TEST_NAME bash <<'__SCRIPT__'
-while [[ -e $HOME/.cylc/ports/$SUITE_NAME || ! -e $TEST_DIR/suite-stopping ]]; do
-    if [[ $(date +%s) > $(( START_TIME + SUITE_TIMEOUT )) ]]; then
-        echo "[ERROR] Suite Timeout - shutting down..." >&2
-        cylc shutdown --now --kill $SUITE_NAME &
-        exit 1
-    fi
-    sleep 1
-done
-__SCRIPT__
-cmp_ok "$TEST_NAME.stderr" </dev/null
+suite_run_ok $TEST_NAME cylc run --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-restart-run
+suite_run_ok $TEST_NAME cylc restart --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
 state_dir=$(cylc get-global-config --print-run-dir)/$SUITE_NAME/state/
 cp $state_dir/state $TEST_DIR/
-for state_file in $(ls $TEST_DIR/state*); do
+for state_file in $(ls $TEST_DIR/*state*); do
     sed -i "/^time : /d" $state_file
 done
-cmp_ok $TEST_DIR/state-pre-restart-2013092300 <<'__STATE__'
+cmp_ok $TEST_DIR/pre-restart-state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092300 : status=running, spawned=true
-force_restart.2013092306 : status=waiting, spawned=false
-output_states.2013092300 : status=waiting, spawned=false
-tidy.2013092300 : status=waiting, spawned=false
-waiting_task.2013092300 : status=waiting, spawned=false
+finish.20130923T0000Z : status=waiting, spawned=false
+output_states.20130923T0000Z : status=waiting, spawned=false
+waiting_task.20130923T0000Z : status=waiting, spawned=false
 __STATE__
-grep_ok "waiting_task|2013092300|0|1|waiting" $TEST_DIR/states-db-pre-restart-2013092300
-contains_ok $TEST_DIR/states-db-post-restart-2013092300 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-output_states|2013092300|1|1|running
-tidy|2013092300|0|1|waiting
-waiting_task|2013092300|0|1|waiting
+grep_ok "waiting_task|20130923T0000Z|0|1|waiting" \
+    $TEST_DIR/pre-restart-db
+contains_ok $TEST_DIR/post-restart-db <<'__DB_DUMP__'
+finish|20130923T0000Z|0|1|waiting
+shutdown|20130923T0000Z|1|1|succeeded
+waiting_task|20130923T0000Z|0|1|waiting
 __DB_DUMP__
-cmp_ok $TEST_DIR/state-pre-restart-2013092306 <<'__STATE__'
-run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
-(dp1
-.
-Begin task states
-force_restart.2013092306 : status=running, spawned=true
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092306 : status=waiting, spawned=false
-tidy.2013092300 : status=succeeded, spawned=true
-tidy.2013092306 : status=waiting, spawned=false
-waiting_task.2013092306 : status=waiting, spawned=false
-__STATE__
-contains_ok $TEST_DIR/states-db-pre-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|running
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|0|1|waiting
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
-waiting_task|2013092300|1|1|succeeded
-waiting_task|2013092306|0|1|waiting
-__DB_DUMP__
-contains_ok $TEST_DIR/states-db-post-restart-2013092306 <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|running
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|0|1|waiting
-waiting_task|2013092300|1|1|succeeded
-waiting_task|2013092306|0|1|waiting
+sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
+ "select name, cycle, submit_num, try_num, status
+  from task_states
+  order by name, cycle;" > $TEST_DIR/db
+contains_ok $TEST_DIR/db <<'__DB_DUMP__'
+finish|20130923T0000Z|1|1|succeeded
+output_states|20130923T0000Z|1|1|succeeded
+shutdown|20130923T0000Z|1|1|succeeded
+waiting_task|20130923T0000Z|1|1|succeeded
 __DB_DUMP__
 cmp_ok $TEST_DIR/state <<'__STATE__'
 run mode : live
-initial cycle : 2013092300
-final cycle : 2013092306
+initial cycle : 20130923T0000Z
+final cycle : 20130923T0000Z
 (dp1
 .
 Begin task states
-force_restart.2013092312 : status=held, spawned=false
-output_states.2013092312 : status=held, spawned=false
-tidy.2013092306 : status=succeeded, spawned=true
-tidy.2013092312 : status=held, spawned=false
-waiting_task.2013092312 : status=held, spawned=false
+finish.20130923T0000Z : status=succeeded, spawned=true
+output_states.20130923T0000Z : status=succeeded, spawned=true
+shutdown.20130923T0000Z : status=succeeded, spawned=true
+waiting_task.20130923T0000Z : status=succeeded, spawned=true
 __STATE__
-sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
- "select name, cycle, submit_num, try_num, status
-  from task_states
-  order by name, cycle;" > $TEST_DIR/states-db
-contains_ok $TEST_DIR/states-db <<'__DB_DUMP__'
-force_restart|2013092300|1|1|succeeded
-force_restart|2013092306|1|1|succeeded
-output_states|2013092300|1|1|succeeded
-output_states|2013092306|1|1|succeeded
-tidy|2013092300|1|1|succeeded
-tidy|2013092306|1|1|succeeded
-waiting_task|2013092300|1|1|succeeded
-waiting_task|2013092306|1|1|succeeded
-__DB_DUMP__
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
-if [[ -n ${CYLC_LL_TEST_TASK_HOST:-} && ${CYLC_LL_TEST_TASK_HOST:-} != 'None' && -n $SUITE_NAME ]]; then
-    ssh $CYLC_LL_TEST_TASK_HOST rm -rf .cylc/$SUITE_NAME
-fi
diff --git a/tests/restart/11-bad-state-dump.t b/tests/restart/08-bad-state-dump.t
similarity index 100%
rename from tests/restart/11-bad-state-dump.t
rename to tests/restart/08-bad-state-dump.t
diff --git a/tests/restart/08-retrying-loadleveler.t b/tests/restart/08-retrying-loadleveler.t
deleted file mode 100644
index daca86d..0000000
--- a/tests/restart/08-retrying-loadleveler.t
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-# 
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#-------------------------------------------------------------------------------
-# Test restarting a simple suite using loadleveler with a retrying task
-#     This test requires a specific host [test battery] entry in 
-#     site/user config in order to run, otherwise it will be bypassed
-#-------------------------------------------------------------------------------
-. "$(dirname "$0")/test_header"
-#-------------------------------------------------------------------------------
-# export an environment variable for this - allows a script to be used to 
-# select a compute node and have that same host used by the suite.
-BATCH_SYS_NAME="${TEST_NAME_BASE##??-*-}"
-export CYLC_TEST_BATCH_TASK_HOST=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME]host")
-export CYLC_TEST_BATCH_SITE_DIRECTIVES=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME][directives]")
-if [[ -z "${CYLC_TEST_BATCH_TASK_HOST}" || "${CYLC_TEST_BATCH_TASK_HOST}" == None ]]
-then
-    skip_all "\"[test battery][batch systems][$BATCH_SYS_NAME]host\" not defined"
-fi
-# check the host is reachable
-if ! ssh -n ${SSH_OPTS} "${CYLC_TEST_BATCH_TASK_HOST}" true 1>/dev/null 2>&1
-then
-    skip_all "Host "$CYLC_TEST_BATCH_TASK_HOST" unreachable"
-fi
-. "${TEST_SOURCE_DIR}/03-retrying.t"
diff --git a/tests/restart/12-reload.t b/tests/restart/09-reload.t
similarity index 100%
rename from tests/restart/12-reload.t
rename to tests/restart/09-reload.t
diff --git a/tests/restart/09-running-loadleveler.t b/tests/restart/09-running-loadleveler.t
deleted file mode 100644
index cf42a3f..0000000
--- a/tests/restart/09-running-loadleveler.t
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-# 
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#-------------------------------------------------------------------------------
-# Test restarting a simple suite using loadleveler with a running task
-#     This test requires a specific host [test battery] entry in 
-#     site/user config in order to run, otherwise it will be bypassed
-#-------------------------------------------------------------------------------
-. "$(dirname "$0")/test_header"
-#-------------------------------------------------------------------------------
-export TEST_DIR
-# export an environment variable for this - allows a script to be used to 
-# select a compute node and have that same host used by the suite.
-BATCH_SYS_NAME="${TEST_NAME_BASE##??-*-}"
-export CYLC_TEST_BATCH_TASK_HOST=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME]host")
-export CYLC_TEST_BATCH_SITE_DIRECTIVES=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME][directives]")
-if [[ -z "${CYLC_TEST_BATCH_TASK_HOST}" || "${CYLC_TEST_BATCH_TASK_HOST}" == None ]]
-then
-    skip_all "\"[test battery][batch systems][$BATCH_SYS_NAME]host\" not defined"
-fi
-# check the host is reachable
-if ! ssh -n ${SSH_OPTS} "${CYLC_TEST_BATCH_TASK_HOST}" true 1>/dev/null 2>&1
-then
-    skip_all "Host "$CYLC_TEST_BATCH_TASK_HOST" unreachable"
-fi
-
-. "${TEST_SOURCE_DIR}/04-running.t"
diff --git a/tests/restart/13-pre-initial-2.t b/tests/restart/10-pre-initial-2.t
similarity index 100%
rename from tests/restart/13-pre-initial-2.t
rename to tests/restart/10-pre-initial-2.t
diff --git a/tests/restart/10-submit-failed-loadleveler.t b/tests/restart/10-submit-failed-loadleveler.t
deleted file mode 100644
index 4002d39..0000000
--- a/tests/restart/10-submit-failed-loadleveler.t
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
-# Copyright (C) 2008-2015 NIWA
-# 
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see <http://www.gnu.org/licenses/>.
-#-------------------------------------------------------------------------------
-# Test restarting a simple suite using loadleveler with a submit-failed task
-#     This test requires a specific host [test battery] entry in 
-#     site/user config in order to run, otherwise it will be bypassed
-#-------------------------------------------------------------------------------
-. "$(dirname "$0")/test_header"
-#-------------------------------------------------------------------------------
-# export an environment variable for this - allows a script to be used to 
-# select a compute node and have that same host used by the suite.
-BATCH_SYS_NAME="${TEST_NAME_BASE##??-*-}"
-export CYLC_TEST_BATCH_TASK_HOST=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME]host")
-export CYLC_TEST_BATCH_SITE_DIRECTIVES=$(cylc get-global-config -i \
-    "[test battery][batch systems][$BATCH_SYS_NAME][directives]")
-if [[ -z "${CYLC_TEST_BATCH_TASK_HOST}" || "${CYLC_TEST_BATCH_TASK_HOST}" == None ]]
-then
-    skip_all "[test battery][batch systems][$BATCH_SYS_NAME]host not defined"
-fi
-# check the host is reachable
-if ! ssh -n ${SSH_OPTS} "${CYLC_TEST_BATCH_TASK_HOST}" true 1>/dev/null 2>&1
-then
-    skip_all "Host "$CYLC_TEST_BATCH_TASK_HOST" unreachable"
-fi
-. "${TEST_SOURCE_DIR}/03-retrying.t"
diff --git a/tests/restart/14-back-comp-restart.t b/tests/restart/11-back-comp-restart.t
similarity index 100%
rename from tests/restart/14-back-comp-restart.t
rename to tests/restart/11-back-comp-restart.t
diff --git a/tests/restart/21-deleted-logs.t b/tests/restart/12-deleted-logs.t
similarity index 100%
rename from tests/restart/21-deleted-logs.t
rename to tests/restart/12-deleted-logs.t
diff --git a/tests/restart/22-bad-job-host.t b/tests/restart/13-bad-job-host.t
old mode 100755
new mode 100644
similarity index 91%
rename from tests/restart/22-bad-job-host.t
rename to tests/restart/13-bad-job-host.t
index e683bba..9ddf065
--- a/tests/restart/22-bad-job-host.t
+++ b/tests/restart/13-bad-job-host.t
@@ -18,12 +18,13 @@
 # Test restarting a suite when the host of a submitted or running job is not
 # available. https://github.com/cylc/cylc/issues/1327
 . "$(dirname "$0")/test_header"
-export CYLC_TEST_HOST=$(cylc get-global-config -i '[test battery]remote host')
+export CYLC_TEST_HOST=$( \
+    cylc get-global-config -i '[test battery]remote host' 2>'/dev/null')
 if [[ -z "${CYLC_TEST_HOST}" ]]; then
-    skip_all '[test battery]remote host: not defined'
+    skip_all '"[test battery]remote host": not defined'
 fi
 set_test_number 3
-install_suite "${TEST_NAME_BASE}" "${TEST_NAME_BASE}"
+install_suite "${TEST_NAME_BASE}" bad-job-host
 ssh ${SSH_OPTS} "${CYLC_TEST_HOST}" \
     "mkdir -p '.cylc/${SUITE_NAME}/' && cat >'.cylc/${SUITE_NAME}/passphrase'" \
     <"${TEST_DIR}/${SUITE_NAME}/passphrase"
diff --git a/tests/restart/14-multicycle.t b/tests/restart/14-multicycle.t
new file mode 100644
index 0000000..820f7e3
--- /dev/null
+++ b/tests/restart/14-multicycle.t
@@ -0,0 +1,121 @@
+#!/bin/bash
+# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
+# Copyright (C) 2008-2015 NIWA
+# 
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#-------------------------------------------------------------------------------
+# Test restarting a suite with multi-cycle tasks and interdependencies.
+if [[ -z ${TEST_DIR:-} ]]; then
+    . $(dirname $0)/test_header
+fi
+#-------------------------------------------------------------------------------
+set_test_number 9
+#-------------------------------------------------------------------------------
+install_suite $TEST_NAME_BASE multicycle
+export TEST_DIR
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-validate
+run_ok $TEST_NAME cylc validate $SUITE_NAME
+cmp_ok "$TEST_NAME.stderr" </dev/null
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-run
+suite_run_ok $TEST_NAME cylc run --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+TEST_NAME=$TEST_NAME_BASE-restart-run
+suite_run_ok $TEST_NAME cylc restart --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+state_dir=$(cylc get-global-config --print-run-dir)/$SUITE_NAME/state/
+cp $state_dir/state $TEST_DIR/
+for state_file in $(ls $TEST_DIR/*state*); do
+    sed -i "/^time : /d" $state_file
+done
+cmp_ok $TEST_DIR/pre-restart-state <<'__STATE__'
+run mode : live
+initial cycle : 20130923T0000Z
+final cycle : 20130926T0000Z
+(dp1
+.
+Begin task states
+bar.20130924T0000Z : status=succeeded, spawned=true
+bar.20130924T1200Z : status=succeeded, spawned=true
+bar.20130925T0000Z : status=waiting, spawned=false
+foo.20130924T1200Z : status=succeeded, spawned=true
+foo.20130925T0000Z : status=waiting, spawned=false
+output_states.20130925T0000Z : status=waiting, spawned=false
+__STATE__
+cmp_ok $TEST_DIR/pre-restart-db <<'__DB_DUMP__'
+bar|20130923T0000Z|1|1|succeeded
+bar|20130923T1200Z|1|1|succeeded
+bar|20130924T0000Z|1|1|succeeded
+bar|20130924T1200Z|1|1|succeeded
+bar|20130925T0000Z|0|1|waiting
+foo|20130923T0000Z|1|1|succeeded
+foo|20130923T1200Z|1|1|succeeded
+foo|20130924T0000Z|1|1|succeeded
+foo|20130924T1200Z|1|1|succeeded
+foo|20130925T0000Z|0|1|waiting
+output_states|20130925T0000Z|0|1|waiting
+__DB_DUMP__
+contains_ok $TEST_DIR/post-restart-db <<'__DB_DUMP__'
+bar|20130923T0000Z|1|1|succeeded
+bar|20130923T1200Z|1|1|succeeded
+bar|20130924T0000Z|1|1|succeeded
+bar|20130924T1200Z|1|1|succeeded
+bar|20130925T0000Z|0|1|waiting
+foo|20130923T0000Z|1|1|succeeded
+foo|20130923T1200Z|1|1|succeeded
+foo|20130924T0000Z|1|1|succeeded
+foo|20130924T1200Z|1|1|succeeded
+foo|20130925T0000Z|0|1|waiting
+shutdown|20130925T0000Z|1|1|succeeded
+__DB_DUMP__
+sqlite3 $(cylc get-global-config --print-run-dir)/$SUITE_NAME/cylc-suite.db \
+ "select name, cycle, submit_num, try_num, status
+  from task_states
+  order by name, cycle;" > $TEST_DIR/db
+cmp_ok $TEST_DIR/db <<'__DB_DUMP__'
+bar|20130923T0000Z|1|1|succeeded
+bar|20130923T1200Z|1|1|succeeded
+bar|20130924T0000Z|1|1|succeeded
+bar|20130924T1200Z|1|1|succeeded
+bar|20130925T0000Z|1|1|succeeded
+bar|20130925T1200Z|1|1|succeeded
+bar|20130926T0000Z|1|1|succeeded
+bar|20130926T1200Z|0|1|held
+foo|20130923T0000Z|1|1|succeeded
+foo|20130923T1200Z|1|1|succeeded
+foo|20130924T0000Z|1|1|succeeded
+foo|20130924T1200Z|1|1|succeeded
+foo|20130925T0000Z|1|1|succeeded
+foo|20130925T1200Z|1|1|succeeded
+foo|20130926T0000Z|1|1|succeeded
+foo|20130926T1200Z|0|1|held
+output_states|20130925T0000Z|1|1|succeeded
+shutdown|20130925T0000Z|1|1|succeeded
+__DB_DUMP__
+cmp_ok $TEST_DIR/state <<'__STATE__'
+run mode : live
+initial cycle : 20130923T0000Z
+final cycle : 20130926T0000Z
+(dp1
+.
+Begin task states
+bar.20130925T1200Z : status=succeeded, spawned=true
+bar.20130926T0000Z : status=succeeded, spawned=true
+bar.20130926T1200Z : status=held, spawned=false
+foo.20130926T0000Z : status=succeeded, spawned=true
+foo.20130926T1200Z : status=held, spawned=false
+__STATE__
+#-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
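
Note: like the other rewritten restart tests, the new multicycle test is built on a three-step skeleton; roughly, with helpers from test_header and state files named as in the test above:

    # Validate, run until the in-suite forced shutdown, then restart to completion.
    run_ok "$TEST_NAME_BASE-validate" cylc validate "$SUITE_NAME"
    suite_run_ok "$TEST_NAME_BASE-run" cylc run --debug "$SUITE_NAME"
    suite_run_ok "$TEST_NAME_BASE-restart-run" cylc restart --debug "$SUITE_NAME"
    # Drop the volatile timestamp line before comparing saved state files.
    for state_file in "$TEST_DIR"/*state*; do
        sed -i "/^time : /d" "$state_file"
    done
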
diff --git a/tests/restart/15-retrying-slurm.t b/tests/restart/15-retrying-slurm.t
deleted file mode 120000
index 5197388..0000000
--- a/tests/restart/15-retrying-slurm.t
+++ /dev/null
@@ -1 +0,0 @@
-08-retrying-loadleveler.t
\ No newline at end of file
diff --git a/tests/restart/16-retrying-pbs.t b/tests/restart/16-retrying-pbs.t
deleted file mode 120000
index 5197388..0000000
--- a/tests/restart/16-retrying-pbs.t
+++ /dev/null
@@ -1 +0,0 @@
-08-retrying-loadleveler.t
\ No newline at end of file
diff --git a/tests/restart/17-running-slurm.t b/tests/restart/17-running-slurm.t
deleted file mode 120000
index 72441b8..0000000
--- a/tests/restart/17-running-slurm.t
+++ /dev/null
@@ -1 +0,0 @@
-09-running-loadleveler.t
\ No newline at end of file
diff --git a/tests/restart/18-running-pbs.t b/tests/restart/18-running-pbs.t
deleted file mode 120000
index 72441b8..0000000
--- a/tests/restart/18-running-pbs.t
+++ /dev/null
@@ -1 +0,0 @@
-09-running-loadleveler.t
\ No newline at end of file
diff --git a/tests/restart/19-submit-failed-slurm.t b/tests/restart/19-submit-failed-slurm.t
deleted file mode 120000
index 2393b3b..0000000
--- a/tests/restart/19-submit-failed-slurm.t
+++ /dev/null
@@ -1 +0,0 @@
-10-submit-failed-loadleveler.t
\ No newline at end of file
diff --git a/tests/restart/20-submit-failed-pbs.t b/tests/restart/20-submit-failed-pbs.t
deleted file mode 120000
index 2393b3b..0000000
--- a/tests/restart/20-submit-failed-pbs.t
+++ /dev/null
@@ -1 +0,0 @@
-10-submit-failed-loadleveler.t
\ No newline at end of file
diff --git a/tests/restart/back-comp-restart/state b/tests/restart/back-comp-restart/state
index 12f2a26..595ef6d 100644
--- a/tests/restart/back-comp-restart/state
+++ b/tests/restart/back-comp-restart/state
@@ -1,7 +1,7 @@
 run mode : live
 time : 2014:07:21:13:35:53
 initial cycle : 2014050100
-final cycle : (none)
+final cycle : None
 (dp1
 .
 Begin task states
diff --git a/tests/restart/22-bad-job-host/suite.rc b/tests/restart/bad-job-host/suite.rc
similarity index 85%
rename from tests/restart/22-bad-job-host/suite.rc
rename to tests/restart/bad-job-host/suite.rc
index c68acb6..65f2263 100644
--- a/tests/restart/22-bad-job-host/suite.rc
+++ b/tests/restart/bad-job-host/suite.rc
@@ -1,4 +1,8 @@
 #!jinja2
+[cylc]
+    [[event hooks]]
+        timeout = PT2M
+        abort on timeout = True
 [scheduling]
     [[dependencies]]
             graph = """
@@ -15,8 +19,6 @@ timeout 30 bash -c 'while [[ -e 'file' ]]; do sleep 1; done' || true
 """
         [[[remote]]]
             host = {{environ['CYLC_TEST_HOST']}}
-        [[[job submission]]]
-            method = at
     [[t-shutdown]]
         script = """
 # Shutdown and wait
@@ -31,6 +33,6 @@ rm -f "${CYLC_SUITE_WORK_DIR}/${CYLC_TASK_CYCLE_POINT}/t-remote/file"
             host = {{environ['CYLC_TEST_HOST']}}
     [[t-check-log]]
         script = """
-grep -q 'WARNING - garbage: initialisation did not complete' \
+grep -q 'ERROR - garbage: initialisation did not complete' \
     "${CYLC_SUITE_LOG_DIR}/log"
 """
diff --git a/tests/restart/broadcast/suite.rc b/tests/restart/broadcast/suite.rc
index ce169dd..ef33ec8 100644
--- a/tests/restart/broadcast/suite.rc
+++ b/tests/restart/broadcast/suite.rc
@@ -1,105 +1,40 @@
 #!jinja2
-{%- if USE_LOADLEVELER is defined and USE_LOADLEVELER %}
-{%- set HOST = environ['CYLC_LL_TEST_TASK_HOST'] %}
-{%- set SITE_DIRECTIVES = environ['CYLC_LL_TEST_SITE_DIRECTIVES'] %}
-{%- else %}
-{%- set USE_LOADLEVELER = false %}
-{%- set HOST = "" %}
-{%- set SITE_DIRECTIVES = "" %}
-{%- endif %}
 {%- set TEST_DIR = environ['TEST_DIR'] %}
 [cylc]
+    UTC mode = True
     [[event hooks]]
-        timeout handler = "touch {{ TEST_DIR }}/suite-stopping && shutdown_this_suite_hook"
-        timeout = 3
+        timeout handler = shutdown_this_suite_hook
+        timeout = PT3M
 [scheduling]
-    initial cycle time = 2013092300
-    final cycle time   = 2013092306
-    runahead limit = 2
+    initial cycle time = 20130923T00
+    final cycle time   = 20130923T00
     [[dependencies]]
-        [[[0,6,12,18]]]
+        [[[R1]]]
             graph = """
-                tidy[T-6] => send_a_broadcast_task => force_restart
-                force_restart => output_states
-                send_a_broadcast_task & output_states => broadcast_task => tidy
+                send_a_broadcast_task => shutdown
+                shutdown => output_states
+                output_states => broadcast_task
+                broadcast_task => finish
             """
 [runtime]
     [[root]]
-        script = "sleep 1"
-    [[RESTART_TASKS]]
-        {%- if USE_LOADLEVELER %}
-        [[[job submission]]]
-            method = loadleveler
-        [[[directives]]]
-            class            = serial
-            job_type         = serial
-            wall_clock_limit = '60,30'
-            {{ SITE_DIRECTIVES }}
-        [[[remote]]]
-            host = {{ HOST }}
-        {%- endif %}
+        [[[event hooks]]]
+            failed handler = shutdown_this_suite_hook
     [[send_a_broadcast_task]]
-        inherit = RESTART_TASKS
+        description = "Broadcast setup task"
         script = """
-            cylc broadcast -n broadcast_task -t $CYLC_TASK_CYCLE_TIME -s "[environment]MY_TIME=$CYLC_TASK_CYCLE_TIME" $CYLC_SUITE_REG_NAME
+            cylc broadcast -n broadcast_task -t $CYLC_TASK_CYCLE_POINT -s "[environment]MY_VALUE='something'" $CYLC_SUITE_REG_NAME
             cylc broadcast -d $CYLC_SUITE_REG_NAME
         """
-        description = "Broadcast setup task"
     [[broadcast_task]]
-        inherit = RESTART_TASKS
+        description = "Broadcast-recipient task (runs after restart)"
         script = """
-            if [[ $MY_TIME -ne $CYLC_TASK_CYCLE_TIME ]]; then
-                echo $MY_TIME should be the same as $CYLC_TASK_CYCLE_TIME
+            if [[ "$MY_VALUE" != "something" ]]; then
+                echo "[FAIL] MY_VALUE ($MY_VALUE) not set correctly by broadcast" >&2
                 exit 1
             fi
+            echo "[PASS] MY_VALUE=$MY_VALUE"
         """
-        description = "Broadcast-recipient task (runs after restart)"
-        [[[environment]]]
-            MY_TIME=2000010100
-    [[force_restart]]
-        pre-script = """
-            # We need to make sure that the results stay consistent.
-            sleep 2
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-pre-restart-$CYLC_TASK_CYCLE_TIME
-            cp $CYLC_SUITE_RUN_DIR/state/state {{ TEST_DIR }}/state-pre-restart-$CYLC_TASK_CYCLE_TIME
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_INITIAL_CYCLE_TIME ]]; then
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --now --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if USE_LOADLEVELER %}--set=USE_LOADLEVELER=true {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            else
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if USE_LOADLEVELER %}--set=USE_LOADLEVELER=true {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            fi
-        """
-        description = "Force a shutdown and restart of the suite"
         [[[environment]]]
-            CYLC_LL_TEST_TASK_HOST={{ HOST }}
-            CYLC_LL_TEST_SITE_DIRECTIVES={{ SITE_DIRECTIVES }}
-            TEST_DIR={{ TEST_DIR }}
-    [[output_states]]
-        script = """
-            while [[ ! -e $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME ]]; do
-                sleep 0.1
-            done
-            cylc suite-state --interval=1 --task=output_states --cycle=$CYLC_TASK_CYCLE_TIME \
-                --max-polls=10 --status=running $CYLC_SUITE_REG_NAME
-            sleep 5  # Wait a few typical loops so that the states make it into the database.
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-post-restart-$CYLC_TASK_CYCLE_TIME
-        """
-        description = "Wait for the restart to complete"
-    [[tidy]]
-        script = """
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_FINAL_CYCLE_TIME ]]; then
-                touch {{ TEST_DIR }}/suite-stopping
-            fi
-        """
-        description = "Tidy up the cycle so that the next can start"
+            MY_VALUE=nothing
+{% include 'suite-runtime-restart.rc' %}
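
Note: the reworked broadcast suite now drives cylc broadcast directly instead of round-tripping the cycle time; in isolation, the two calls it depends on are (cycle point and suite registration taken from the task environment, as above):

    # Broadcast an environment value to broadcast_task at the current cycle point,
    # then display the active broadcast settings (-d), as the suite does above.
    cylc broadcast -n broadcast_task -t "$CYLC_TASK_CYCLE_POINT" \
        -s "[environment]MY_VALUE='something'" "$CYLC_SUITE_REG_NAME"
    cylc broadcast -d "$CYLC_SUITE_REG_NAME"
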
diff --git a/tests/restart/failed/suite.rc b/tests/restart/failed/suite.rc
index 15a6bdc..a2b7380 100644
--- a/tests/restart/failed/suite.rc
+++ b/tests/restart/failed/suite.rc
@@ -1,93 +1,25 @@
 #!jinja2
-{%- if USE_LOADLEVELER is defined and USE_LOADLEVELER %}
-{%- set HOST = environ['CYLC_LL_TEST_TASK_HOST'] %}
-{%- set SITE_DIRECTIVES = environ['CYLC_LL_TEST_SITE_DIRECTIVES'] %}
-{%- else %}
-{%- set USE_LOADLEVELER = false %}
-{%- set HOST = "" %}
-{%- set SITE_DIRECTIVES = "" %}
-{%- endif %}
 {%- set TEST_DIR = environ['TEST_DIR'] %}
 [cylc]
+    UTC mode = True
     [[event hooks]]
-        timeout handler = "touch {{ TEST_DIR }}/suite-stopping && shutdown_this_suite_hook"
-        timeout = 3
+        timeout handler = shutdown_this_suite_hook
+        timeout = PT3M
 [scheduling]
-    initial cycle point = 2013092300
-    final cycle point   = 2013092306
-    runahead limit = 2
+    initial cycle time = 20130923T00
+    final cycle time   = 20130923T00
     [[dependencies]]
-        [[[0,6,12,18]]]
+        [[[R1]]]
             graph = """
-                tidy[T-6] => failed_task:fail => force_restart
-                force_restart => output_states
-                output_states => tidy
-                tidy => !FAIL_TASKS
+                failed_task:fail => shutdown
+                shutdown => output_states
+                output_states => finish => !failed_task
             """
 [runtime]
-    [[root]]
-        script = "sleep 1"
-    [[FAIL_TASKS]]
-    [[RESTART_TASKS]]
-        {%- if USE_LOADLEVELER %}
-        [[[job submission]]]
-            method = loadleveler
-        [[[directives]]]
-            class            = serial
-            job_type         = serial
-            wall_clock_limit = '50,20'
-            {{ SITE_DIRECTIVES }}
-        [[[remote]]]
-            host = {{ HOST }}
-        {%- endif %}
     [[failed_task]]
-        inherit = RESTART_TASKS, FAIL_TASKS
-        script = "sleep 10; exit 1"
-        description = "Failed state task for restart"
-    [[force_restart]]
-        pre-script = """
-            # We need to make sure that the results stay consistent.
-            sleep 2
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-pre-restart-$CYLC_TASK_CYCLE_TIME
-            cp $CYLC_SUITE_RUN_DIR/state/state {{ TEST_DIR }}/state-pre-restart-$CYLC_TASK_CYCLE_TIME
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_INITIAL_CYCLE_TIME ]]; then
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --now --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if USE_LOADLEVELER %}--set=USE_LOADLEVELER=true {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            else
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if USE_LOADLEVELER %}--set=USE_LOADLEVELER=true {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            fi
-        """
-        description = "Force a shutdown and restart of the suite"
-        [[[environment]]]
-            CYLC_LL_TEST_TASK_HOST={{ HOST }}
-            CYLC_LL_TEST_SITE_DIRECTIVES={{ SITE_DIRECTIVES }}
-            TEST_DIR={{ TEST_DIR }}
-    [[output_states]]
-        script = """
-            while [[ ! -e $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME ]]; do
-                sleep 0.1
-            done
-            cylc suite-state --interval=1 --task=output_states --cycle=$CYLC_TASK_CYCLE_TIME \
-                --max-polls=10 --status=running $CYLC_SUITE_REG_NAME
-            sleep 5  # Wait a few typical loops so that the states make it into the database.
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-post-restart-$CYLC_TASK_CYCLE_TIME
-        """
-        description = "Wait for the restart to complete"
-    [[tidy]]
+        description = "Failed task (runs before restart)"
         script = """
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_FINAL_CYCLE_TIME ]]; then
-                touch {{ TEST_DIR }}/suite-stopping
-            fi
+            sleep 10
+            exit 1
         """
-        description = "Tidy up the cycle so that the next can start"
+{% include 'suite-runtime-restart.rc' %}
diff --git a/tests/restart/lib/suite-runtime-restart.rc b/tests/restart/lib/suite-runtime-restart.rc
new file mode 100644
index 0000000..fadc6cc
--- /dev/null
+++ b/tests/restart/lib/suite-runtime-restart.rc
@@ -0,0 +1,32 @@
+    [[OUTPUT]]
+        script = """
+            sleep 5
+            # Retry in case the suite database is locked by the scheduler.
+            for i in {0..10}; do
+                sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
+                    "select name, cycle, submit_num, try_num, status
+                     from task_states where name != '"$CYLC_TASK_NAME"'
+                     order by name, cycle;" > {{ TEST_DIR }}/$OUTPUT_SUFFIX-db && break
+                sleep 1
+            done
+            sed "/$CYLC_TASK_NAME/d" $CYLC_SUITE_RUN_DIR/state/state >{{ TEST_DIR }}/$OUTPUT_SUFFIX-state
+        """
+    [[shutdown]]
+        description = "Force a shutdown of the suite"
+        inherit = OUTPUT
+        post-script = """
+            cylc shutdown $CYLC_SUITE_REG_NAME
+            sleep 5
+        """
+        [[[environment]]]
+            OUTPUT_SUFFIX=pre-restart
+    [[output_states]]
+        description = "Wait for the restart to complete, then output states"
+        inherit = OUTPUT
+        pre-script = """
+            sleep 5
+        """
+        [[[environment]]]
+            OUTPUT_SUFFIX=post-restart
+    [[finish]]
+        script = true
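
Note: the retry loop in the shared OUTPUT family above exists because the scheduler may hold a lock on cylc-suite.db while the dump runs; stated on its own in plain bash, using $TEST_DIR directly rather than the Jinja2-expanded path and assuming the environment variables set by the include and the calling test:

    # Retry the state dump for up to ~10 seconds in case the database is locked.
    for i in {0..10}; do
        sqlite3 "$CYLC_SUITE_RUN_DIR/cylc-suite.db" \
            "select name, cycle, submit_num, try_num, status
             from task_states where name != '$CYLC_TASK_NAME'
             order by name, cycle;" > "$TEST_DIR/$OUTPUT_SUFFIX-db" && break
        sleep 1
    done
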
diff --git a/tests/restart/multicycle/bin/shutdown_this_suite_hook b/tests/restart/multicycle/bin/shutdown_this_suite_hook
new file mode 100755
index 0000000..6362be9
--- /dev/null
+++ b/tests/restart/multicycle/bin/shutdown_this_suite_hook
@@ -0,0 +1,2 @@
+#!/bin/bash
+cylc shutdown --kill --max-polls=30 --interval=2 $2
diff --git a/tests/restart/multicycle/suite.rc b/tests/restart/multicycle/suite.rc
new file mode 100644
index 0000000..3d5a7dd
--- /dev/null
+++ b/tests/restart/multicycle/suite.rc
@@ -0,0 +1,53 @@
+#!jinja2
+{%- set TEST_DIR = environ['TEST_DIR'] %}
+[cylc]
+    UTC mode = True
+    [[event hooks]]
+        timeout handler = shutdown_this_suite_hook
+        timeout = PT3M
+[scheduling]
+    initial cycle time = 20130923T00
+    final cycle time   = 20130926T00
+    [[dependencies]]
+        [[[PT12H]]]
+            graph = """
+                foo[-PT12H] => foo => bar
+                bar[-P1D] => bar
+            """
+        [[[R1/20130925T0000Z]]]
+            graph = """
+                bar[-P1D] & bar[-PT12H] & foo[-PT12H] => shutdown => output_states
+                output_states => foo => bar
+            """
+[runtime]
+    [[foo,bar]]
+        description = "Placeholder tasks for dependencies"
+        script = """
+            sleep 1
+        """
+    [[OUTPUT]]
+        script = """
+            sleep 5
+            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
+                "select name, cycle, submit_num, try_num, status
+                 from task_states where name != '"$CYLC_TASK_NAME"'
+                 order by name, cycle;" > {{ TEST_DIR }}/$OUTPUT_SUFFIX-db
+            sed "/$CYLC_TASK_NAME/d" $CYLC_SUITE_RUN_DIR/state/state >{{ TEST_DIR }}/$OUTPUT_SUFFIX-state
+        """
+    [[shutdown]]
+        description = "Force a shutdown of the suite"
+        inherit = OUTPUT
+        post-script = """
+            cylc shutdown $CYLC_SUITE_REG_NAME
+            sleep 5
+        """
+        [[[environment]]]
+            OUTPUT_SUFFIX=pre-restart
+    [[output_states]]
+        description = "Wait for the restart to complete, then output states"
+        inherit = OUTPUT
+        pre-script = """
+            sleep 5
+        """
+        [[[environment]]]
+            OUTPUT_SUFFIX=post-restart
diff --git a/tests/restart/reload/suite.rc b/tests/restart/reload/suite.rc
index 3b8c79f..8e3b5f7 100644
--- a/tests/restart/reload/suite.rc
+++ b/tests/restart/reload/suite.rc
@@ -8,7 +8,7 @@ which should run to completion on restarting."""
 [cylc]
     [[reference test]]
         required run mode = live
-        live mode suite timeout = 0.5
+        live mode suite timeout = 2
 [scheduling]
     initial cycle time = 2010080800
     final cycle time = 2010080900
diff --git a/tests/restart/retrying/suite.rc b/tests/restart/retrying/suite.rc
index 7565456..3bd7773 100644
--- a/tests/restart/retrying/suite.rc
+++ b/tests/restart/retrying/suite.rc
@@ -1,118 +1,34 @@
 #!jinja2
-{%- if BATCH_SYS_NAME is defined and BATCH_SYS_NAME %}
-{%- set HOST = environ['CYLC_TEST_BATCH_TASK_HOST'] %}
-{%- set SITE_DIRECTIVES = environ['CYLC_TEST_BATCH_SITE_DIRECTIVES'] %}
-{%- else %}
-{%- set BATCH_SYS_NAME = none %}
-{%- set HOST = "" %}
-{%- set SITE_DIRECTIVES = "" %}
-{%- endif %}
 {%- set TEST_DIR = environ['TEST_DIR'] %}
 [cylc]
+    UTC mode = True
     [[event hooks]]
-        timeout handler = "touch {{ TEST_DIR }}/suite-stopping && shutdown_this_suite_hook"
-        timeout = 3
+        timeout handler = shutdown_this_suite_hook
+        timeout = PT3M
 [scheduling]
-    initial cycle time = 2013092300
-    final cycle time   = 2013092306
-    runahead limit = 2
+    initial cycle time = 20130923T00
+    final cycle time   = 20130923T00
     [[dependencies]]
-        [[[0,6,12,18]]]
+        [[[R1]]]
             graph = """
-                tidy[T-6] => retrying_task
-                retrying_task:start => force_restart
-                force_restart => output_states
-                output_states & retrying_task => tidy
+                retrying_task:start => shutdown
+                shutdown => output_states
+                output_states & retrying_task => finish
             """
 [runtime]
-    [[root]]
-        script = "sleep 1"
-    [[RESTART_TASKS]]
-        {%- if BATCH_SYS_NAME %}
-        [[[job submission]]]
-            method = {{ BATCH_SYS_NAME }}
-        [[[directives]]]
-        {%- if BATCH_SYS_NAME == 'loadleveler' %}
-            class            = serial
-            job_type         = serial
-            wall_clock_limit = '20,15'
-        {%- elif BATCH_SYS_NAME == 'slurm' %}
-            --time = 20
-        {%- elif BATCH_SYS_NAME == 'pbs' %}
-            -l walltime=120
-        {%- endif %}
-            {{ SITE_DIRECTIVES }}
-        [[[remote]]]
-            host = {{ HOST }}
-        {%- endif %}
     [[retrying_task]]
-        inherit = RESTART_TASKS
         script = """
+            sleep 10
             if [[ $CYLC_TASK_TRY_NUMBER -le 2 ]]; then
-                sleep 1
                 exit 1
             fi
         """
         description = "Retrying state task for restart"
-        retry delays = 0.5, 0.01
-    [[force_restart]]
-        pre-script = """
-            # We need to make sure that the results stay consistent.
-            sleep 2
-            cylc suite-state --interval=1 --task=retrying_task --cycle=$CYLC_TASK_CYCLE_TIME \
-                --max-polls=120 --status=retrying $CYLC_SUITE_REG_NAME
-            sleep 2
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-pre-restart-$CYLC_TASK_CYCLE_TIME
-            cp $CYLC_SUITE_RUN_DIR/state/state {{ TEST_DIR }}/state-pre-restart-$CYLC_TASK_CYCLE_TIME
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_INITIAL_CYCLE_TIME ]]; then
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --now --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if BATCH_SYS_NAME %}--set=BATCH_SYS_NAME={{BATCH_SYS_NAME}} {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            else
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if BATCH_SYS_NAME %}--set=BATCH_SYS_NAME={{BATCH_SYS_NAME}} {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            fi
-        """
-        description = "Force a shutdown and restart of the suite"
-        [[[environment]]]
-            CYLC_TEST_BATCH_TASK_HOST={{ HOST }}
-            CYLC_TEST_BATCH_SITE_DIRECTIVES={{ SITE_DIRECTIVES }}
-            TEST_DIR={{ TEST_DIR }}
+        retry delays = PT40S, PT1S
+{% include 'suite-runtime-restart.rc' %}
+    [[shutdown]]
+        pre-script = sleep 5  # Extra sleep, since this task triggers off the retrying task's start.
     [[output_states]]
-        script = """
-            while [[ ! -e $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME ]]; do
-                sleep 0.1
-            done
-            cylc suite-state --interval=1 --task=output_states --cycle=$CYLC_TASK_CYCLE_TIME \
-                --max-polls=10 --status=running $CYLC_SUITE_REG_NAME
-            cylc suite-state --interval=1 --task=retrying_task --cycle=$CYLC_TASK_CYCLE_TIME \
-                --max-polls=120 --status=retrying $CYLC_SUITE_REG_NAME
-            sleep 5  # Wait a few typical loops so that the states make it into the database.
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-post-restart-$CYLC_TASK_CYCLE_TIME
-        """
-        description = "Wait for the restart to complete"
-    [[tidy]]
-        script = """
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_FINAL_CYCLE_TIME ]]; then
-                touch {{ TEST_DIR }}/suite-stopping
-            fi
-            # Remove the following block once retrying initial tasks are fixed.
-            if grep -q 'retrying_task.*status=waiting' <<< $(cylc cat-state $CYLC_SUITE_REG_NAME); then
-                cylc reset -f -v -v -s succeeded $CYLC_SUITE_REG_NAME 'retrying_task' $CYLC_TASK_CYCLE_TIME
-            fi
-            sleep 5
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-tidy-$CYLC_TASK_CYCLE_TIME
+        pre-script = """
+            sleep 10
         """
-        description = "Tidy up the cycle so that the next can start"
diff --git a/tests/restart/running/suite.rc b/tests/restart/running/suite.rc
index b5529cf..dcbdcf0 100644
--- a/tests/restart/running/suite.rc
+++ b/tests/restart/running/suite.rc
@@ -1,100 +1,29 @@
 #!jinja2
-{%- if BATCH_SYS_NAME is defined and BATCH_SYS_NAME %}
-{%- set HOST = environ['CYLC_TEST_BATCH_TASK_HOST'] %}
-{%- set SITE_DIRECTIVES = environ['CYLC_TEST_BATCH_SITE_DIRECTIVES'] %}
-{%- else %}
-{%- set BATCH_SYS_NAME = none %}
-{%- set HOST = "" %}
-{%- set SITE_DIRECTIVES = "" %}
-{%- endif %}
 {%- set TEST_DIR = environ['TEST_DIR'] %}
 [cylc]
     UTC mode = True
     [[event hooks]]
-        timeout handler = "touch {{ TEST_DIR }}/suite-stopping && shutdown_this_suite_hook"
-        timeout = 3
+        timeout handler = shutdown_this_suite_hook
+        timeout = PT3M
 [scheduling]
-    initial cycle time = 2013092300
-    final cycle time   = 2013092306
-    runahead limit = 2
+    initial cycle time = 20130923T00
+    final cycle time   = 20130923T00
     [[dependencies]]
-        [[[0,6,12,18]]]
+        [[[R1]]]
             graph = """
-                tidy[T-6] => running_task:start => force_restart
-                force_restart => output_states
-                output_states & running_task => tidy
+                running_task:start => shutdown
+                shutdown => output_states
+                output_states & running_task => finish
             """
 [runtime]
-    [[root]]
-        script = "sleep 1"
-    [[RESTART_TASKS]]
-        {%- if BATCH_SYS_NAME %}
-        [[[job submission]]]
-            method = {{ BATCH_SYS_NAME }}
-        [[[directives]]]
-        {%- if BATCH_SYS_NAME == 'loadleveler' %}
-            class            = serial
-            job_type         = serial
-            wall_clock_limit = '60,30'
-        {%- elif BATCH_SYS_NAME == 'slurm' %}
-            --time = 20
-        {%- elif BATCH_SYS_NAME == 'pbs' %}
-            -l walltime=120
-        {%- endif %}
-            {{ SITE_DIRECTIVES }}
-        [[[remote]]]
-            host = {{ HOST }}
-        {%- endif %}
     [[running_task]]
-        inherit = RESTART_TASKS
+        description = "Running task (runs during restart)"
         script = """
-            sleep 20
+            sleep 50
         """
-        description = "Running state task for restart (with shutdown --now)"
-    [[force_restart]]
-        pre-script = """
-            # We need to make sure that the results stay consistent.
-            sleep 2
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-pre-restart-$CYLC_TASK_CYCLE_TIME
-            cp $CYLC_SUITE_RUN_DIR/state/state {{ TEST_DIR }}/state-pre-restart-$CYLC_TASK_CYCLE_TIME
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_INITIAL_CYCLE_TIME ]]; then
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --now --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if BATCH_SYS_NAME %}--set=BATCH_SYS_NAME={{BATCH_SYS_NAME}} {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            else
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if BATCH_SYS_NAME %}--set=BATCH_SYS_NAME={{BATCH_SYS_NAME}} {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            fi
+{% include 'suite-runtime-restart.rc' %}
+    [[shutdown]]
+        post-script = """
+            cylc shutdown --now $CYLC_SUITE_REG_NAME
+            sleep 5
         """
-        description = "Force a shutdown and restart of the suite"
-        [[[environment]]]
-            CYLC_TEST_BATCH_TASK_HOST={{ HOST }}
-            CYLC_TEST_BATCH_SITE_DIRECTIVES={{ SITE_DIRECTIVES }}
-            TEST_DIR={{ TEST_DIR }}
-    [[output_states]]
-        script = """
-            while [[ ! -e $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME ]]; do
-                sleep 0.1
-            done
-            cylc suite-state --interval=1 --task=output_states --cycle=$CYLC_TASK_CYCLE_TIME \
-                --max-polls=10 --status=running $CYLC_SUITE_REG_NAME
-            sleep 5  # Wait a few typical loops so that the states make it into the database.
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-post-restart-$CYLC_TASK_CYCLE_TIME
-        """
-        description = "Wait for the restart to complete"
-    [[tidy]]
-        script = """
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_FINAL_CYCLE_TIME ]]; then
-                touch {{ TEST_DIR }}/suite-stopping
-            fi
-        """
-        description = "Tidy up the cycle so that the next can start"
diff --git a/tests/restart/submit-failed/suite.rc b/tests/restart/submit-failed/suite.rc
index 38ed55f..e23bf1f 100644
--- a/tests/restart/submit-failed/suite.rc
+++ b/tests/restart/submit-failed/suite.rc
@@ -1,102 +1,27 @@
 #!jinja2
-{%- if BATCH_SYS_NAME is defined and BATCH_SYS_NAME %}
-{%- set HOST = environ['CYLC_TEST_BATCH_TASK_HOST'] %}
-{%- set SITE_DIRECTIVES = environ['CYLC_TEST_BATCH_SITE_DIRECTIVES'] %}
-{%- else %}
-{%- set BATCH_SYS_NAME = none %}
-{%- set HOST = "" %}
-{%- set SITE_DIRECTIVES = "" %}
-{%- endif %}
 {%- set TEST_DIR = environ['TEST_DIR'] %}
 [cylc]
+    UTC mode = True
     [[event hooks]]
-        timeout handler = "touch {{ TEST_DIR }}/suite-stopping && shutdown_this_suite_hook"
-        timeout = 3
+        timeout handler = shutdown_this_suite_hook
+        timeout = PT3M
 [scheduling]
-    initial cycle time = 2013092300
-    final cycle time   = 2013092306
-    runahead limit = 2
+    initial cycle time = 20130923T00
+    final cycle time   = 20130923T00
     [[dependencies]]
-        [[[0,6,12,18]]]
+        [[[R1]]]
             graph = """
-                tidy[T-6] => submit_fail_task
-                submit_fail_task:submit-fail => force_restart
-                force_restart => output_states
-                output_states => tidy => !FAIL_TASKS
+                submit_failed_task:submit-fail => shutdown
+                shutdown => output_states
+                output_states => finish => !submit_failed_task
             """
 [runtime]
-    [[root]]
-        script = "sleep 1"
-    [[FAIL_TASKS]]
-    [[RESTART_TASKS]]
-        {%- if BATCH_SYS_NAME %}
-        [[[job submission]]]
-            method = {{ BATCH_SYS_NAME }}
-        [[[directives]]]
-        {%- if BATCH_SYS_NAME == 'loadleveler' %}
-            class            = serial
-            job_type         = serial
-            wall_clock_limit = '20,15'
-        {%- elif BATCH_SYS_NAME == 'slurm' %}
-            --time = 20
-        {%- elif BATCH_SYS_NAME == 'pbs' %}
-            -l walltime=120
-        {%- endif %}
-            {{ SITE_DIRECTIVES }}
-        [[[remote]]]
-            host = {{ HOST }}
-        {%- endif %}
-    [[submit_fail_task]]
-        inherit = RESTART_TASKS, FAIL_TASKS
-        description = "Submit-fail state task for restart"
-        [[[job submission]]] 
-            method = at
-            # 'yesterday' is an invalid at time!
-            command template = at yesterday
-    [[force_restart]]
-        pre-script = """
-            # We need to make sure that the results stay consistent.
-            sleep 2
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-pre-restart-$CYLC_TASK_CYCLE_TIME
-            cp $CYLC_SUITE_RUN_DIR/state/state {{ TEST_DIR }}/state-pre-restart-$CYLC_TASK_CYCLE_TIME
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_INITIAL_CYCLE_TIME ]]; then
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --now --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if BATCH_SYS_NAME %}--set=BATCH_SYS_NAME={{BATCH_SYS_NAME}} {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            else
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if BATCH_SYS_NAME %}--set=BATCH_SYS_NAME={{BATCH_SYS_NAME}} {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            fi
-        """
-        description = "Force a shutdown and restart of the suite"
-        [[[environment]]]
-            CYLC_TEST_BATCH_TASK_HOST={{ HOST }}
-            CYLC_TEST_BATCH_SITE_DIRECTIVES={{ SITE_DIRECTIVES }}
-            TEST_DIR={{ TEST_DIR }}
-    [[output_states]]
+    [[submit_failed_task]]
+        description = "Submit-failed task (runs before restart)"
         script = """
-            while [[ ! -e $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME ]]; do
-                sleep 0.1
-            done
-            cylc suite-state --interval=1 --task=output_states --cycle=$CYLC_TASK_CYCLE_TIME \
-                --max-polls=10 --status=running $CYLC_SUITE_REG_NAME
-            sleep 5  # Wait a few typical loops so that the states make it into the database.
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-post-restart-$CYLC_TASK_CYCLE_TIME
+            exit 1  # Should not submit, so this shouldn't run!
         """
-        description = "Wait for the restart to complete"
-    [[tidy]]
-        script = """
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_FINAL_CYCLE_TIME ]]; then
-                touch {{ TEST_DIR }}/suite-stopping
-            fi
-        """
-        description = "Tidy up the cycle so that the next can start"
+        [[[job submission]]]
+            method = at
+            command template = at oh-no
+{% include 'suite-runtime-restart.rc' %}
diff --git a/tests/restart/succeeded/suite.rc b/tests/restart/succeeded/suite.rc
index ce80e28..2c33e3c 100644
--- a/tests/restart/succeeded/suite.rc
+++ b/tests/restart/succeeded/suite.rc
@@ -1,92 +1,24 @@
 #!jinja2
-{%- if USE_LOADLEVELER is defined and USE_LOADLEVELER %}
-{%- set HOST = environ['CYLC_LL_TEST_TASK_HOST'] %}
-{%- set SITE_DIRECTIVES = environ['CYLC_LL_TEST_SITE_DIRECTIVES'] %}
-{%- else %}
-{%- set USE_LOADLEVELER = false %}
-{%- set HOST = "" %}
-{%- set SITE_DIRECTIVES = "" %}
-{%- endif %}
 {%- set TEST_DIR = environ['TEST_DIR'] %}
 [cylc]
+    UTC mode = True
     [[event hooks]]
-        timeout handler = "touch {{ TEST_DIR }}/suite-stopping && shutdown_this_suite_hook"
-        timeout = 3
+        timeout handler = shutdown_this_suite_hook
+        timeout = PT3M
 [scheduling]
-    initial cycle time = 2013092300
-    final cycle time   = 2013092306
-    runahead limit = 2
+    initial cycle time = 20130923T00
+    final cycle time   = 20130923T00
     [[dependencies]]
-        [[[0,6,12,18]]]
+        [[[R1]]]
             graph = """
-                tidy[T-6] => succeed_task
-                succeed_task:succeed => force_restart
-                force_restart => output_states
-                output_states => tidy
+                succeeded_task => shutdown
+                shutdown => output_states
+                output_states => finish
             """
 [runtime]
-    [[root]]
-        script = "sleep 1"
-    [[RESTART_TASKS]]
-        {%- if USE_LOADLEVELER %}
-        [[[job submission]]]
-            method = loadleveler
-        [[[directives]]]
-            class            = serial
-            job_type         = serial
-            wall_clock_limit = '60,30'
-            {{ SITE_DIRECTIVES }}
-        [[[remote]]]
-            host = {{ HOST }}
-        {%- endif %}
-    [[succeed_task]]
-        inherit = RESTART_TASKS
-        description = "Succeeded state task for restart"
-    [[force_restart]]
-        pre-script = """
-            # We need to make sure that the results stay consistent.
-            sleep 2
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-pre-restart-$CYLC_TASK_CYCLE_TIME
-            cp $CYLC_SUITE_RUN_DIR/state/state {{ TEST_DIR }}/state-pre-restart-$CYLC_TASK_CYCLE_TIME
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_INITIAL_CYCLE_TIME ]]; then
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --now --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if USE_LOADLEVELER %}--set=USE_LOADLEVELER=true {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            else
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if USE_LOADLEVELER %}--set=USE_LOADLEVELER=true {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            fi
-        """
-        description = "Force a shutdown and restart of the suite"
-        [[[environment]]]
-            CYLC_LL_TEST_TASK_HOST={{ HOST }}
-            CYLC_LL_TEST_SITE_DIRECTIVES={{ SITE_DIRECTIVES }}
-            TEST_DIR={{ TEST_DIR }}
-    [[output_states]]
-        script = """
-            while [[ ! -e $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME ]]; do
-                sleep 0.1
-            done
-            cylc suite-state --interval=1 --task=output_states --cycle=$CYLC_TASK_CYCLE_TIME \
-                --max-polls=10 --status=running $CYLC_SUITE_REG_NAME
-            sleep 5  # Wait a few typical loops so that the states make it into the database.
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-post-restart-$CYLC_TASK_CYCLE_TIME
-        """
-        description = "Wait for the restart to complete"
-    [[tidy]]
+    [[succeeded_task]]
+        description = "Succeeded task (runs before restart)"
         script = """
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_FINAL_CYCLE_TIME ]]; then
-                touch {{ TEST_DIR }}/suite-stopping
-            fi
-            sleep 5
+            sleep 1
         """
-        description = "Tidy up the cycle so that the next can start"
+{% include 'suite-runtime-restart.rc' %}
diff --git a/tests/restart/waiting/suite.rc b/tests/restart/waiting/suite.rc
index 932f66c..9dab5b5 100644
--- a/tests/restart/waiting/suite.rc
+++ b/tests/restart/waiting/suite.rc
@@ -1,91 +1,21 @@
 #!jinja2
-{%- if USE_LOADLEVELER is defined and USE_LOADLEVELER %}
-{%- set HOST = environ['CYLC_LL_TEST_TASK_HOST'] %}
-{%- set SITE_DIRECTIVES = environ['CYLC_LL_TEST_SITE_DIRECTIVES'] %}
-{%- else %}
-{%- set USE_LOADLEVELER = false %}
-{%- set HOST = "" %}
-{%- set SITE_DIRECTIVES = "" %}
-{%- endif %}
 {%- set TEST_DIR = environ['TEST_DIR'] %}
 [cylc]
+    UTC mode = True
     [[event hooks]]
-        timeout handler = "touch {{ TEST_DIR }}/suite-stopping && shutdown_this_suite_hook"
-        timeout = 3
+        timeout handler = shutdown_this_suite_hook
+        timeout = PT3M
 [scheduling]
-    initial cycle time = 2013092300
-    final cycle time   = 2013092306
+    initial cycle time = 20130923T00
+    final cycle time   = 20130923T00
     [[dependencies]]
-        [[[0,6,12,18]]]
+        [[[R1]]]
             graph = """
-                tidy[T-6] => force_restart
-                force_restart => output_states
-                output_states => waiting_task
-                waiting_task => tidy
+                shutdown => output_states
+                output_states => waiting_task => finish
             """
 [runtime]
-    [[root]]
-        script = "sleep 1"
-    [[RESTART_TASKS]]
-        {%- if USE_LOADLEVELER %}
-        [[[job submission]]]
-            method = loadleveler
-        [[[directives]]]
-            class            = serial
-            job_type         = serial
-            wall_clock_limit = '60,30'
-            {{ SITE_DIRECTIVES }}
-        [[[remote]]]
-            host = {{ HOST }}
-        {%- endif %}
     [[waiting_task]]
-        inherit = RESTART_TASKS
-        description = "Waiting state task for restart (runs after restart)"
-    [[force_restart]]
-        pre-script = """
-            # We need to make sure that the results stay consistent.
-            sleep 5
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-pre-restart-$CYLC_TASK_CYCLE_TIME
-            cp $CYLC_SUITE_RUN_DIR/state/state {{ TEST_DIR }}/state-pre-restart-$CYLC_TASK_CYCLE_TIME
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_INITIAL_CYCLE_TIME ]]; then
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --now --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if USE_LOADLEVELER %}--set=USE_LOADLEVELER=true {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            else
-                cd $CYLC_SUITE_RUN_DIR && \
-                    cylc shutdown --interval=1 --max-polls=60 $CYLC_SUITE_REG_NAME && \
-                    cylc restart {% if USE_LOADLEVELER %}--set=USE_LOADLEVELER=true {% endif %}$CYLC_SUITE_REG_NAME && \
-                    touch $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME &
-            fi
-        """
-        description = "Force a shutdown and restart of the suite"
-        [[[environment]]]
-            CYLC_LL_TEST_TASK_HOST={{ HOST }}
-            CYLC_LL_TEST_SITE_DIRECTIVES={{ SITE_DIRECTIVES }}
-            TEST_DIR={{ TEST_DIR }}
-    [[output_states]]
-        script = """
-            while [[ ! -e $CYLC_SUITE_SHARE_DIR/restart-done-$CYLC_TASK_CYCLE_TIME ]]; do
-                sleep 0.1
-            done
-            cylc suite-state --interval=1 --task=output_states --cycle=$CYLC_TASK_CYCLE_TIME \
-                --max-polls=10 --status=running $CYLC_SUITE_REG_NAME
-            sleep 5  # Wait a few typical loops so that the states make it into the database.
-            sqlite3 $CYLC_SUITE_RUN_DIR/cylc-suite.db \
-                "select name, cycle, submit_num, try_num, status
-                 from task_states
-                 order by name, cycle;" > {{ TEST_DIR }}/states-db-post-restart-$CYLC_TASK_CYCLE_TIME
-        """
-        description = "Wait for the restart to complete"
-    [[tidy]]
-        script = """
-            if [[ $CYLC_TASK_CYCLE_TIME -eq $CYLC_SUITE_FINAL_CYCLE_TIME ]]; then
-                touch {{ TEST_DIR }}/suite-stopping
-            fi
-            sleep 5
-        """
-        description = "Tidy up the cycle so that the next can start"
+        description = "Waiting task (runs after restart)"
+        script = true
+{% include 'suite-runtime-restart.rc' %}
diff --git a/tests/runahead/no_final/suite.rc b/tests/runahead/no_final/suite.rc
index 67fd937..e0e2337 100644
--- a/tests/runahead/no_final/suite.rc
+++ b/tests/runahead/no_final/suite.rc
@@ -1,7 +1,7 @@
 [cylc]
     cycle point time zone = Z
     [[event hooks]]
-        timeout = PT0.1M
+        timeout = PT0.3M
         abort on timeout = True
 [scheduling]
     runahead limit = PT18H
@@ -17,4 +17,6 @@
     [[bar]]
         script = true
     [[shutdown]]
-        script = cylc shutdown $CYLC_SUITE_REG_NAME
+        script = """
+sleep 5
+cylc shutdown $CYLC_SUITE_REG_NAME"""
diff --git a/tests/special/07-clock-triggered-360.t b/tests/special/07-clock-triggered-360.t
index 520759d..59d3916 100644
--- a/tests/special/07-clock-triggered-360.t
+++ b/tests/special/07-clock-triggered-360.t
@@ -24,7 +24,7 @@ install_suite $TEST_NAME_BASE clock-360
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-validate
 run_fail $TEST_NAME cylc validate $SUITE_NAME
-grep_ok "ERROR: clock-triggered tasks require \[scheduling\]cycling mode=" \
+grep_ok "ERROR: clock-trigger tasks require \[scheduling\]cycling mode=" \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
 purge_suite $SUITE_NAME
diff --git a/tests/pyc/00-simple.t b/tests/suite-state/03-options.t
old mode 100644
new mode 100755
similarity index 78%
rename from tests/pyc/00-simple.t
rename to tests/suite-state/03-options.t
index 8e33215..098a016
--- a/tests/pyc/00-simple.t
+++ b/tests/suite-state/03-options.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,11 +15,14 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test extra pyc files that may allow imports from non-existent modules.
+# Test running of cylc suite-state with various CLI options
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 1
+install_suite $TEST_NAME_BASE options
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE
-py_files=$(find "$CYLC_DIR" -name "*.pyc" -type f | sed "s/pyc$/py/g")
-run_ok $TEST_NAME ls $py_files
+suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+#-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
+exit 0
diff --git a/tests/remote/00-basic.t b/tests/suite-state/04-template.t
old mode 100644
new mode 100755
similarity index 68%
copy from tests/remote/00-basic.t
copy to tests/suite-state/04-template.t
index 394d435..70d13d7
--- a/tests/remote/00-basic.t
+++ b/tests/suite-state/04-template.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,30 +15,28 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test remote host settings.
+# Test cylc suite-state "template" option
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-set_test_number 4
+set_test_number 3
 #-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE basic
+install_suite ${TEST_NAME_BASE}_ref template_ref
+SUITE_NAME_REF=$SUITE_NAME
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $SUITE_NAME
+TEST_NAME=$TEST_NAME_BASE-ref
+suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME_REF
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
+TEST_NAME=$TEST_NAME_BASE-cli-template
+run_ok $TEST_NAME cylc suite-state $SUITE_NAME_REF -p 20100101T0000Z \
+        --template=%Y --max-polls=1
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-userathost
-SUITE_RUN_DIR=$(cylc get-global-config --print-run-dir)/$SUITE_NAME
-echo $CYLC_TEST_TASK_OWNER@$CYLC_TEST_TASK_HOST > userathost
-cmp_ok userathost - <<__OUT__
-$(sqlite3 $SUITE_RUN_DIR/cylc-suite.db "select host from task_states where name='foo'")
-__OUT__
+install_suite $TEST_NAME_BASE template
+TEST_NAME=$TEST_NAME_BASE-runtime
 #-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-hostonly
-echo $CYLC_TEST_TASK_HOST > hostonly
-cmp_ok hostonly - <<__OUT__
-$(sqlite3 $SUITE_RUN_DIR/cylc-suite.db "select host from task_states where name='bar'")
-__OUT__
+suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME \
+        --set=REF_SUITE=$SUITE_NAME_REF
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME_REF
 purge_suite $SUITE_NAME
+#-------------------------------------------------------------------------------
+exit 0
diff --git a/tests/suite-state/options/reference.log b/tests/suite-state/options/reference.log
new file mode 100644
index 0000000..268b8d3
--- /dev/null
+++ b/tests/suite-state/options/reference.log
@@ -0,0 +1,49 @@
+2015-05-27T08:45:11Z INFO - port:7766
+2015-05-27T08:45:11Z INFO - Suite starting at 2015-05-27T08:45:11Z
+2015-05-27T08:45:11Z INFO - Run mode: live
+2015-05-27T08:45:11Z INFO - Initial point: 20100101T0000Z
+2015-05-27T08:45:11Z INFO - Final point: 20100103T0000Z
+2015-05-27T08:45:11Z INFO - Cold Start 20100101T0000Z
+2015-05-27T08:45:11Z INFO - [foo.20100101T0000Z] -job(01) initiate job-submit
+2015-05-27T08:45:11Z INFO - [foo.20100101T0000Z] -triggered off []
+2015-05-27T08:45:12Z INFO - 32068
+
+2015-05-27T08:45:12Z INFO - [foo.20100101T0000Z] -submit_method_id=32068
+2015-05-27T08:45:12Z INFO - [foo.20100101T0000Z] -submission succeeded
+2015-05-27T08:45:13Z INFO - [foo.20100101T0000Z] -(current:submitted)> foo.20100101T0000Z started at 2015-05-27T08:45:12Z
+2015-05-27T08:45:13Z INFO - [foo.20100101T0000Z] -(current:running)> foo.20100101T0000Z succeeded at 2015-05-27T08:45:13Z
+2015-05-27T08:45:14Z INFO - [foo.20100102T0000Z] -job(01) initiate job-submit
+2015-05-27T08:45:14Z INFO - [foo.20100102T0000Z] -triggered off ['foo.20100101T0000Z']
+2015-05-27T08:45:15Z INFO - 32258
+
+2015-05-27T08:45:15Z INFO - [foo.20100102T0000Z] -submit_method_id=32258
+2015-05-27T08:45:15Z INFO - [foo.20100102T0000Z] -submission succeeded
+2015-05-27T08:45:16Z INFO - [foo.20100102T0000Z] -(current:submitted)> foo.20100102T0000Z started at 2015-05-27T08:45:15Z
+2015-05-27T08:45:17Z INFO - [foo.20100102T0000Z] -(current:running)> foo.20100102T0000Z succeeded at 2015-05-27T08:45:16Z
+2015-05-27T08:45:18Z INFO - [foo.20100103T0000Z] -job(01) initiate job-submit
+2015-05-27T08:45:18Z INFO - [foo.20100103T0000Z] -triggered off ['foo.20100102T0000Z']
+2015-05-27T08:45:19Z INFO - 32454
+
+2015-05-27T08:45:19Z INFO - [foo.20100103T0000Z] -submit_method_id=32454
+2015-05-27T08:45:19Z INFO - [foo.20100103T0000Z] -submission succeeded
+2015-05-27T08:45:19Z INFO - [foo.20100104T0000Z] -holding (beyond suite stop point) 20100103T0000Z
+2015-05-27T08:45:19Z INFO - [foo.20100104T0000Z] -waiting => held
+2015-05-27T08:45:20Z INFO - [foo.20100103T0000Z] -(current:submitted)> foo.20100103T0000Z started at 2015-05-27T08:45:19Z
+2015-05-27T08:45:20Z INFO - [foo.20100103T0000Z] -(current:running)> foo.20100103T0000Z succeeded at 2015-05-27T08:45:20Z
+2015-05-27T08:45:21Z INFO - [offset_polling.20100103T0000Z] -job(01) initiate job-submit
+2015-05-27T08:45:21Z INFO - [env_polling.20100103T0000Z] -job(01) initiate job-submit
+2015-05-27T08:45:21Z INFO - [offset_polling.20100103T0000Z] -triggered off ['foo.20100103T0000Z']
+2015-05-27T08:45:21Z INFO - [env_polling.20100103T0000Z] -triggered off ['foo.20100103T0000Z']
+2015-05-27T08:45:22Z INFO - 32594
+
+2015-05-27T08:45:22Z INFO - [offset_polling.20100103T0000Z] -submit_method_id=32594
+2015-05-27T08:45:22Z INFO - [offset_polling.20100103T0000Z] -submission succeeded
+2015-05-27T08:45:22Z INFO - 32595
+
+2015-05-27T08:45:22Z INFO - [env_polling.20100103T0000Z] -submit_method_id=32595
+2015-05-27T08:45:22Z INFO - [env_polling.20100103T0000Z] -submission succeeded
+2015-05-27T08:45:23Z INFO - [offset_polling.20100103T0000Z] -(current:submitted)> offset_polling.20100103T0000Z started at 2015-05-27T08:45:22Z
+2015-05-27T08:45:23Z INFO - [env_polling.20100103T0000Z] -(current:submitted)> env_polling.20100103T0000Z started at 2015-05-27T08:45:22Z
+2015-05-27T08:45:24Z INFO - [offset_polling.20100103T0000Z] -(current:running)> offset_polling.20100103T0000Z succeeded at 2015-05-27T08:45:24Z
+2015-05-27T08:45:24Z INFO - [env_polling.20100103T0000Z] -(current:running)> env_polling.20100103T0000Z succeeded at 2015-05-27T08:45:24Z
+2015-05-27T08:45:25Z INFO - Suite shutting down at 2015-05-27T08:45:25Z
diff --git a/tests/suite-state/options/suite.rc b/tests/suite-state/options/suite.rc
new file mode 100644
index 0000000..a39345c
--- /dev/null
+++ b/tests/suite-state/options/suite.rc
@@ -0,0 +1,22 @@
+#!jinja2
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        live mode suite timeout = PT1M
+[scheduling]
+    initial cycle point = 20100101T00Z
+    final cycle point = 20100103T00Z
+    [[dependencies]]
+        [[[T00]]]
+        graph = "foo[-P1D] => foo"
+        [[[R1/20100103T00Z]]]
+        graph = """foo => env_polling
+                   foo => offset_polling
+                """
+[runtime]
+    [[foo]]
+        command scripting = true
+    [[env_polling]]
+        command scripting = cylc suite-state $CYLC_SUITE_NAME --task=foo --task-point -S succeeded
+    [[offset_polling]]
+        command scripting = cylc suite-state $CYLC_SUITE_NAME --task=foo -p 20100101T0000Z --offset=P1D
diff --git a/tests/suite-state/template/reference.log b/tests/suite-state/template/reference.log
new file mode 100644
index 0000000..564be96
--- /dev/null
+++ b/tests/suite-state/template/reference.log
@@ -0,0 +1,19 @@
+2015-11-02T15:30:50Z INFO - port:7766
+2015-11-02T15:30:50Z INFO - Suite starting at 2015-11-02T15:30:50Z
+2015-11-02T15:30:50Z INFO - Run mode: live
+2015-11-02T15:30:50Z INFO - Initial point: 20100101T0000Z
+2015-11-02T15:30:50Z INFO - Final point: 20110101T0000Z
+2015-11-02T15:30:50Z INFO - Cold Start 20100101T0000Z
+2015-11-02T15:30:50Z INFO - [poll_foo.20100101T0000Z] -triggered off []
+2015-11-02T15:30:51Z INFO - [poll_foo.20100101T0000Z] -submit_method_id=24838
+2015-11-02T15:30:51Z INFO - [poll_foo.20100101T0000Z] -submission succeeded
+2015-11-02T15:30:51Z INFO - [poll_foo.20100101T0000Z] -(current:submitted)> poll_foo.20100101T0000Z started at 2015-11-02T15:30:51Z
+2015-11-02T15:30:52Z INFO - [poll_foo.20110101T0000Z] -triggered off []
+2015-11-02T15:30:52Z INFO - [poll_foo.20100101T0000Z] -(current:running)> poll_foo.20100101T0000Z succeeded at 2015-11-02T15:30:51Z
+2015-11-02T15:30:53Z INFO - [poll_foo.20110101T0000Z] -submit_method_id=24941
+2015-11-02T15:30:53Z INFO - [poll_foo.20110101T0000Z] -submission succeeded
+2015-11-02T15:30:53Z INFO - [poll_foo.20120101T0000Z] -holding (beyond suite stop point) 20110101T0000Z
+2015-11-02T15:30:53Z INFO - [poll_foo.20120101T0000Z] -waiting => held
+2015-11-02T15:30:53Z INFO - [poll_foo.20110101T0000Z] -(current:submitted)> poll_foo.20110101T0000Z started at 2015-11-02T15:30:53Z
+2015-11-02T15:30:54Z INFO - [poll_foo.20110101T0000Z] -(current:running)> poll_foo.20110101T0000Z succeeded at 2015-11-02T15:30:53Z
+2015-11-02T15:30:55Z INFO - Suite shutting down at 2015-11-02T15:30:55Z
diff --git a/tests/suite-state/template/suite.rc b/tests/suite-state/template/suite.rc
new file mode 100644
index 0000000..ffcdc22
--- /dev/null
+++ b/tests/suite-state/template/suite.rc
@@ -0,0 +1,16 @@
+#!jinja2
+[cylc]
+    UTC mode = True
+    [[reference test]]
+        live mode suite timeout = PT1M
+[scheduling]
+    initial cycle point = 20100101T0000Z
+    final cycle point   = 20110101T0000Z
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = poll_foo<{{REF_SUITE}}::foo>
+
+[runtime]
+    [[poll_foo]]
+        [[[suite state polling]]]
+            template = %Y
diff --git a/tests/suite-state/template_ref/reference.log b/tests/suite-state/template_ref/reference.log
new file mode 100644
index 0000000..354bb7b
--- /dev/null
+++ b/tests/suite-state/template_ref/reference.log
@@ -0,0 +1,19 @@
+2015-11-02T15:06:04Z INFO - port:7766
+2015-11-02T15:06:04Z INFO - Suite starting at 2015-11-02T15:06:04Z
+2015-11-02T15:06:04Z INFO - Run mode: live
+2015-11-02T15:06:04Z INFO - Initial point: 2010
+2015-11-02T15:06:04Z INFO - Final point: 2011
+2015-11-02T15:06:04Z INFO - Cold Start 2010
+2015-11-02T15:06:04Z INFO - [foo.2010] -triggered off []
+2015-11-02T15:06:05Z INFO - [foo.2010] -submit_method_id=22643
+2015-11-02T15:06:05Z INFO - [foo.2010] -submission succeeded
+2015-11-02T15:06:05Z INFO - [foo.2010] -(current:submitted)> foo.2010 started at 2015-11-02T15:06:05Z
+2015-11-02T15:06:06Z INFO - [foo.2011] -triggered off []
+2015-11-02T15:06:06Z INFO - [foo.2010] -(current:running)> foo.2010 succeeded at 2015-11-02T15:06:05Z
+2015-11-02T15:06:07Z INFO - [foo.2011] -submit_method_id=22741
+2015-11-02T15:06:07Z INFO - [foo.2011] -submission succeeded
+2015-11-02T15:06:07Z INFO - [foo.2012] -holding (beyond suite stop point) 2011
+2015-11-02T15:06:07Z INFO - [foo.2012] -waiting => held
+2015-11-02T15:06:07Z INFO - [foo.2011] -(current:submitted)> foo.2011 started at 2015-11-02T15:06:07Z
+2015-11-02T15:06:08Z INFO - [foo.2011] -(current:running)> foo.2011 succeeded at 2015-11-02T15:06:07Z
+2015-11-02T15:06:09Z INFO - Suite shutting down at 2015-11-02T15:06:09Z
diff --git a/tests/suite-state/template_ref/suite.rc b/tests/suite-state/template_ref/suite.rc
new file mode 100644
index 0000000..2884903
--- /dev/null
+++ b/tests/suite-state/template_ref/suite.rc
@@ -0,0 +1,16 @@
+[cylc]
+    UTC mode = True
+    cycle point format = %Y
+    [[reference test]]
+        live mode suite timeout = PT1M
+
+[scheduling]
+    initial cycle point = 2010
+    final cycle point   = 2011
+    [[dependencies]]
+        [[[P1Y]]]
+            graph = foo
+
+[runtime]
+    [[foo]]
+        script = true
diff --git a/tests/vacation/00-sigusr1.t b/tests/vacation/00-sigusr1.t
index 9b886ba..cbb145b 100755
--- a/tests/vacation/00-sigusr1.t
+++ b/tests/vacation/00-sigusr1.t
@@ -19,68 +19,74 @@
 # Obviously, job vacation does not happen with background job, and the job
 # will no longer be poll-able after the kill.
 . $(dirname $0)/test_header
-#-------------------------------------------------------------------------------
-set_test_number 6
-install_suite $TEST_NAME_BASE $TEST_NAME_BASE
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test $SUITE_NAME
-#-------------------------------------------------------------------------------
-SUITE_RUN_DIR=$(cylc get-global-config --print-run-dir)/$SUITE_NAME
 
-# Make sure t1.1.1's status file is in place
-T1_STATUS_FILE=$SUITE_RUN_DIR/log/job/1/t1/01/job.status
+run_tests() {
+    set_test_number 6
+    install_suite $TEST_NAME_BASE $TEST_NAME_BASE
+    TEST_NAME=$TEST_NAME_BASE-validate
+    run_ok $TEST_NAME cylc validate $SUITE_NAME
+    TEST_NAME=$TEST_NAME_BASE-run
+    suite_run_ok $TEST_NAME cylc run --reference-test $SUITE_NAME
+    SUITE_RUN_DIR=$(cylc get-global-config --print-run-dir)/$SUITE_NAME
 
-poll '!' test -e "${T1_STATUS_FILE}"
-poll '!' grep 'CYLC_JOB_PID=' "${T1_STATUS_FILE}"
+    # Make sure t1.1.1's status file is in place
+    T1_STATUS_FILE=$SUITE_RUN_DIR/log/job/1/t1/01/job.status
 
-# Kill the job and see what happens
-T1_PID=$(awk -F= '$1=="CYLC_JOB_PID" {print $2}' "${T1_STATUS_FILE}")
-kill -s USR1 $T1_PID
-while ps $T1_PID 1>/dev/null 2>&1; do
-    sleep 1
-done
-exists_fail $T1_STATUS_FILE
-TIMEOUT=$(($(date +%s) + 120))
-while ! grep -q 'Task job script vacated by signal USR1' \
-            $SUITE_RUN_DIR/log/suite/log \
-        && (($TIMEOUT > $(date +%s)))
-do
-    sleep 1
-done
-TIMEOUT=$(($(date +%s) + 10))
-while ! sqlite3 $SUITE_RUN_DIR/cylc-suite.db \
-            'SELECT status FROM task_states WHERE name=="t1";' \
-            >"$TEST_NAME-db-t1" 2>/dev/null \
-        && (($TIMEOUT > $(date +%s)))
-do
-    sleep 1
-done
-cmp_ok "$TEST_NAME-db-t1" - <<<'submitted'
-# Start the job again and see what happens
-mkdir -p $SUITE_RUN_DIR/work/1/t1/
-touch $SUITE_RUN_DIR/work/1/t1/file # Allow t1 to complete
-$SUITE_RUN_DIR/log/job/1/t1/01/job </dev/null >/dev/null 2>&1 &
-# Wait for suite to complete
-TIMEOUT=$(($(date +%s) + 120))
-while [[ -f ~/.cylc/ports/$SUITE_NAME ]] && (($TIMEOUT > $(date +%s))); do
-    sleep 1
-done
-# Test t1 status in DB
-sqlite3 $SUITE_RUN_DIR/cylc-suite.db \
-    'SELECT status FROM task_states WHERE name=="t1";' >"$TEST_NAME-db-t1"
-cmp_ok "$TEST_NAME-db-t1" - <<<'succeeded'
-# Test reference
-TIMEOUT=$(($(date +%s) + 120))
-while ! grep -q 'DONE' $SUITE_RUN_DIR/log/suite/out \
-        && (($TIMEOUT > $(date +%s)))
-do
-    sleep 1
-done
-grep_ok 'SUITE REFERENCE TEST PASSED' $SUITE_RUN_DIR/log/suite/out
-#-------------------------------------------------------------------------------
-purge_suite $SUITE_NAME
-exit
+    poll '!' test -e "${T1_STATUS_FILE}"
+    poll '!' grep 'CYLC_JOB_PID=' "${T1_STATUS_FILE}"
+
+    # Kill the job and see what happens
+    T1_PID=$(awk -F= '$1=="CYLC_JOB_PID" {print $2}' "${T1_STATUS_FILE}")
+    kill -s USR1 $T1_PID
+    while ps $T1_PID 1>/dev/null 2>&1; do
+        sleep 1
+    done
+    run_fail "${T1_STATUS_FILE}" grep -q '^CYLC_JOB' "${T1_STATUS_FILE}"
+    TIMEOUT=$(($(date +%s) + 120))
+    while ! grep -q 'Task job script vacated by signal USR1' \
+                $SUITE_RUN_DIR/log/suite/log \
+            && (($TIMEOUT > $(date +%s)))
+    do
+        sleep 1
+    done
+    TIMEOUT=$(($(date +%s) + 10))
+    while ! sqlite3 $SUITE_RUN_DIR/cylc-suite.db \
+                'SELECT status FROM task_states WHERE name=="t1";' \
+                >"$TEST_NAME-db-t1" 2>/dev/null \
+            && (($TIMEOUT > $(date +%s)))
+    do
+        sleep 1
+    done
+    grep_ok "^\(submitted\|running\)$" "$TEST_NAME-db-t1"
+    # Start the job again and see what happens
+    mkdir -p $SUITE_RUN_DIR/work/1/t1/
+    touch $SUITE_RUN_DIR/work/1/t1/file # Allow t1 to complete
+    $SUITE_RUN_DIR/log/job/1/t1/01/job </dev/null >/dev/null 2>&1 &
+    # Wait for suite to complete
+    TIMEOUT=$(($(date +%s) + 120))
+    while [[ -f ~/.cylc/ports/$SUITE_NAME ]] && (($TIMEOUT > $(date +%s))); do
+        sleep 1
+    done
+    # Test t1 status in DB
+    sqlite3 $SUITE_RUN_DIR/cylc-suite.db \
+        'SELECT status FROM task_states WHERE name=="t1";' >"$TEST_NAME-db-t1"
+    cmp_ok "$TEST_NAME-db-t1" - <<<'succeeded'
+    # Test reference
+    TIMEOUT=$(($(date +%s) + 120))
+    while ! grep -q 'DONE' $SUITE_RUN_DIR/log/suite/out \
+            && (($TIMEOUT > $(date +%s)))
+    do
+        sleep 1
+    done
+    grep_ok 'SUITE REFERENCE TEST PASSED' $SUITE_RUN_DIR/log/suite/out
+    purge_suite $SUITE_NAME
+    exit
+}
+
+# Programs running in some environments are unable to trap SIGUSR1, e.g. the
+# environment documented in this comment:
+# https://github.com/cylc/cylc/pull/1648#issuecomment-149348410
+trap 'run_tests' 'SIGUSR1'
+kill -s 'SIGUSR1' "$$"
+sleep 1
+skip_all 'Program not receiving SIGUSR1'
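For reference, the self-signal guard added in the hunk above boils down to the
following pattern (a minimal standalone sketch, not part of the patch; skip_all
is stubbed here in place of the helper that cylc's test_header provides):

#!/bin/bash
# Run the test body only if this shell can actually receive SIGUSR1;
# otherwise fall through and skip the whole battery.
skip_all() { echo "SKIP ALL: $*"; exit 0; }   # stub for the test_header helper
run_tests() {
    echo "SIGUSR1 delivered - the real assertions would run here"
    exit 0
}
trap 'run_tests' 'SIGUSR1'
kill -s 'SIGUSR1' "$$"
sleep 1
skip_all 'Program not receiving SIGUSR1'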
diff --git a/tests/vacation/01-loadleveler.t b/tests/vacation/01-loadleveler.t
index 7a06da9..ae096e1 100755
--- a/tests/vacation/01-loadleveler.t
+++ b/tests/vacation/01-loadleveler.t
@@ -21,15 +21,15 @@
 # require a site admin to pre-empt a job.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
+RC_PREV="[test battery][batch systems][loadleveler]"
 export CYLC_TEST_HOST=$( \
-    cylc get-global-config -i '[test battery][batch systems][loadleveler]host')
+    cylc get-global-config -i "${RC_PREV}host" 2>'/dev/null')
 if [[ -z $CYLC_TEST_HOST ]]; then
-    skip_all '[test battery][batch systems][loadleveler]host: not defined'
+    skip_all '"[test battery][batch systems][loadleveler]host": not defined'
 fi
 set_test_number 6
 export CYLC_TEST_DIRECTIVES=$( \
-    cylc get-global-config \
-    -i '[test battery][batch systems][loadleveler][directives]')
+    cylc get-global-config -i "${RC_PREV}[directives]" 2>'/dev/null')
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 set -eu
 if [[ $CYLC_TEST_HOST != 'localhost' ]]; then
diff --git a/tests/validate/09-include-missing.t b/tests/validate/09-include-missing.t
index 269674b..dfafb39 100755
--- a/tests/validate/09-include-missing.t
+++ b/tests/validate/09-include-missing.t
@@ -23,9 +23,8 @@ echo '%include foo.rc' >suite.rc
 echo '%include bar.rc' >foo.rc
 run_fail "$TEST_NAME_BASE" cylc validate suite.rc
 cmp_ok "$TEST_NAME_BASE.stderr" <<__ERR__
-ParseError: File not found: $PWD/bar.rc
-   via $PWD/foo.rc
-   via $PWD/suite.rc
+FileParseError:
+Include-file not found: bar.rc via foo.rc from $PWD/suite.rc
 __ERR__
 #-------------------------------------------------------------------------------
 exit
diff --git a/tests/validate/10-bad-sequence-interval.t b/tests/validate/10-bad-sequence-interval.t
index d45d303..5ce5513 100755
--- a/tests/validate/10-bad-sequence-interval.t
+++ b/tests/validate/10-bad-sequence-interval.t
@@ -26,4 +26,5 @@ run_fail "$TEST_NAME_BASE" cylc validate suite.rc
 grep_ok "Invalid ISO 8601 duration representation: PT5D" \
     "$TEST_NAME_BASE.stderr"
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/11-bad-sequence-2-digit-century.t b/tests/validate/11-bad-sequence-2-digit-century.t
index 0f0dcef..ad800eb 100644
--- a/tests/validate/11-bad-sequence-2-digit-century.t
+++ b/tests/validate/11-bad-sequence-2-digit-century.t
@@ -25,4 +25,5 @@ TEST_NAME=$TEST_NAME_BASE-val
 run_fail "$TEST_NAME" cylc validate suite.rc
 grep_ok "2 digit centuries not allowed" "$TEST_NAME.stderr"
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/11-fail-mixed-syntax-formats-1.t b/tests/validate/11-fail-mixed-syntax-formats-1.t
index 64d07c4..42f87c9 100755
--- a/tests/validate/11-fail-mixed-syntax-formats-1.t
+++ b/tests/validate/11-fail-mixed-syntax-formats-1.t
@@ -30,4 +30,5 @@ vs pre-cylc-6 syntax \
 (\[scheduling\]\[\[dependencies\]\]\[\[\[0\]\]\]: old-style cycling)" \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/12-fail-mixed-syntax-formats-2.t b/tests/validate/12-fail-mixed-syntax-formats-2.t
index 78a7f77..ea289ef 100755
--- a/tests/validate/12-fail-mixed-syntax-formats-2.t
+++ b/tests/validate/12-fail-mixed-syntax-formats-2.t
@@ -30,4 +30,5 @@ vs pre-cylc-6 syntax \
 (graphnode foo\[T-24\]: old-style offset)" \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/13-fail-mixed-syntax-formats-3.t b/tests/validate/13-fail-mixed-syntax-formats-3.t
index 31e913d..8207396 100644
--- a/tests/validate/13-fail-mixed-syntax-formats-3.t
+++ b/tests/validate/13-fail-mixed-syntax-formats-3.t
@@ -30,4 +30,5 @@ vs pre-cylc-6 syntax \
 (integer interval: \[runtime\]\[root\]\[event hooks\]execution timeout = 3)" \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/14-fail-mixed-syntax-formats-4.t b/tests/validate/14-fail-mixed-syntax-formats-4.t
index 63019c1..17505c8 100755
--- a/tests/validate/14-fail-mixed-syntax-formats-4.t
+++ b/tests/validate/14-fail-mixed-syntax-formats-4.t
@@ -30,4 +30,5 @@ vs post-cylc-6 syntax \
 (\[scheduling\]\[\[dependencies\]\]\[\[\[T12\]\]\]: ISO 8601-style cycling)" \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/15-fail-mixed-syntax-formats-5.t b/tests/validate/15-fail-mixed-syntax-formats-5.t
index 921b03e..a3949e3 100644
--- a/tests/validate/15-fail-mixed-syntax-formats-5.t
+++ b/tests/validate/15-fail-mixed-syntax-formats-5.t
@@ -29,4 +29,5 @@ grep_ok "Conflicting syntax: pre-cylc-6 syntax \
 vs post-cylc-6 syntax \
 (graphnode: foo\[-PT24H\]: ISO 8601 offset)" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/16-fail-mixed-syntax-formats-6.t b/tests/validate/16-fail-mixed-syntax-formats-6.t
index 36beaa9..dfc47c4 100644
--- a/tests/validate/16-fail-mixed-syntax-formats-6.t
+++ b/tests/validate/16-fail-mixed-syntax-formats-6.t
@@ -29,4 +29,5 @@ grep_ok "Conflicting syntax: post-cylc-6 syntax \
 vs pre-cylc-6 syntax \
 (initial/final cycle point format: CCYYMMDDhh)" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/17-fail-mixed-syntax-formats-7.t b/tests/validate/17-fail-mixed-syntax-formats-7.t
index 1975e09..ae7d3d0 100644
--- a/tests/validate/17-fail-mixed-syntax-formats-7.t
+++ b/tests/validate/17-fail-mixed-syntax-formats-7.t
@@ -29,4 +29,5 @@ grep_ok "Conflicting syntax: pre-cylc-6 syntax \
 vs post-cylc-6 syntax \
 (graphnode: cold_foo\[^T00\]: ISO 8601 offset)" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/18-fail-mixed-syntax-formats-8.t b/tests/validate/18-fail-mixed-syntax-formats-8.t
index 0a84bbe..1b054c0 100644
--- a/tests/validate/18-fail-mixed-syntax-formats-8.t
+++ b/tests/validate/18-fail-mixed-syntax-formats-8.t
@@ -29,4 +29,5 @@ grep_ok "Conflicting syntax: post-cylc-6 syntax \
 vs pre-cylc-6 syntax \
 (start-up tasks: cold_foo)" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/19-fail-mixed-syntax-formats-9.t b/tests/validate/19-fail-mixed-syntax-formats-9.t
index 73f6806..30a9bed 100644
--- a/tests/validate/19-fail-mixed-syntax-formats-9.t
+++ b/tests/validate/19-fail-mixed-syntax-formats-9.t
@@ -30,4 +30,5 @@ vs pre-cylc-6 syntax \
 (\[scheduling\]\[\[dependencies\]\]graph: mixed with date-time cycling)" \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/20-fail-no-scheduling.t b/tests/validate/20-fail-no-scheduling.t
index f89bb99..7c20da6 100644
--- a/tests/validate/20-fail-no-scheduling.t
+++ b/tests/validate/20-fail-no-scheduling.t
@@ -24,6 +24,7 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE
 run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
-grep_ok "No suite dependency graph defined\." $TEST_NAME.stderr
+grep_ok "missing \[scheduling\] section" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/21-fail-no-dependencies.t b/tests/validate/21-fail-no-dependencies.t
index f89bb99..0489948 100644
--- a/tests/validate/21-fail-no-dependencies.t
+++ b/tests/validate/21-fail-no-dependencies.t
@@ -26,4 +26,5 @@ TEST_NAME=$TEST_NAME_BASE
 run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
 grep_ok "No suite dependency graph defined\." $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/21-fail-no-dependencies/suite.rc b/tests/validate/21-fail-no-dependencies/suite.rc
index d7fc7cc..525ce34 100644
--- a/tests/validate/21-fail-no-dependencies/suite.rc
+++ b/tests/validate/21-fail-no-dependencies/suite.rc
@@ -1 +1,2 @@
 [scheduling]
+    [[dependencies]]
diff --git a/tests/validate/22-fail-no-graph-async.t b/tests/validate/22-fail-no-graph-async.t
index f89bb99..0489948 100644
--- a/tests/validate/22-fail-no-graph-async.t
+++ b/tests/validate/22-fail-no-graph-async.t
@@ -26,4 +26,5 @@ TEST_NAME=$TEST_NAME_BASE
 run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
 grep_ok "No suite dependency graph defined\." $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/23-fail-no-graph-sequence.t b/tests/validate/23-fail-no-graph-sequence.t
index f89bb99..0489948 100644
--- a/tests/validate/23-fail-no-graph-sequence.t
+++ b/tests/validate/23-fail-no-graph-sequence.t
@@ -26,4 +26,5 @@ TEST_NAME=$TEST_NAME_BASE
 run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
 grep_ok "No suite dependency graph defined\." $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/24-fail-year-bounds.t b/tests/validate/24-fail-year-bounds.t
index e2359d2..ba2a446 100644
--- a/tests/validate/24-fail-year-bounds.t
+++ b/tests/validate/24-fail-year-bounds.t
@@ -27,4 +27,5 @@ run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
 grep_ok "incompatible with \[cylc\]cycle point num expanded year digits = 0" \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/25-fail-mixed-syntax-formats-10.t b/tests/validate/25-fail-mixed-syntax-formats-10.t
index 70fcf21..d01182d 100644
--- a/tests/validate/25-fail-mixed-syntax-formats-10.t
+++ b/tests/validate/25-fail-mixed-syntax-formats-10.t
@@ -30,4 +30,5 @@ vs post-cylc-6 syntax \
 (ISO 8601 interval: \[runtime\]\[A\]retry delays = PT30M)" \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/26-fail-initial-greater-final.t b/tests/validate/26-fail-initial-greater-final.t
index e9f2e7e..a42b57d 100644
--- a/tests/validate/26-fail-initial-greater-final.t
+++ b/tests/validate/26-fail-initial-greater-final.t
@@ -27,4 +27,5 @@ run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
 grep_ok "The initial cycle point:20141208T0000Z is after the final cycle \
 point:20141207T0000Z."    $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/28-fail-graph-double-ampsand/suite.rc b/tests/validate/28-fail-graph-double-ampsand/suite.rc
deleted file mode 100644
index 6bbf064..0000000
--- a/tests/validate/28-fail-graph-double-ampsand/suite.rc
+++ /dev/null
@@ -1,14 +0,0 @@
-[scheduling]
-    [[dependencies]]
-            graph = """
-                    foo && bar => baz
-            """
-[runtime]
-    [[foo]] 
-        script = echo "foo"
-        
-    [[bar]]
-        script = echo "bar"
-
-    [[baz]]
-        script = echo "baz"
diff --git a/tests/graph-equivalence/00-oneline.t b/tests/validate/28-fail-graph-double-conditionals.t
similarity index 57%
copy from tests/graph-equivalence/00-oneline.t
copy to tests/validate/28-fail-graph-double-conditionals.t
index ff0eacf..deeae2e 100644
--- a/tests/graph-equivalence/00-oneline.t
+++ b/tests/validate/28-fail-graph-double-conditionals.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,33 +15,40 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test graph="a=>b=>c" gives the same result as
-#      graph = """a => b\
-#                   => c"""
+# Test that validation fails on '&&' and '||' in the graph.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
-set_test_number 5
-#-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE test1
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-validate
-run_ok $TEST_NAME cylc validate $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-suite_run_ok $TEST_NAME cylc run --reference-test --debug $SUITE_NAME
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-check-a
-cylc run $SUITE_NAME --hold
-cylc show $SUITE_NAME a.1 | sed -n "/prerequisites/,/outputs/p" > a-prereqs
-cmp_ok $TEST_SOURCE_DIR/splitline_refs/a-ref a-prereqs
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-check-b
-cylc show $SUITE_NAME b.1 | sed -n "/prerequisites/,/outputs/p" > b-prereqs
-cmp_ok $TEST_SOURCE_DIR/splitline_refs/b-ref b-prereqs
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-check-c
-cylc show $SUITE_NAME c.1 | sed -n "/prerequisites/,/outputs/p" > c-prereqs
-cmp_ok $TEST_SOURCE_DIR/splitline_refs/c-ref c-prereqs
-#-------------------------------------------------------------------------------
-cylc shutdown $SUITE_NAME --now -f
-purge_suite $SUITE_NAME
+set_test_number 8
+#-------------------------------------------------------------------------------
+cat > suite.rc <<__END__
+[scheduling]
+    [[dependencies]]
+        graph = foo && bar => baz
+__END__
+#-------------------------------------------------------------------------------
+TEST_NAME=${TEST_NAME_BASE}-async-and
+run_fail $TEST_NAME cylc validate --debug -v suite.rc
+grep_ok "ERROR: the graph AND operator is '&': " $TEST_NAME.stderr
+#-------------------------------------------------------------------------------
+TEST_NAME=${TEST_NAME_BASE}-async-or
+sed -i -e 's/&&/||/' suite.rc
+run_fail $TEST_NAME cylc validate --debug -v suite.rc
+grep_ok "ERROR: the graph OR operator is '|': " $TEST_NAME.stderr
+#-------------------------------------------------------------------------------
+cat > suite.rc <<__END__
+[scheduling]
+    initial cycle point = 2015
+    [[dependencies]]
+        [[[R1]]]
+            graph = foo && bar => baz
+__END__
+#-------------------------------------------------------------------------------
+TEST_NAME=${TEST_NAME_BASE}-cycling-and
+run_fail $TEST_NAME cylc validate --debug -v suite.rc
+grep_ok "ERROR: the graph AND operator is '&': " $TEST_NAME.stderr
+#-------------------------------------------------------------------------------
+TEST_NAME=${TEST_NAME_BASE}-cycling-or
+sed -i -e 's/&&/||/' suite.rc
+run_fail $TEST_NAME cylc validate --debug -v suite.rc
+grep_ok "ERROR: the graph OR operator is '|': " $TEST_NAME.stderr
+
diff --git a/tests/validate/30-fail-max-active-cycle-points-zero.t b/tests/validate/30-fail-max-active-cycle-points-zero.t
index 5a582a9..f84efcb 100644
--- a/tests/validate/30-fail-max-active-cycle-points-zero.t
+++ b/tests/validate/30-fail-max-active-cycle-points-zero.t
@@ -26,4 +26,5 @@ TEST_NAME=$TEST_NAME_BASE
 run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
 grep_ok "ERROR: max cycle points must be greater than 0" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/32-fail-not-integer.t b/tests/validate/32-fail-not-integer.t
index ec3d490..8740b91 100644
--- a/tests/validate/32-fail-not-integer.t
+++ b/tests/validate/32-fail-not-integer.t
@@ -26,4 +26,5 @@ TEST_NAME=$TEST_NAME_BASE
 run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
 grep_ok "Conflicting syntax: integer vs cycling suite" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/33-fail-graph-bracket-missing.t b/tests/validate/33-fail-graph-bracket-missing.t
index 7fc92be..84b1c75 100644
--- a/tests/validate/33-fail-graph-bracket-missing.t
+++ b/tests/validate/33-fail-graph-bracket-missing.t
@@ -27,4 +27,5 @@ TEST_NAME=$TEST_NAME_BASE
 run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
 grep_ok "ERROR: missing bracket in: " $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/28-fail-graph-double-ampsand.t b/tests/validate/34-fail-graph-cycles.t
similarity index 85%
copy from tests/validate/28-fail-graph-double-ampsand.t
copy to tests/validate/34-fail-graph-cycles.t
index 71f5d10..3bc24c7 100644
--- a/tests/validate/28-fail-graph-double-ampsand.t
+++ b/tests/validate/34-fail-graph-cycles.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation with && in the graph.
+# Test that validation of a suite with cyclic dependence fails.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -23,7 +23,8 @@ set_test_number 2
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE
-run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
-grep_ok "ERROR: Illegal '&&' in 'graph' at" $TEST_NAME.stderr
+run_fail $TEST_NAME cylc validate --debug -v $SUITE_NAME
+grep_ok "ERROR: cyclic dependence detected" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/34-fail-graph-cycles/suite.rc b/tests/validate/34-fail-graph-cycles/suite.rc
new file mode 100644
index 0000000..c5a0e4f
--- /dev/null
+++ b/tests/validate/34-fail-graph-cycles/suite.rc
@@ -0,0 +1,4 @@
+title = A suite with cyclic dependence.
+[scheduling]
+    [[dependencies]]
+        graph = a => b => c => d => a => z
diff --git a/tests/validate/28-fail-graph-double-ampsand.t b/tests/validate/35-fail-self-edges.t
similarity index 90%
rename from tests/validate/28-fail-graph-double-ampsand.t
rename to tests/validate/35-fail-self-edges.t
index 71f5d10..fb8f4a2 100644
--- a/tests/validate/28-fail-graph-double-ampsand.t
+++ b/tests/validate/35-fail-self-edges.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation with && in the graph.
+# Test that validation of a suite with self-edges fails.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -24,6 +24,7 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE
 run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
-grep_ok "ERROR: Illegal '&&' in 'graph' at" $TEST_NAME.stderr
+grep_ok "ERROR, self-edge detected:" $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/35-fail-self-edges/suite.rc b/tests/validate/35-fail-self-edges/suite.rc
new file mode 100644
index 0000000..6ef6f0c
--- /dev/null
+++ b/tests/validate/35-fail-self-edges/suite.rc
@@ -0,0 +1,8 @@
+title = A suite with self-edges.
+[scheduling]
+    [[dependencies]]
+        graph = FAM:succeed-all => f & g => z
+[runtime]
+    [[FAM]]
+    [[f,g,h]]
+       inherit = FAM
diff --git a/tests/events/00-suite.t b/tests/validate/36-pass-special-tasks-non-word-names.t
old mode 100644
new mode 100755
similarity index 63%
copy from tests/events/00-suite.t
copy to tests/validate/36-pass-special-tasks-non-word-names.t
index d9dcf9b..6166518
--- a/tests/events/00-suite.t
+++ b/tests/validate/36-pass-special-tasks-non-word-names.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,16 +15,27 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Test validation of special task names with non-word characters
 . "$(dirname "$0")/test_header"
-set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
-
-run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+set_test_number 1
+cat >'suite.rc' <<'__SUITE_RC__'
+[scheduling]
+    initial cycle point = 20200202
+    final cycle point = 20300303
+    [[special tasks]]
+        clock-triggered = t-1, t+1, t%1, t@1
+    [[dependencies]]
+        [[[P1D]]]
+            graph = """
+t-1
+t+1
+t%1
+t@1
+"""
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+[runtime]
+    [[t-1, t+1, t%1, t@1]]
+        script = true
+__SUITE_RC__
+run_ok "${TEST_NAME_BASE}" cylc validate --strict "${PWD}/suite.rc"
 exit
diff --git a/tests/validate/30-fail-max-active-cycle-points-zero.t b/tests/validate/37-fail-double-runahead.t
similarity index 81%
copy from tests/validate/30-fail-max-active-cycle-points-zero.t
copy to tests/validate/37-fail-double-runahead.t
index 5a582a9..cbd2013 100644
--- a/tests/validate/30-fail-max-active-cycle-points-zero.t
+++ b/tests/validate/37-fail-double-runahead.t
@@ -15,7 +15,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation with max cycle points = 0 in schelduling.
+# Test that validation catches use of 'runahead limit' and 'max active cycle
+# points', which are mutually exclusive.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -23,7 +24,9 @@ set_test_number 2
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE
-run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
-grep_ok "ERROR: max cycle points must be greater than 0" $TEST_NAME.stderr
+run_fail $TEST_NAME cylc validate --debug -v $SUITE_NAME
+grep_ok "ERROR: use 'runahead limit' OR 'max active cycle points', not both" \
+  $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/37-fail-double-runahead/suite.rc b/tests/validate/37-fail-double-runahead/suite.rc
new file mode 100644
index 0000000..369b3ca
--- /dev/null
+++ b/tests/validate/37-fail-double-runahead/suite.rc
@@ -0,0 +1,7 @@
+[scheduling]
+    initial cycle point = 2015-01-01
+    runahead limit = P2D
+    max active cycle points = 2
+    [[dependencies]]
+        [[[P1D]]]
+            graph = foo
diff --git a/tests/events/00-suite.t b/tests/validate/38-clock-trigger-task-not-defined.t
old mode 100644
new mode 100755
similarity index 63%
copy from tests/events/00-suite.t
copy to tests/validate/38-clock-trigger-task-not-defined.t
index d9dcf9b..6e47a88
--- a/tests/events/00-suite.t
+++ b/tests/validate/38-clock-trigger-task-not-defined.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,16 +15,23 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Test that validation fails when a clock-trigger task is not defined in the runtime.
 . "$(dirname "$0")/test_header"
 set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
-
-run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
-
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+cat >'suite.rc' <<'__SUITE_RC__'
+[scheduling]
+    initial cycle point = 20200101
+    [[special tasks]]
+        clock-trigger = foo(PT0M)
+    [[dependencies]]
+        [[[T00]]]
+            graph = bar
+[runtime]
+    [[bar]]
+        script = true
+__SUITE_RC__
+run_fail "${TEST_NAME_BASE}" cylc validate --strict "${PWD}/suite.rc"
+cmp_ok "${TEST_NAME_BASE}.stderr" <<'__ERR__'
+'ERROR: clock-trigger task "foo" is not defined.'
+__ERR__
 exit
diff --git a/tests/validate/24-fail-year-bounds.t b/tests/validate/39-degenerate-point-format.t
old mode 100644
new mode 100755
similarity index 91%
copy from tests/validate/24-fail-year-bounds.t
copy to tests/validate/39-degenerate-point-format.t
index e2359d2..2fc9eac
--- a/tests/validate/24-fail-year-bounds.t
+++ b/tests/validate/39-degenerate-point-format.t
@@ -24,7 +24,8 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE
 run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
-grep_ok "incompatible with \[cylc\]cycle point num expanded year digits = 0" \
+grep_ok "Sequence R/2015-08/P1D, point format %Y-%m: equal adjacent points: 2015-08 => 2015-08." \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/39-degenerate-point-format/suite.rc b/tests/validate/39-degenerate-point-format/suite.rc
new file mode 100644
index 0000000..4c1bfa8
--- /dev/null
+++ b/tests/validate/39-degenerate-point-format/suite.rc
@@ -0,0 +1,7 @@
+[cylc]
+    cycle point format = %Y-%m
+[scheduling]
+    initial cycle point = 2015-08
+    [[dependencies]]
+        [[[P1D]]]
+            graph = foo
diff --git a/tests/events/00-suite.t b/tests/validate/40-fail-suicide-left.t
old mode 100644
new mode 100755
similarity index 67%
copy from tests/events/00-suite.t
copy to tests/validate/40-fail-suicide-left.t
index d9dcf9b..05ce35a
--- a/tests/events/00-suite.t
+++ b/tests/validate/40-fail-suicide-left.t
@@ -15,16 +15,21 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Test the validation error message for LHS suicide.
 . "$(dirname "$0")/test_header"
 set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
 
-run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+cat >'suite.rc' <<'__SUITE_RC__'
+[scheduling]
+    [[dependencies]]
+        graph = """!dont-kill-me => no-problem"""
+[runtime]
+    [[dont-kill-me, no-problem]]
+__SUITE_RC__
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+run_fail "${TEST_NAME_BASE}" cylc validate 'suite.rc'
+cmp_ok "${TEST_NAME_BASE}.stderr" <<'__ERR__'
+!dont-kill-me => no-problem
+'ERROR: suicide must be on the right of a trigger (!dont-kill-me)'
+__ERR__
 exit
diff --git a/tests/events/00-suite.t b/tests/validate/41-mixed-syntax-global-suite.t
old mode 100644
new mode 100755
similarity index 51%
copy from tests/events/00-suite.t
copy to tests/validate/41-mixed-syntax-global-suite.t
index d9dcf9b..acdb938
--- a/tests/events/00-suite.t
+++ b/tests/validate/41-mixed-syntax-global-suite.t
@@ -15,16 +15,45 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Validate and run the events/suite test suite
+# Test validation with global.rc and suite.rc using opposing syntax.
 . "$(dirname "$0")/test_header"
 set_test_number 2
-install_suite "${TEST_NAME_BASE}" 'suite'
 
-run_ok "${TEST_NAME_BASE}-validate" cylc validate "${SUITE_NAME}"
-suite_run_ok "${TEST_NAME_BASE}-run" \
-    cylc run --reference-test --debug "${SUITE_NAME}"
+cat >'global.rc' <<'__GLOBAL_RC__'
+[cylc]
+    [[event hooks]]
+        timeout = P1D
+__GLOBAL_RC__
 
-for SUFFIX in '' '-shutdown' '-startup' '-timeout'; do
-    purge_suite "${SUITE_NAME}${SUFFIX}"
-done
+cat >'suite.rc' <<'__SUITE_RC__'
+[scheduling]
+    [[dependencies]]
+        graph = t0
+[runtime]
+    [[t0]]
+        script = true
+        [[[events]]]
+            execution timeout = 10
+__SUITE_RC__
+
+CYLC_CONF_PATH="${PWD}" run_ok "${TEST_NAME_BASE}" cylc validate 'suite.rc'
+
+cat >'global.rc' <<'__GLOBAL_RC__'
+[cylc]
+    [[event hooks]]
+        timeout = 1440
+__GLOBAL_RC__
+
+cat >'suite.rc' <<'__SUITE_RC__'
+[scheduling]
+    [[dependencies]]
+        graph = t0
+[runtime]
+    [[t0]]
+        script = true
+        [[[events]]]
+            execution timeout = PT10M
+__SUITE_RC__
+
+CYLC_CONF_PATH="${PWD}" run_ok "${TEST_NAME_BASE}" cylc validate 'suite.rc'
 exit
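Both validations above are expected to succeed; together they check that ISO 8601 durations and the older integer-minutes syntax can be mixed between global.rc and suite.rc. For reference, the values used in the heredocs are equivalent pairs:

    # ISO 8601 duration            legacy integer minutes
    #   timeout = P1D                timeout = 1440           # one day
    #   execution timeout = PT10M    execution timeout = 10   # ten minutes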
diff --git a/tests/validate/11-bad-sequence-2-digit-century.t b/tests/validate/42-jinja2-template-syntax-error-main.t
similarity index 71%
copy from tests/validate/11-bad-sequence-2-digit-century.t
copy to tests/validate/42-jinja2-template-syntax-error-main.t
index 0f0dcef..88b05b0 100644
--- a/tests/validate/11-bad-sequence-2-digit-century.t
+++ b/tests/validate/42-jinja2-template-syntax-error-main.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation for a bad sequence interval.
+# Test validation for a bad Jinja2 TemplateSyntaxError suite.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -23,6 +23,16 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-val
 run_fail "$TEST_NAME" cylc validate suite.rc
-grep_ok "2 digit centuries not allowed" "$TEST_NAME.stderr"
+cmp_ok "$TEST_NAME.stderr" <<'__ERROR__'
+Jinja2Error:
+  File "<unknown>", line 6, in template
+TemplateSyntaxError: Encountered unknown tag 'end'. Jinja was looking for the following tags: 'elif' or 'else' or 'endif'. The innermost block that needs to be closed is 'if'.
+Context lines:
+    [[dependencies]]
+        {% if true %}
+        graph = foo
+        {% end if %	<-- Jinja2Error
+__ERROR__
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/42-jinja2-template-syntax-error-main/suite.rc b/tests/validate/42-jinja2-template-syntax-error-main/suite.rc
new file mode 100644
index 0000000..d58304e
--- /dev/null
+++ b/tests/validate/42-jinja2-template-syntax-error-main/suite.rc
@@ -0,0 +1,10 @@
+#!jinja2
+
+[scheduling]
+    [[dependencies]]
+        {% if true %}
+        graph = foo
+        {% end if %
+[runtime]
+    [[foo]]
+        script = sleep 1
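The unterminated tag {% end if % is the deliberate error under test (Jinja2 closes an if-block with endif). For contrast, a corrected version of the same fixture, as a sketch that is not part of the commit:

    cat > suite.rc <<'__SUITE_RC__'
    #!jinja2
    [scheduling]
        [[dependencies]]
            {% if true %}
            graph = foo
            {% endif %}
    [runtime]
        [[foo]]
            script = sleep 1
    __SUITE_RC__
    cylc validate suite.rc   # should now get past the Jinja2 stage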
diff --git a/tests/validate/11-bad-sequence-2-digit-century.t b/tests/validate/43-jinja2-template-syntax-error-cylc-include.t
similarity index 70%
copy from tests/validate/11-bad-sequence-2-digit-century.t
copy to tests/validate/43-jinja2-template-syntax-error-cylc-include.t
index 0f0dcef..a607110 100644
--- a/tests/validate/11-bad-sequence-2-digit-century.t
+++ b/tests/validate/43-jinja2-template-syntax-error-cylc-include.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation for a bad sequence interval.
+# Test validation for a bad Jinja2 TemplateSyntaxError in a suite cylc include.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -23,6 +23,16 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-val
 run_fail "$TEST_NAME" cylc validate suite.rc
-grep_ok "2 digit centuries not allowed" "$TEST_NAME.stderr"
+cmp_ok "$TEST_NAME.stderr" <<'__ERROR__'
+Jinja2Error:
+  File "<unknown>", line 7, in template
+TemplateSyntaxError: Encountered unknown tag 'end'. Jinja was looking for the following tags: 'elif' or 'else' or 'endif'. The innermost block that needs to be closed is 'if'.
+Context lines:
+# This is a bit of graph configuration.
+        {% if true %}
+        graph = foo
+        {% end if %	<-- Jinja2Error
+__ERROR__
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/43-jinja2-template-syntax-error-cylc-include/suite-includeme.rc b/tests/validate/43-jinja2-template-syntax-error-cylc-include/suite-includeme.rc
new file mode 100644
index 0000000..b96dc3c
--- /dev/null
+++ b/tests/validate/43-jinja2-template-syntax-error-cylc-include/suite-includeme.rc
@@ -0,0 +1,4 @@
+# This is a bit of graph configuration.
+        {% if true %}
+        graph = foo
+        {% end if %
diff --git a/tests/validate/43-jinja2-template-syntax-error-cylc-include/suite.rc b/tests/validate/43-jinja2-template-syntax-error-cylc-include/suite.rc
new file mode 100644
index 0000000..fe255da
--- /dev/null
+++ b/tests/validate/43-jinja2-template-syntax-error-cylc-include/suite.rc
@@ -0,0 +1,8 @@
+#!jinja2
+
+[scheduling]
+    [[dependencies]]
+%include suite-includeme.rc
+[runtime]
+    [[foo]]
+        script = sleep 1
diff --git a/tests/validate/11-bad-sequence-2-digit-century.t b/tests/validate/44-jinja2-template-syntax-error-jinja-include.t
similarity index 72%
copy from tests/validate/11-bad-sequence-2-digit-century.t
copy to tests/validate/44-jinja2-template-syntax-error-jinja-include.t
index 0f0dcef..16ca14b 100644
--- a/tests/validate/11-bad-sequence-2-digit-century.t
+++ b/tests/validate/44-jinja2-template-syntax-error-jinja-include.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation for a bad sequence interval.
+# Test validation for a bad Jinja2 TemplateSyntaxError in a jinja include.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -23,6 +23,13 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-val
 run_fail "$TEST_NAME" cylc validate suite.rc
-grep_ok "2 digit centuries not allowed" "$TEST_NAME.stderr"
+sed -i 's/^  File ".*", line/  File "FILE", line/g' "$TEST_NAME.stderr"
+cmp_ok "$TEST_NAME.stderr" <<'__ERROR__'
+Jinja2Error:
+  File "FILE", line 3, in template
+    {% end if %
+TemplateSyntaxError: Encountered unknown tag 'end'. Jinja was looking for the following tags: 'elif' or 'else' or 'endif'. The innermost block that needs to be closed is 'if'.
+__ERROR__
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/44-jinja2-template-syntax-error-jinja-include/suite-includeme.rc b/tests/validate/44-jinja2-template-syntax-error-jinja-include/suite-includeme.rc
new file mode 100644
index 0000000..cc60db3
--- /dev/null
+++ b/tests/validate/44-jinja2-template-syntax-error-jinja-include/suite-includeme.rc
@@ -0,0 +1,3 @@
+        {% if true %}
+        graph = foo
+        {% end if %
diff --git a/tests/validate/44-jinja2-template-syntax-error-jinja-include/suite.rc b/tests/validate/44-jinja2-template-syntax-error-jinja-include/suite.rc
new file mode 100644
index 0000000..61282d1
--- /dev/null
+++ b/tests/validate/44-jinja2-template-syntax-error-jinja-include/suite.rc
@@ -0,0 +1,7 @@
+#!jinja2
+[scheduling]
+    [[dependencies]]
+{% include 'suite-includeme.rc' %}
+[runtime]
+    [[foo]]
+        script = sleep 1
diff --git a/tests/validate/11-bad-sequence-2-digit-century.t b/tests/validate/45-jinja2-template-error-main.t
similarity index 72%
copy from tests/validate/11-bad-sequence-2-digit-century.t
copy to tests/validate/45-jinja2-template-error-main.t
index 0f0dcef..7b4b543 100644
--- a/tests/validate/11-bad-sequence-2-digit-century.t
+++ b/tests/validate/45-jinja2-template-error-main.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation for a bad sequence interval.
+# Test validation for a filter Jinja2 error with no line number.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -23,6 +23,14 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-val
 run_fail "$TEST_NAME" cylc validate suite.rc
-grep_ok "2 digit centuries not allowed" "$TEST_NAME.stderr"
+# Filter Python version specific output, e.g.:
+#   File "/usr/lib/python2.6/site-packages/jinja2/filters.py", line 183, in do_dictsort
+sed -i '/File.*in do_dictsort/d' "$TEST_NAME.stderr"
+cmp_ok "$TEST_NAME.stderr" <<'__ERROR__'
+Jinja2Error:
+    raise FilterArgumentError('You can only sort by either '
+FilterArgumentError: You can only sort by either "key" or "value"
+__ERROR__
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/45-jinja2-template-error-main/suite.rc b/tests/validate/45-jinja2-template-error-main/suite.rc
new file mode 100644
index 0000000..9f7e475
--- /dev/null
+++ b/tests/validate/45-jinja2-template-error-main/suite.rc
@@ -0,0 +1,8 @@
+#!jinja2
+{% set foo = {} %}
+[scheduling]
+    [[dependencies]]
+        graph = {{ foo|dictsort(by='by') }}
+[runtime]
+    [[foo]]
+        script = sleep 1
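The failure exercised here comes from dictsort(by='by'): Jinja2's built-in dictsort filter only accepts by='key' (the default) or by='value'. A quick way to see the valid behaviour, assuming a python with the jinja2 package importable (a sketch, not part of the commit):

    python -c "import jinja2; print(jinja2.Template(\"{{ d|dictsort(by='value') }}\").render(d={'b': 2, 'a': 1}))"
    # prints [('a', 1), ('b', 2)] -- items sorted by value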
diff --git a/tests/validate/11-bad-sequence-2-digit-century.t b/tests/validate/46-jinja2-template-not-found.t
similarity index 78%
copy from tests/validate/11-bad-sequence-2-digit-century.t
copy to tests/validate/46-jinja2-template-not-found.t
index 0f0dcef..1930b66 100644
--- a/tests/validate/11-bad-sequence-2-digit-century.t
+++ b/tests/validate/46-jinja2-template-not-found.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation for a bad sequence interval.
+# Test validation for a template-not-found, no-line-number Jinja2 error.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -23,6 +23,13 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-val
 run_fail "$TEST_NAME" cylc validate suite.rc
-grep_ok "2 digit centuries not allowed" "$TEST_NAME.stderr"
+sed -i 's/^  File ".*/  File "FILE", line NN, in ROUTINE/g' "$TEST_NAME.stderr"
+cmp_ok "$TEST_NAME.stderr" <<'__ERROR__'
+Jinja2Error:
+  File "FILE", line NN, in ROUTINE
+    raise TemplateNotFound(template)
+TemplateNotFound: suite-foo.rc
+__ERROR__
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/46-jinja2-template-not-found/suite.rc b/tests/validate/46-jinja2-template-not-found/suite.rc
new file mode 100644
index 0000000..b1f4d80
--- /dev/null
+++ b/tests/validate/46-jinja2-template-not-found/suite.rc
@@ -0,0 +1,6 @@
+#!jinja2
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+{% include 'suite-foo.rc' %}
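Here the include points at a file that never exists, which is what raises TemplateNotFound. Two ways out, sketched under the assumption that the suite really did want its runtime section from an include (neither is part of the commit):

    # 1. Provide the missing file next to suite.rc:
    cat > suite-foo.rc <<'__INCLUDE__'
        [[foo]]
            script = sleep 1
    __INCLUDE__
    # 2. Or, in plain Jinja2, mark the include as optional:
    #        {% include 'suite-foo.rc' ignore missing %}
    #    (whether an optional runtime include is sensible in a suite is a separate question)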
diff --git a/tests/validate/11-bad-sequence-2-digit-century.t b/tests/validate/47-jinja2-type-error.t
similarity index 81%
copy from tests/validate/11-bad-sequence-2-digit-century.t
copy to tests/validate/47-jinja2-type-error.t
index 0f0dcef..5ca303e 100644
--- a/tests/validate/11-bad-sequence-2-digit-century.t
+++ b/tests/validate/47-jinja2-type-error.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation for a bad sequence interval.
+# Test validation for a Jinja2 type error, with no line number info.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -23,6 +23,11 @@ install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE-val
 run_fail "$TEST_NAME" cylc validate suite.rc
-grep_ok "2 digit centuries not allowed" "$TEST_NAME.stderr"
+cmp_ok "$TEST_NAME.stderr" <<'__ERROR__'
+Jinja2Error:
+  File "<template>", line 5, in top-level template code
+TypeError: unsupported operand type(s) for /: 'int' and 'str'
+__ERROR__
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/47-jinja2-type-error/suite.rc b/tests/validate/47-jinja2-type-error/suite.rc
new file mode 100644
index 0000000..487aeec
--- /dev/null
+++ b/tests/validate/47-jinja2-type-error/suite.rc
@@ -0,0 +1,6 @@
+#!jinja2
+
+[scheduling]
+    [[dependencies]]
+        graph = foo
+{{ 1 / 'foo' }}
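The expression {{ 1 / 'foo' }} fails inside Jinja2 before cylc parses anything, hence the traceback with no suite line number. A sketch of the usual fix, coercing the string with the built-in int filter (assumes a python with jinja2 importable; not part of the commit):

    python -c "import jinja2; print(jinja2.Template(\"{{ 1 / ('2'|int) }}\").render())"
    # prints 0.5 -- Jinja2's / operator always performs true division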
diff --git a/tests/validate/24-fail-year-bounds.t b/tests/validate/48-fail-bad-vis-nod-attrs.t
similarity index 85%
copy from tests/validate/24-fail-year-bounds.t
copy to tests/validate/48-fail-bad-vis-nod-attrs.t
index e2359d2..eca71be 100644
--- a/tests/validate/24-fail-year-bounds.t
+++ b/tests/validate/48-fail-bad-vis-nod-attrs.t
@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test validation with a new-style cycle point and an async graph.
+# Test validation failure for bad vis node attributes.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 2
@@ -23,8 +23,10 @@ set_test_number 2
 install_suite $TEST_NAME_BASE $TEST_NAME_BASE
 #-------------------------------------------------------------------------------
 TEST_NAME=$TEST_NAME_BASE
-run_fail $TEST_NAME cylc validate --debug -v -v $SUITE_NAME
-grep_ok "incompatible with \[cylc\]cycle point num expanded year digits = 0" \
+run_fail $TEST_NAME cylc validate --debug -v $SUITE_NAME
+grep_ok \
+    "Node attributes must be of the form 'key1=value1', 'key2=value2', etc." \
     $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
+purge_suite $SUITE_NAME
 exit
diff --git a/tests/validate/48-fail-bad-vis-nod-attrs/suite.rc b/tests/validate/48-fail-bad-vis-nod-attrs/suite.rc
new file mode 100644
index 0000000..1d5532f
--- /dev/null
+++ b/tests/validate/48-fail-bad-vis-nod-attrs/suite.rc
@@ -0,0 +1,8 @@
+[scheduling]
+    [[dependencies]]
+        graph = foo
+[runtime]
+    [[foo]]
+[visualization]
+    [[node attributes]]
+        foo = "blue" # Should be foo = "fillcolor=blue".
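The fixture's own trailing comment shows the accepted form. For completeness, a sketch of the same suite with the attribute written as a 'key=value' pair, which should get past the node-attribute check (not part of the commit):

    cat > suite.rc <<'__SUITE_RC__'
    [scheduling]
        [[dependencies]]
            graph = foo
    [runtime]
        [[foo]]
    [visualization]
        [[node attributes]]
            foo = "fillcolor=blue"
    __SUITE_RC__
    cylc validate suite.rc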
diff --git a/tests/cyclers/26-no_final_cycle_point.t b/tests/validate/49-fail-no-graph.t
similarity index 65%
copy from tests/cyclers/26-no_final_cycle_point.t
copy to tests/validate/49-fail-no-graph.t
index 62b1f20..1f8e553 100644
--- a/tests/cyclers/26-no_final_cycle_point.t
+++ b/tests/validate/49-fail-no-graph.t
@@ -1,7 +1,7 @@
 #!/bin/bash
 # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
 # Copyright (C) 2008-2015 NIWA
-# 
+#
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation, either version 3 of the License, or
@@ -15,21 +15,26 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 #-------------------------------------------------------------------------------
-# Test intercycle dependencies.
+# Test validation fails if no graph is defined.
 . $(dirname $0)/test_header
 #-------------------------------------------------------------------------------
 set_test_number 4
 #-------------------------------------------------------------------------------
-install_suite $TEST_NAME_BASE no_final_cycle_point
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-validate
-run_fail $TEST_NAME cylc validate $SUITE_NAME
-grep_ok "This suite requires a final cycle point\." \
-    $TEST_NAME.stderr
-#-------------------------------------------------------------------------------
-TEST_NAME=$TEST_NAME_BASE-run
-run_fail $TEST_NAME cylc run --debug $SUITE_NAME
-grep_ok "This suite requires a final cycle point\." \
-    $TEST_NAME.stderr
+TEST_NAME=${TEST_NAME_BASE}-empty-graph
+cat > suite.rc <<__END__
+[scheduling]
+    [[dependencies]]
+        graph = ""
+__END__
+run_fail $TEST_NAME cylc validate --debug -v suite.rc
+grep_ok "No suite dependency graph defined." $TEST_NAME.stderr
 #-------------------------------------------------------------------------------
-purge_suite $SUITE_NAME
+TEST_NAME=${TEST_NAME_BASE}-no-graph
+cat > suite.rc <<__END__
+[scheduling]
+    initial cycle point = 2015
+    [[dependencies]]
+        [[[R1]]]
+__END__
+run_fail $TEST_NAME cylc validate --debug -v suite.rc
+grep_ok "No suite dependency graph defined." $TEST_NAME.stderr

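Both cases in 49-fail-no-graph.t end in the same "No suite dependency graph defined." message. For contrast, a minimal sketch that does define a graph and is expected to validate (not part of the commit):

    cat > suite.rc <<'__END__'
    [scheduling]
        initial cycle point = 2015
        [[dependencies]]
            [[[R1]]]
                graph = foo
    [runtime]
        [[foo]]
            script = true
    __END__
    cylc validate --debug -v suite.rc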
-- 