[Debtorrent-commits] r1 - in /debtorrent: ./ branches/ branches/upstream/ branches/upstream/current/ branches/upstream/current/BitTornado/ branches/upstream/current/BitTornado/BT1/ branches/upstream/current/BitTornado/BT1/CVS/ branches/upstream/current/BitTornado/CVS/ branches/upstream/current/CVS/ branches/upstream/current/docs/ branches/upstream/current/docs/CVS/ branches/upstream/current/docs/man/ branches/upstream/current/docs/man/CVS/ branches/upstream/current/icons/ branches/upstream/current/icons/CVS/ branches/upstream/current/targets/ branches/upstream/current/targets/CVS/ branches/upstream/current/test/ branches/upstream/current/test/CVS/ branches/upstream/current/test/multitracker/ branches/upstream/current/test/multitracker/CVS/ branches/upstream/current/test/multitracker/allowed/ branches/upstream/current/test/multitracker/allowed/CVS/ branches/upstream/current/test/tracker/ branches/upstream/current/test/tracker/CVS/ branches/upstream/current/thosts/ branches/upstream/current/thosts/CVS/

camrdale-guest at users.alioth.debian.org
Wed Apr 25 16:32:02 UTC 2007


Author: camrdale-guest
Date: Sat Apr 14 18:47:18 2007
New Revision: 1

URL: http://svn.debian.org/wsvn/debtorrent/?sc=1&rev=1
Log:
[svn-inject] Installing original source of bittornado

Added:
    debtorrent/
    debtorrent/branches/
    debtorrent/branches/upstream/
    debtorrent/branches/upstream/current/
    debtorrent/branches/upstream/current/.cvsignore
    debtorrent/branches/upstream/current/BitTornado/
    debtorrent/branches/upstream/current/BitTornado/.cvsignore
    debtorrent/branches/upstream/current/BitTornado/BT1/
    debtorrent/branches/upstream/current/BitTornado/BT1/.cvsignore
    debtorrent/branches/upstream/current/BitTornado/BT1/CVS/
    debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries
    debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Extra
    debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Old
    debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Repository
    debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Root
    debtorrent/branches/upstream/current/BitTornado/BT1/Choker.py
    debtorrent/branches/upstream/current/BitTornado/BT1/Connecter.py
    debtorrent/branches/upstream/current/BitTornado/BT1/Downloader.py
    debtorrent/branches/upstream/current/BitTornado/BT1/DownloaderFeedback.py
    debtorrent/branches/upstream/current/BitTornado/BT1/Encrypter.py
    debtorrent/branches/upstream/current/BitTornado/BT1/FileSelector.py
    debtorrent/branches/upstream/current/BitTornado/BT1/Filter.py
    debtorrent/branches/upstream/current/BitTornado/BT1/HTTPDownloader.py
    debtorrent/branches/upstream/current/BitTornado/BT1/NatCheck.py
    debtorrent/branches/upstream/current/BitTornado/BT1/PiecePicker.py
    debtorrent/branches/upstream/current/BitTornado/BT1/Rerequester.py
    debtorrent/branches/upstream/current/BitTornado/BT1/Statistics.py
    debtorrent/branches/upstream/current/BitTornado/BT1/Storage.py
    debtorrent/branches/upstream/current/BitTornado/BT1/StorageWrapper.py
    debtorrent/branches/upstream/current/BitTornado/BT1/StreamCheck.py
    debtorrent/branches/upstream/current/BitTornado/BT1/T2T.py
    debtorrent/branches/upstream/current/BitTornado/BT1/Uploader.py
    debtorrent/branches/upstream/current/BitTornado/BT1/__init__.py
    debtorrent/branches/upstream/current/BitTornado/BT1/btformats.py
    debtorrent/branches/upstream/current/BitTornado/BT1/fakeopen.py
    debtorrent/branches/upstream/current/BitTornado/BT1/makemetafile.py
    debtorrent/branches/upstream/current/BitTornado/BT1/track.py
    debtorrent/branches/upstream/current/BitTornado/BTcrypto.py
    debtorrent/branches/upstream/current/BitTornado/CVS/
    debtorrent/branches/upstream/current/BitTornado/CVS/Entries
    debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Extra
    debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Log
    debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Old
    debtorrent/branches/upstream/current/BitTornado/CVS/Repository
    debtorrent/branches/upstream/current/BitTornado/CVS/Root
    debtorrent/branches/upstream/current/BitTornado/ConfigDir.py
    debtorrent/branches/upstream/current/BitTornado/ConfigReader.py
    debtorrent/branches/upstream/current/BitTornado/ConnChoice.py
    debtorrent/branches/upstream/current/BitTornado/CreateIcons.py
    debtorrent/branches/upstream/current/BitTornado/CurrentRateMeasure.py
    debtorrent/branches/upstream/current/BitTornado/HTTPHandler.py
    debtorrent/branches/upstream/current/BitTornado/PSYCO.py
    debtorrent/branches/upstream/current/BitTornado/RateLimiter.py
    debtorrent/branches/upstream/current/BitTornado/RateMeasure.py
    debtorrent/branches/upstream/current/BitTornado/RawServer.py
    debtorrent/branches/upstream/current/BitTornado/ServerPortHandler.py
    debtorrent/branches/upstream/current/BitTornado/SocketHandler.py
    debtorrent/branches/upstream/current/BitTornado/__init__.py
    debtorrent/branches/upstream/current/BitTornado/bencode.py
    debtorrent/branches/upstream/current/BitTornado/bitfield.py
    debtorrent/branches/upstream/current/BitTornado/clock.py
    debtorrent/branches/upstream/current/BitTornado/download_bt1.py
    debtorrent/branches/upstream/current/BitTornado/inifile.py
    debtorrent/branches/upstream/current/BitTornado/iprangeparse.py
    debtorrent/branches/upstream/current/BitTornado/launchmanycore.py
    debtorrent/branches/upstream/current/BitTornado/natpunch.py
    debtorrent/branches/upstream/current/BitTornado/parseargs.py
    debtorrent/branches/upstream/current/BitTornado/parsedir.py
    debtorrent/branches/upstream/current/BitTornado/piecebuffer.py
    debtorrent/branches/upstream/current/BitTornado/selectpoll.py
    debtorrent/branches/upstream/current/BitTornado/subnetparse.py
    debtorrent/branches/upstream/current/BitTornado/torrentlistparse.py
    debtorrent/branches/upstream/current/BitTornado/zurllib.py
    debtorrent/branches/upstream/current/CVS/
    debtorrent/branches/upstream/current/CVS/Entries
    debtorrent/branches/upstream/current/CVS/Entries.Extra
    debtorrent/branches/upstream/current/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/CVS/Entries.Log
    debtorrent/branches/upstream/current/CVS/Entries.Old
    debtorrent/branches/upstream/current/CVS/Repository
    debtorrent/branches/upstream/current/CVS/Root
    debtorrent/branches/upstream/current/LICENSE.txt
    debtorrent/branches/upstream/current/README.txt
    debtorrent/branches/upstream/current/bittorrent.nsi
    debtorrent/branches/upstream/current/bt-t-make.py   (with props)
    debtorrent/branches/upstream/current/bt_MakeCreateIcons.py   (with props)
    debtorrent/branches/upstream/current/btcompletedir.py   (with props)
    debtorrent/branches/upstream/current/btcompletedirgui.py   (with props)
    debtorrent/branches/upstream/current/btcopyannounce.py   (with props)
    debtorrent/branches/upstream/current/btdownloadcurses.py   (with props)
    debtorrent/branches/upstream/current/btdownloadgui.py   (with props)
    debtorrent/branches/upstream/current/btdownloadheadless.py   (with props)
    debtorrent/branches/upstream/current/btlaunchmany.py   (with props)
    debtorrent/branches/upstream/current/btlaunchmanycurses.py   (with props)
    debtorrent/branches/upstream/current/btmakemetafile.py   (with props)
    debtorrent/branches/upstream/current/btmaketorrentgui.py   (with props)
    debtorrent/branches/upstream/current/btreannounce.py   (with props)
    debtorrent/branches/upstream/current/btrename.py   (with props)
    debtorrent/branches/upstream/current/btsethttpseeds.py   (with props)
    debtorrent/branches/upstream/current/btshowmetainfo.py   (with props)
    debtorrent/branches/upstream/current/bttrack.py   (with props)
    debtorrent/branches/upstream/current/completedir.nsi
    debtorrent/branches/upstream/current/docs/
    debtorrent/branches/upstream/current/docs/BUILD.windows.txt
    debtorrent/branches/upstream/current/docs/CVS/
    debtorrent/branches/upstream/current/docs/CVS/Entries
    debtorrent/branches/upstream/current/docs/CVS/Entries.Extra
    debtorrent/branches/upstream/current/docs/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/docs/CVS/Entries.Log
    debtorrent/branches/upstream/current/docs/CVS/Entries.Old
    debtorrent/branches/upstream/current/docs/CVS/Repository
    debtorrent/branches/upstream/current/docs/CVS/Root
    debtorrent/branches/upstream/current/docs/FAQ.txt
    debtorrent/branches/upstream/current/docs/IMPORTANT-multitracker-readme.txt
    debtorrent/branches/upstream/current/docs/INSTALL.unix.txt
    debtorrent/branches/upstream/current/docs/README-Psyco.txt
    debtorrent/branches/upstream/current/docs/credits.txt
    debtorrent/branches/upstream/current/docs/man/
    debtorrent/branches/upstream/current/docs/man/CVS/
    debtorrent/branches/upstream/current/docs/man/CVS/Entries
    debtorrent/branches/upstream/current/docs/man/CVS/Entries.Extra
    debtorrent/branches/upstream/current/docs/man/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/docs/man/CVS/Entries.Old
    debtorrent/branches/upstream/current/docs/man/CVS/Repository
    debtorrent/branches/upstream/current/docs/man/CVS/Root
    debtorrent/branches/upstream/current/docs/man/bittorrent-downloader.bittornado.1
    debtorrent/branches/upstream/current/docs/man/bittorrent-multi-downloader.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btcompletedir.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btcompletedirgui.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btcopyannounce.1
    debtorrent/branches/upstream/current/docs/man/btdownloadcurses.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btdownloadgui.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btdownloadheadless.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btlaunchmany.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btlaunchmanycurses.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btmakemetafile.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btmaketorrentgui.1
    debtorrent/branches/upstream/current/docs/man/btreannounce.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btrename.bittornado.1
    debtorrent/branches/upstream/current/docs/man/btsethttpseeds.1
    debtorrent/branches/upstream/current/docs/man/btshowmetainfo.bittornado.1
    debtorrent/branches/upstream/current/docs/man/bttrack.bittornado.1
    debtorrent/branches/upstream/current/docs/multitracker-spec.txt
    debtorrent/branches/upstream/current/docs/webseed-spec.txt
    debtorrent/branches/upstream/current/icons/
    debtorrent/branches/upstream/current/icons/CVS/
    debtorrent/branches/upstream/current/icons/CVS/Entries
    debtorrent/branches/upstream/current/icons/CVS/Entries.Extra
    debtorrent/branches/upstream/current/icons/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/icons/CVS/Entries.Old
    debtorrent/branches/upstream/current/icons/CVS/Repository
    debtorrent/branches/upstream/current/icons/CVS/Root
    debtorrent/branches/upstream/current/icons/alloc.gif   (with props)
    debtorrent/branches/upstream/current/icons/black.ico   (with props)
    debtorrent/branches/upstream/current/icons/black1.ico   (with props)
    debtorrent/branches/upstream/current/icons/blue.ico   (with props)
    debtorrent/branches/upstream/current/icons/green.ico   (with props)
    debtorrent/branches/upstream/current/icons/green1.ico   (with props)
    debtorrent/branches/upstream/current/icons/icon_bt.ico   (with props)
    debtorrent/branches/upstream/current/icons/icon_done.ico   (with props)
    debtorrent/branches/upstream/current/icons/red.ico   (with props)
    debtorrent/branches/upstream/current/icons/white.ico   (with props)
    debtorrent/branches/upstream/current/icons/yellow.ico   (with props)
    debtorrent/branches/upstream/current/icons/yellow1.ico   (with props)
    debtorrent/branches/upstream/current/ipranges.portugal.txt
    debtorrent/branches/upstream/current/setup.py   (with props)
    debtorrent/branches/upstream/current/targets/
    debtorrent/branches/upstream/current/targets/CVS/
    debtorrent/branches/upstream/current/targets/CVS/Entries
    debtorrent/branches/upstream/current/targets/CVS/Entries.Extra
    debtorrent/branches/upstream/current/targets/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/targets/CVS/Entries.Old
    debtorrent/branches/upstream/current/targets/CVS/Repository
    debtorrent/branches/upstream/current/targets/CVS/Root
    debtorrent/branches/upstream/current/targets/default(toobig).gif   (with props)
    debtorrent/branches/upstream/current/targets/default-large.gif   (with props)
    debtorrent/branches/upstream/current/targets/default-small.gif   (with props)
    debtorrent/branches/upstream/current/targets/default.gif   (with props)
    debtorrent/branches/upstream/current/targets/zip.gif   (with props)
    debtorrent/branches/upstream/current/test/
    debtorrent/branches/upstream/current/test/CVS/
    debtorrent/branches/upstream/current/test/CVS/Entries
    debtorrent/branches/upstream/current/test/CVS/Entries.Extra
    debtorrent/branches/upstream/current/test/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/test/CVS/Entries.Old
    debtorrent/branches/upstream/current/test/CVS/Repository
    debtorrent/branches/upstream/current/test/CVS/Root
    debtorrent/branches/upstream/current/test/multitracker/
    debtorrent/branches/upstream/current/test/multitracker/CVS/
    debtorrent/branches/upstream/current/test/multitracker/CVS/Entries
    debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Extra
    debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Log
    debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Old
    debtorrent/branches/upstream/current/test/multitracker/CVS/Repository
    debtorrent/branches/upstream/current/test/multitracker/CVS/Root
    debtorrent/branches/upstream/current/test/multitracker/README.txt
    debtorrent/branches/upstream/current/test/multitracker/allowed/
    debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/
    debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries
    debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Extra
    debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Old
    debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Repository
    debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Root
    debtorrent/branches/upstream/current/test/multitracker/allowed/blah.torrent   (with props)
    debtorrent/branches/upstream/current/test/multitracker/tracker0.bat
    debtorrent/branches/upstream/current/test/multitracker/tracker1.bat
    debtorrent/branches/upstream/current/test/multitracker/tracker2.bat
    debtorrent/branches/upstream/current/test/multitracker/tracker3.bat
    debtorrent/branches/upstream/current/test/tracker/
    debtorrent/branches/upstream/current/test/tracker/CVS/
    debtorrent/branches/upstream/current/test/tracker/CVS/Entries
    debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Extra
    debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Old
    debtorrent/branches/upstream/current/test/tracker/CVS/Repository
    debtorrent/branches/upstream/current/test/tracker/CVS/Root
    debtorrent/branches/upstream/current/test/tracker/FAQ.txt
    debtorrent/branches/upstream/current/test/tracker/FAQ.txt.torrent
    debtorrent/branches/upstream/current/test/tracker/tracker.bat
    debtorrent/branches/upstream/current/thosts/
    debtorrent/branches/upstream/current/thosts/ASS.thost
    debtorrent/branches/upstream/current/thosts/CVS/
    debtorrent/branches/upstream/current/thosts/CVS/Entries
    debtorrent/branches/upstream/current/thosts/CVS/Entries.Extra
    debtorrent/branches/upstream/current/thosts/CVS/Entries.Extra.Old
    debtorrent/branches/upstream/current/thosts/CVS/Entries.Old
    debtorrent/branches/upstream/current/thosts/CVS/Repository
    debtorrent/branches/upstream/current/thosts/CVS/Root
    debtorrent/branches/upstream/current/thosts/ILA.thost
    debtorrent/branches/upstream/current/thosts/Plucker.thost
    debtorrent/branches/upstream/current/wincompletedirsetup.py   (with props)
    debtorrent/branches/upstream/current/winsetup.py   (with props)

Added: debtorrent/branches/upstream/current/.cvsignore
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/.cvsignore?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/.cvsignore (added)
+++ debtorrent/branches/upstream/current/.cvsignore Sat Apr 14 18:47:18 2007
@@ -1,0 +1,4 @@
+statefile
+*.pyc
+*.old
+*.new

Added: debtorrent/branches/upstream/current/BitTornado/.cvsignore
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/.cvsignore?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/.cvsignore (added)
+++ debtorrent/branches/upstream/current/BitTornado/.cvsignore Sat Apr 14 18:47:18 2007
@@ -1,0 +1,4 @@
+statefile
+*.pyc
+*.old
+*.new

Added: debtorrent/branches/upstream/current/BitTornado/BT1/.cvsignore
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/.cvsignore?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/.cvsignore (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/.cvsignore Sat Apr 14 18:47:18 2007
@@ -1,0 +1,4 @@
+statefile
+*.pyc
+*.old
+*.new

Added: debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,24 @@
+/.cvsignore/1.1/Tue Feb 24 17:22:05 2004//
+/Choker.py/1.14/Wed Jan  5 23:22:41 2005//
+/Connecter.py/1.23/Wed Dec 13 03:38:09 2006//
+/Downloader.py/1.45/Sat Mar  4 05:53:16 2006//
+/DownloaderFeedback.py/1.14/Tue Dec 12 05:57:35 2006//
+/Encrypter.py/1.49/Fri Dec 22 02:03:45 2006//
+/FileSelector.py/1.24/Sat Mar  4 05:53:16 2006//
+/Filter.py/1.1/Wed Dec 22 05:34:31 2004//
+/HTTPDownloader.py/1.16/Wed Jan 26 23:17:04 2005//
+/NatCheck.py/1.4/Sat Dec 23 20:18:26 2006//
+/PiecePicker.py/1.30/Tue Nov 14 04:47:49 2006//
+/Rerequester.py/1.27/Fri Dec 22 02:09:49 2006//
+/Statistics.py/1.21/Tue Apr 26 17:56:25 2005//
+/Storage.py/1.26/Mon Oct  9 20:17:07 2006//
+/StorageWrapper.py/1.56/Sat Mar  4 05:53:16 2006//
+/StreamCheck.py/1.1/Sat May 15 18:15:02 2004//
+/T2T.py/1.6/Thu Dec 21 20:41:23 2006//
+/Uploader.py/1.10/Thu Apr 14 17:59:27 2005//
+/__init__.py/1.4/Tue Feb 24 21:29:22 2004//
+/btformats.py/1.3/Tue May 25 19:00:58 2004//
+/fakeopen.py/1.1/Tue Feb 24 17:22:05 2004//
+/makemetafile.py/1.6/Tue Jan  4 04:49:28 2005//
+/track.py/1.65/Sat Dec 23 20:18:26 2006//
+D

Added: debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,23 @@
+/.cvsignore////*///
+/Choker.py////*///
+/Connecter.py////*///
+/Downloader.py////*///
+/DownloaderFeedback.py////*///
+/Encrypter.py////*///
+/FileSelector.py////*///
+/Filter.py////*///
+/HTTPDownloader.py////*///
+/NatCheck.py////*///
+/PiecePicker.py////*///
+/Rerequester.py////*///
+/Statistics.py////*///
+/Storage.py////*///
+/StorageWrapper.py////*///
+/StreamCheck.py////*///
+/T2T.py////*///
+/Uploader.py////*///
+/__init__.py////*///
+/btformats.py////*///
+/fakeopen.py////*///
+/makemetafile.py////*///
+/track.py////*///

Added: debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/BitTornado/BT1

Added: debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Root (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/BitTornado/BT1/Choker.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/Choker.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/Choker.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/Choker.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,128 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from random import randrange, shuffle
+from BitTornado.clock import clock
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+class Choker:
+    def __init__(self, config, schedule, picker, done = lambda: False):
+        self.config = config
+        self.round_robin_period = config['round_robin_period']
+        self.schedule = schedule
+        self.picker = picker
+        self.connections = []
+        self.last_preferred = 0
+        self.last_round_robin = clock()
+        self.done = done
+        self.super_seed = False
+        self.paused = False
+        schedule(self._round_robin, 5)
+
+    def set_round_robin_period(self, x):
+        self.round_robin_period = x
+
+    def _round_robin(self):
+        self.schedule(self._round_robin, 5)
+        if self.super_seed:
+            cons = range(len(self.connections))
+            to_close = []
+            count = self.config['min_uploads']-self.last_preferred
+            if count > 0:   # optimization
+                shuffle(cons)
+            for c in cons:
+                i = self.picker.next_have(self.connections[c], count > 0)
+                if i is None:
+                    continue
+                if i < 0:
+                    to_close.append(self.connections[c])
+                    continue
+                self.connections[c].send_have(i)
+                count -= 1
+            for c in to_close:
+                c.close()
+        if self.last_round_robin + self.round_robin_period < clock():
+            self.last_round_robin = clock()
+            for i in xrange(1, len(self.connections)):
+                c = self.connections[i]
+                u = c.get_upload()
+                if u.is_choked() and u.is_interested():
+                    self.connections = self.connections[i:] + self.connections[:i]
+                    break
+        self._rechoke()
+
+    def _rechoke(self):
+        preferred = []
+        maxuploads = self.config['max_uploads']
+        if self.paused:
+            for c in self.connections:
+                c.get_upload().choke()
+            return
+        if maxuploads > 1:
+            for c in self.connections:
+                u = c.get_upload()
+                if not u.is_interested():
+                    continue
+                if self.done():
+                    r = u.get_rate()
+                else:
+                    d = c.get_download()
+                    r = d.get_rate()
+                    if r < 1000 or d.is_snubbed():
+                        continue
+                preferred.append((-r, c))
+            self.last_preferred = len(preferred)
+            preferred.sort()
+            del preferred[maxuploads-1:]
+            preferred = [x[1] for x in preferred]
+        count = len(preferred)
+        hit = False
+        to_unchoke = []
+        for c in self.connections:
+            u = c.get_upload()
+            if c in preferred:
+                to_unchoke.append(u)
+            else:
+                if count < maxuploads or not hit:
+                    to_unchoke.append(u)
+                    if u.is_interested():
+                        count += 1
+                        hit = True
+                else:
+                    u.choke()
+        for u in to_unchoke:
+            u.unchoke()
+
+    def connection_made(self, connection, p = None):
+        if p is None:
+            p = randrange(-2, len(self.connections) + 1)
+        self.connections.insert(max(p, 0), connection)
+        self._rechoke()
+
+    def connection_lost(self, connection):
+        self.connections.remove(connection)
+        self.picker.lost_peer(connection)
+        if connection.get_upload().is_interested() and not connection.get_upload().is_choked():
+            self._rechoke()
+
+    def interested(self, connection):
+        if not connection.get_upload().is_choked():
+            self._rechoke()
+
+    def not_interested(self, connection):
+        if not connection.get_upload().is_choked():
+            self._rechoke()
+
+    def set_super_seed(self):
+        while self.connections:             # close all connections
+            self.connections[0].close()
+        self.picker.set_superseed()
+        self.super_seed = True
+
+    def pause(self, flag):
+        self.paused = flag
+        self._rechoke()

Added: debtorrent/branches/upstream/current/BitTornado/BT1/Connecter.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/Connecter.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/Connecter.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/Connecter.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,328 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.bitfield import Bitfield
+from BitTornado.clock import clock
+from binascii import b2a_hex
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG1 = False
+DEBUG2 = False
+
+def toint(s):
+    return long(b2a_hex(s), 16)
+
+def tobinary(i):
+    return (chr(i >> 24) + chr((i >> 16) & 0xFF) + 
+        chr((i >> 8) & 0xFF) + chr(i & 0xFF))
+
+CHOKE = chr(0)
+UNCHOKE = chr(1)
+INTERESTED = chr(2)
+NOT_INTERESTED = chr(3)
+# index
+HAVE = chr(4)
+# index, bitfield
+BITFIELD = chr(5)
+# index, begin, length
+REQUEST = chr(6)
+# index, begin, piece
+PIECE = chr(7)
+# index, begin, piece
+CANCEL = chr(8)
+
+class Connection:
+    def __init__(self, connection, connecter, ccount):
+        self.connection = connection
+        self.connecter = connecter
+        self.ccount = ccount
+        self.got_anything = False
+        self.next_upload = None
+        self.outqueue = []
+        self.partial_message = None
+        self.download = None
+        self.send_choke_queued = False
+        self.just_unchoked = None
+
+    def get_ip(self, real=False):
+        return self.connection.get_ip(real)
+
+    def get_id(self):
+        return self.connection.get_id()
+
+    def get_readable_id(self):
+        return self.connection.get_readable_id()
+
+    def close(self):
+        if DEBUG1:
+            print (self.ccount,'connection closed')
+        self.connection.close()
+
+    def is_locally_initiated(self):
+        return self.connection.is_locally_initiated()
+
+    def is_encrypted(self):
+        return self.connection.is_encrypted()
+
+    def send_interested(self):
+        self._send_message(INTERESTED)
+
+    def send_not_interested(self):
+        self._send_message(NOT_INTERESTED)
+
+    def send_choke(self):
+        if self.partial_message:
+            self.send_choke_queued = True
+        else:
+            self._send_message(CHOKE)
+            self.upload.choke_sent()
+            self.just_unchoked = 0
+
+    def send_unchoke(self):
+        if self.send_choke_queued:
+            self.send_choke_queued = False
+            if DEBUG1:
+                print (self.ccount,'CHOKE SUPPRESSED')
+        else:
+            self._send_message(UNCHOKE)
+            if ( self.partial_message or self.just_unchoked is None
+                 or not self.upload.interested or self.download.active_requests ):
+                self.just_unchoked = 0
+            else:
+                self.just_unchoked = clock()
+
+    def send_request(self, index, begin, length):
+        self._send_message(REQUEST + tobinary(index) + 
+            tobinary(begin) + tobinary(length))
+        if DEBUG1:
+            print (self.ccount,'sent request',index,begin,begin+length)
+
+    def send_cancel(self, index, begin, length):
+        self._send_message(CANCEL + tobinary(index) + 
+            tobinary(begin) + tobinary(length))
+        if DEBUG1:
+            print (self.ccount,'sent cancel',index,begin,begin+length)
+
+    def send_bitfield(self, bitfield):
+        self._send_message(BITFIELD + bitfield)
+
+    def send_have(self, index):
+        self._send_message(HAVE + tobinary(index))
+
+    def send_keepalive(self):
+        self._send_message('')
+
+    def _send_message(self, s):
+        if DEBUG2:
+            if s:
+                print (self.ccount,'SENDING MESSAGE',ord(s[0]),len(s))
+            else:
+                print (self.ccount,'SENDING MESSAGE',-1,0)
+        s = tobinary(len(s))+s
+        if self.partial_message:
+            self.outqueue.append(s)
+        else:
+            self.connection.send_message_raw(s)
+
+    def send_partial(self, bytes):
+        if self.connection.closed:
+            return 0
+        if self.partial_message is None:
+            s = self.upload.get_upload_chunk()
+            if s is None:
+                return 0
+            index, begin, piece = s
+            self.partial_message = ''.join((
+                            tobinary(len(piece) + 9), PIECE,
+                            tobinary(index), tobinary(begin), piece.tostring() ))
+            if DEBUG1:
+                print (self.ccount,'sending chunk',index,begin,begin+len(piece))
+
+        if bytes < len(self.partial_message):
+            self.connection.send_message_raw(self.partial_message[:bytes])
+            self.partial_message = self.partial_message[bytes:]
+            return bytes
+
+        q = [self.partial_message]
+        self.partial_message = None
+        if self.send_choke_queued:
+            self.send_choke_queued = False
+            self.outqueue.append(tobinary(1)+CHOKE)
+            self.upload.choke_sent()
+            self.just_unchoked = 0
+        q.extend(self.outqueue)
+        self.outqueue = []
+        q = ''.join(q)
+        self.connection.send_message_raw(q)
+        return len(q)
+
+    def get_upload(self):
+        return self.upload
+
+    def get_download(self):
+        return self.download
+
+    def set_download(self, download):
+        self.download = download
+
+    def backlogged(self):
+        return not self.connection.is_flushed()
+
+    def got_request(self, i, p, l):
+        self.upload.got_request(i, p, l)
+        if self.just_unchoked:
+            self.connecter.ratelimiter.ping(clock() - self.just_unchoked)
+            self.just_unchoked = 0
+    
+
+
+
+class Connecter:
+    def __init__(self, make_upload, downloader, choker, numpieces,
+            totalup, config, ratelimiter, sched = None):
+        self.downloader = downloader
+        self.make_upload = make_upload
+        self.choker = choker
+        self.numpieces = numpieces
+        self.config = config
+        self.ratelimiter = ratelimiter
+        self.rate_capped = False
+        self.sched = sched
+        self.totalup = totalup
+        self.rate_capped = False
+        self.connections = {}
+        self.external_connection_made = 0
+        self.ccount = 0
+
+    def how_many_connections(self):
+        return len(self.connections)
+
+    def connection_made(self, connection):
+        self.ccount += 1
+        c = Connection(connection, self, self.ccount)
+        if DEBUG2:
+            print (c.ccount,'connection made')
+        self.connections[connection] = c
+        c.upload = self.make_upload(c, self.ratelimiter, self.totalup)
+        c.download = self.downloader.make_download(c)
+        self.choker.connection_made(c)
+        return c
+
+    def connection_lost(self, connection):
+        c = self.connections[connection]
+        if DEBUG2:
+            print (c.ccount,'connection closed')
+        del self.connections[connection]
+        if c.download:
+            c.download.disconnected()
+        self.choker.connection_lost(c)
+
+    def connection_flushed(self, connection):
+        conn = self.connections[connection]
+        if conn.next_upload is None and (conn.partial_message is not None
+               or len(conn.upload.buffer) > 0):
+            self.ratelimiter.queue(conn)
+            
+    def got_piece(self, i):
+        for co in self.connections.values():
+            co.send_have(i)
+
+    def got_message(self, connection, message):
+        c = self.connections[connection]
+        t = message[0]
+        if DEBUG2:
+            print (c.ccount,'message received',ord(t))
+        if t == BITFIELD and c.got_anything:
+            if DEBUG2:
+                print (c.ccount,'misplaced bitfield')
+            connection.close()
+            return
+        c.got_anything = True
+        if (t in [CHOKE, UNCHOKE, INTERESTED, NOT_INTERESTED] and 
+                len(message) != 1):
+            if DEBUG2:
+                print (c.ccount,'bad message length')
+            connection.close()
+            return
+        if t == CHOKE:
+            c.download.got_choke()
+        elif t == UNCHOKE:
+            c.download.got_unchoke()
+        elif t == INTERESTED:
+            if not c.download.have.complete():
+                c.upload.got_interested()
+        elif t == NOT_INTERESTED:
+            c.upload.got_not_interested()
+        elif t == HAVE:
+            if len(message) != 5:
+                if DEBUG2:
+                    print (c.ccount,'bad message length')
+                connection.close()
+                return
+            i = toint(message[1:])
+            if i >= self.numpieces:
+                if DEBUG2:
+                    print (c.ccount,'bad piece number')
+                connection.close()
+                return
+            if c.download.got_have(i):
+                c.upload.got_not_interested()
+        elif t == BITFIELD:
+            try:
+                b = Bitfield(self.numpieces, message[1:])
+            except ValueError:
+                if DEBUG2:
+                    print (c.ccount,'bad bitfield')
+                connection.close()
+                return
+            if c.download.got_have_bitfield(b):
+                c.upload.got_not_interested()
+        elif t == REQUEST:
+            if len(message) != 13:
+                if DEBUG2:
+                    print (c.ccount,'bad message length')
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                if DEBUG2:
+                    print (c.ccount,'bad piece number')
+                connection.close()
+                return
+            c.got_request(i, toint(message[5:9]), 
+                toint(message[9:]))
+        elif t == CANCEL:
+            if len(message) != 13:
+                if DEBUG2:
+                    print (c.ccount,'bad message length')
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                if DEBUG2:
+                    print (c.ccount,'bad piece number')
+                connection.close()
+                return
+            c.upload.got_cancel(i, toint(message[5:9]), 
+                toint(message[9:]))
+        elif t == PIECE:
+            if len(message) <= 9:
+                if DEBUG2:
+                    print (c.ccount,'bad message length')
+                connection.close()
+                return
+            i = toint(message[1:5])
+            if i >= self.numpieces:
+                if DEBUG2:
+                    print (c.ccount,'bad piece number')
+                connection.close()
+                return
+            if c.download.got_piece(i, toint(message[5:9]), message[9:]):
+                self.got_piece(i)
+        else:
+            connection.close()

Added: debtorrent/branches/upstream/current/BitTornado/BT1/Downloader.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/Downloader.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/Downloader.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/Downloader.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,594 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.CurrentRateMeasure import Measure
+from BitTornado.bitfield import Bitfield
+from random import shuffle
+from BitTornado.clock import clock
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+EXPIRE_TIME = 60 * 60
+
+class PerIPStats: 	 
+    def __init__(self, ip):
+        self.numgood = 0
+        self.bad = {}
+        self.numconnections = 0
+        self.lastdownload = None
+        self.peerid = None
+
+class BadDataGuard:
+    def __init__(self, download):
+        self.download = download
+        self.ip = download.ip
+        self.downloader = download.downloader
+        self.stats = self.downloader.perip[self.ip]
+        self.lastindex = None
+
+    def failed(self, index, bump = False):
+        self.stats.bad.setdefault(index, 0)
+        self.downloader.gotbaddata[self.ip] = 1
+        self.stats.bad[index] += 1
+        if len(self.stats.bad) > 1:
+            if self.download is not None:
+                self.downloader.try_kick(self.download)
+            elif self.stats.numconnections == 1 and self.stats.lastdownload is not None:
+                self.downloader.try_kick(self.stats.lastdownload)
+        if len(self.stats.bad) >= 3 and len(self.stats.bad) > int(self.stats.numgood/30):
+            self.downloader.try_ban(self.ip)
+        elif bump:
+            self.downloader.picker.bump(index)
+
+    def good(self, index):
+        # lastindex is a hack to only increase numgood by one for each good
+        # piece, however many chunks come from the connection(s) from this IP
+        if index != self.lastindex:
+            self.stats.numgood += 1
+            self.lastindex = index
+
+class SingleDownload:
+    def __init__(self, downloader, connection):
+        self.downloader = downloader
+        self.connection = connection
+        self.choked = True
+        self.interested = False
+        self.active_requests = []
+        self.measure = Measure(downloader.max_rate_period)
+        self.peermeasure = Measure(downloader.max_rate_period)
+        self.have = Bitfield(downloader.numpieces)
+        self.last = -1000
+        self.last2 = -1000
+        self.example_interest = None
+        self.backlog = 2
+        self.ip = connection.get_ip()
+        self.guard = BadDataGuard(self)
+
+    def _backlog(self, just_unchoked):
+        self.backlog = min(
+            2+int(4*self.measure.get_rate()/self.downloader.chunksize),
+            (2*just_unchoked)+self.downloader.queue_limit() )
+        if self.backlog > 50:
+            self.backlog = max(50, self.backlog * 0.075)
+        return self.backlog
+    
+    def disconnected(self):
+        self.downloader.lost_peer(self)
+        if self.have.complete():
+            self.downloader.picker.lost_seed()
+        else:
+            for i in xrange(len(self.have)):
+                if self.have[i]:
+                    self.downloader.picker.lost_have(i)
+        if self.have.complete() and self.downloader.storage.is_endgame():
+            self.downloader.add_disconnected_seed(self.connection.get_readable_id())
+        self._letgo()
+        self.guard.download = None
+
+    def _letgo(self):
+        if self.downloader.queued_out.has_key(self):
+            del self.downloader.queued_out[self]
+        if not self.active_requests:
+            return
+        if self.downloader.endgamemode:
+            self.active_requests = []
+            return
+        lost = {}
+        for index, begin, length in self.active_requests:
+            self.downloader.storage.request_lost(index, begin, length)
+            lost[index] = 1
+        lost = lost.keys()
+        self.active_requests = []
+        if self.downloader.paused:
+            return
+        ds = [d for d in self.downloader.downloads if not d.choked]
+        shuffle(ds)
+        for d in ds:
+            d._request_more()
+        for d in self.downloader.downloads:
+            if d.choked and not d.interested:
+                for l in lost:
+                    if d.have[l] and self.downloader.storage.do_I_have_requests(l):
+                        d.send_interested()
+                        break
+
+    def got_choke(self):
+        if not self.choked:
+            self.choked = True
+            self._letgo()
+
+    def got_unchoke(self):
+        if self.choked:
+            self.choked = False
+            if self.interested:
+                self._request_more(new_unchoke = True)
+            self.last2 = clock()
+
+    def is_choked(self):
+        return self.choked
+
+    def is_interested(self):
+        return self.interested
+
+    def send_interested(self):
+        if not self.interested:
+            self.interested = True
+            self.connection.send_interested()
+            if not self.choked:
+                self.last2 = clock()
+
+    def send_not_interested(self):
+        if self.interested:
+            self.interested = False
+            self.connection.send_not_interested()
+
+    def got_piece(self, index, begin, piece):
+        length = len(piece)
+        try:
+            self.active_requests.remove((index, begin, length))
+        except ValueError:
+            self.downloader.discarded += length
+            return False
+        if self.downloader.endgamemode:
+            self.downloader.all_requests.remove((index, begin, length))
+        self.last = clock()
+        self.last2 = clock()
+        self.measure.update_rate(length)
+        self.downloader.measurefunc(length)
+        if not self.downloader.storage.piece_came_in(index, begin, piece, self.guard):
+            self.downloader.piece_flunked(index)
+            return False
+        if self.downloader.storage.do_I_have(index):
+            self.downloader.picker.complete(index)
+        if self.downloader.endgamemode:
+            for d in self.downloader.downloads:
+                if d is not self:
+                  if d.interested:
+                    if d.choked:
+                        assert not d.active_requests
+                        d.fix_download_endgame()
+                    else:
+                        try:
+                            d.active_requests.remove((index, begin, length))
+                        except ValueError:
+                            continue
+                        d.connection.send_cancel(index, begin, length)
+                        d.fix_download_endgame()
+                  else:
+                      assert not d.active_requests
+        self._request_more()
+        self.downloader.check_complete(index)
+        return self.downloader.storage.do_I_have(index)
+
+    def _request_more(self, new_unchoke = False):
+        assert not self.choked
+        if self.downloader.endgamemode:
+            self.fix_download_endgame(new_unchoke)
+            return
+        if self.downloader.paused:
+            return
+        if len(self.active_requests) >= self._backlog(new_unchoke):
+            if not (self.active_requests or self.backlog):
+                self.downloader.queued_out[self] = 1
+            return
+        lost_interests = []
+        while len(self.active_requests) < self.backlog:
+            interest = self.downloader.picker.next(self.have,
+                               self.downloader.storage.do_I_have_requests,
+                               self.downloader.too_many_partials())
+            if interest is None:
+                break
+            self.example_interest = interest
+            self.send_interested()
+            loop = True
+            while len(self.active_requests) < self.backlog and loop:
+                begin, length = self.downloader.storage.new_request(interest)
+                self.downloader.picker.requested(interest)
+                self.active_requests.append((interest, begin, length))
+                self.connection.send_request(interest, begin, length)
+                self.downloader.chunk_requested(length)
+                if not self.downloader.storage.do_I_have_requests(interest):
+                    loop = False
+                    lost_interests.append(interest)
+        if not self.active_requests:
+            self.send_not_interested()
+        if lost_interests:
+            for d in self.downloader.downloads:
+                if d.active_requests or not d.interested:
+                    continue
+                if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest):
+                    continue
+                for lost in lost_interests:
+                    if d.have[lost]:
+                        break
+                else:
+                    continue
+                interest = self.downloader.picker.next(d.have,
+                                   self.downloader.storage.do_I_have_requests,
+                                   self.downloader.too_many_partials())
+                if interest is None:
+                    d.send_not_interested()
+                else:
+                    d.example_interest = interest
+        if self.downloader.storage.is_endgame():
+            self.downloader.start_endgame()
+
+
+    def fix_download_endgame(self, new_unchoke = False):
+        if self.downloader.paused:
+            return
+        if len(self.active_requests) >= self._backlog(new_unchoke):
+            if not (self.active_requests or self.backlog) and not self.choked:
+                self.downloader.queued_out[self] = 1
+            return
+        want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests]
+        if not (self.active_requests or want):
+            self.send_not_interested()
+            return
+        if want:
+            self.send_interested()
+        if self.choked:
+            return
+        shuffle(want)
+        del want[self.backlog - len(self.active_requests):]
+        self.active_requests.extend(want)
+        for piece, begin, length in want:
+            self.connection.send_request(piece, begin, length)
+            self.downloader.chunk_requested(length)
+
+    def got_have(self, index):
+        if index == self.downloader.numpieces-1:
+            self.downloader.totalmeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
+            self.peermeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length)
+        else:
+            self.downloader.totalmeasure.update_rate(self.downloader.storage.piece_length)
+            self.peermeasure.update_rate(self.downloader.storage.piece_length)
+        if not self.have[index]:
+            self.have[index] = True
+            self.downloader.picker.got_have(index)
+            if self.have.complete():
+                self.downloader.picker.became_seed()
+                if self.downloader.storage.am_I_complete():
+                    self.downloader.add_disconnected_seed(self.connection.get_readable_id())
+                    self.connection.close()
+            elif self.downloader.endgamemode:
+                self.fix_download_endgame()
+            elif ( not self.downloader.paused
+                   and not self.downloader.picker.is_blocked(index)
+                   and self.downloader.storage.do_I_have_requests(index) ):
+                if not self.choked:
+                    self._request_more()
+                else:
+                    self.send_interested()
+        return self.have.complete()
+
+    def _check_interests(self):
+        if self.interested or self.downloader.paused:
+            return
+        for i in xrange(len(self.have)):
+            if ( self.have[i] and not self.downloader.picker.is_blocked(i)
+                 and ( self.downloader.endgamemode
+                       or self.downloader.storage.do_I_have_requests(i) ) ):
+                self.send_interested()
+                return
+
+    def got_have_bitfield(self, have):
+        if self.downloader.storage.am_I_complete() and have.complete():
+            if self.downloader.super_seeding:
+                self.connection.send_bitfield(have.tostring()) # be nice, show you're a seed too
+            self.connection.close()
+            self.downloader.add_disconnected_seed(self.connection.get_readable_id())
+            return False
+        self.have = have
+        if have.complete():
+            self.downloader.picker.got_seed()
+        else:
+            for i in xrange(len(have)):
+                if have[i]:
+                    self.downloader.picker.got_have(i)
+        if self.downloader.endgamemode and not self.downloader.paused:
+            for piece, begin, length in self.downloader.all_requests:
+                if self.have[piece]:
+                    self.send_interested()
+                    break
+        else:
+            self._check_interests()
+        return have.complete()
+
+    def get_rate(self):
+        return self.measure.get_rate()
+
+    def is_snubbed(self):
+        if ( self.interested and not self.choked
+             and clock() - self.last2 > self.downloader.snub_time ):
+            for index, begin, length in self.active_requests:
+                self.connection.send_cancel(index, begin, length)
+            self.got_choke()    # treat it just like a choke
+        return clock() - self.last > self.downloader.snub_time
+
+
+class Downloader:
+    def __init__(self, storage, picker, backlog, max_rate_period,
+                 numpieces, chunksize, measurefunc, snub_time,
+                 kickbans_ok, kickfunc, banfunc):
+        self.storage = storage
+        self.picker = picker
+        self.backlog = backlog
+        self.max_rate_period = max_rate_period
+        self.measurefunc = measurefunc
+        self.totalmeasure = Measure(max_rate_period*storage.piece_length/storage.request_size)
+        self.numpieces = numpieces
+        self.chunksize = chunksize
+        self.snub_time = snub_time
+        self.kickfunc = kickfunc
+        self.banfunc = banfunc
+        self.disconnectedseeds = {}
+        self.downloads = []
+        self.perip = {}
+        self.gotbaddata = {}
+        self.kicked = {}
+        self.banned = {}
+        self.kickbans_ok = kickbans_ok
+        self.kickbans_halted = False
+        self.super_seeding = False
+        self.endgamemode = False
+        self.endgame_queued_pieces = []
+        self.all_requests = []
+        self.discarded = 0L
+#        self.download_rate = 25000  # 25K/s test rate
+        self.download_rate = 0
+        self.bytes_requested = 0
+        self.last_time = clock()
+        self.queued_out = {}
+        self.requeueing = False
+        self.paused = False
+
+    def set_download_rate(self, rate):
+        self.download_rate = rate * 1000
+        self.bytes_requested = 0
+
+    def queue_limit(self):
+        if not self.download_rate:
+            return 10e10    # that's a big queue!
+        t = clock()
+        self.bytes_requested -= (t - self.last_time) * self.download_rate
+        self.last_time = t
+        if not self.requeueing and self.queued_out and self.bytes_requested < 0:
+            self.requeueing = True
+            q = self.queued_out.keys()
+            shuffle(q)
+            self.queued_out = {}
+            for d in q:
+                d._request_more()
+            self.requeueing = False
+        if -self.bytes_requested > 5*self.download_rate:
+            self.bytes_requested = -5*self.download_rate
+        return max(int(-self.bytes_requested/self.chunksize),0)
+
+    def chunk_requested(self, size):
+        self.bytes_requested += size
+
+    external_data_received = chunk_requested
+
+    def make_download(self, connection):
+        ip = connection.get_ip()
+        if self.perip.has_key(ip):
+            perip = self.perip[ip]
+        else:
+            perip = self.perip.setdefault(ip, PerIPStats(ip))
+        perip.peerid = connection.get_readable_id()
+        perip.numconnections += 1
+        d = SingleDownload(self, connection)
+        perip.lastdownload = d
+        self.downloads.append(d)
+        return d
+
+    def piece_flunked(self, index):
+        if self.paused:
+            return
+        if self.endgamemode:
+            if self.downloads:
+                while self.storage.do_I_have_requests(index):
+                    nb, nl = self.storage.new_request(index)
+                    self.all_requests.append((index, nb, nl))
+                for d in self.downloads:
+                    d.fix_download_endgame()
+                return
+            self._reset_endgame()
+            return
+        ds = [d for d in self.downloads if not d.choked]
+        shuffle(ds)
+        for d in ds:
+            d._request_more()
+        ds = [d for d in self.downloads if not d.interested and d.have[index]]
+        for d in ds:
+            d.example_interest = index
+            d.send_interested()
+
+    def has_downloaders(self):
+        return len(self.downloads)
+
+    def lost_peer(self, download):
+        ip = download.ip
+        self.perip[ip].numconnections -= 1
+        if self.perip[ip].lastdownload == download:
+            self.perip[ip].lastdownload = None
+        self.downloads.remove(download)
+        if self.endgamemode and not self.downloads: # all peers gone
+            self._reset_endgame()
+
+    def _reset_endgame(self):            
+        self.storage.reset_endgame(self.all_requests)
+        self.endgamemode = False
+        self.all_requests = []
+        self.endgame_queued_pieces = []
+
+
+    def add_disconnected_seed(self, id):
+#        if not self.disconnectedseeds.has_key(id):
+#            self.picker.seed_seen_recently()
+        self.disconnectedseeds[id]=clock()
+
+#	def expire_disconnected_seeds(self):
+
+    def num_disconnected_seeds(self):
+        # first expire old ones
+        expired = []
+        for id,t in self.disconnectedseeds.items():
+            if clock() - t > EXPIRE_TIME:     #Expire old seeds after so long
+                expired.append(id)
+        for id in expired:
+#            self.picker.seed_disappeared()
+            del self.disconnectedseeds[id]
+        return len(self.disconnectedseeds)
+        # if this isn't called by a stats-gathering function
+        # it should be scheduled to run every minute or two.
+
+    def _check_kicks_ok(self):
+        if len(self.gotbaddata) > 10:
+            self.kickbans_ok = False
+            self.kickbans_halted = True
+        return self.kickbans_ok and len(self.downloads) > 2
+
+    def try_kick(self, download):
+        if self._check_kicks_ok():
+            download.guard.download = None
+            ip = download.ip
+            id = download.connection.get_readable_id()
+            self.kicked[ip] = id
+            self.perip[ip].peerid = id
+            self.kickfunc(download.connection)
+        
+    def try_ban(self, ip):
+        if self._check_kicks_ok():
+            self.banfunc(ip)
+            self.banned[ip] = self.perip[ip].peerid
+            if self.kicked.has_key(ip):
+                del self.kicked[ip]
+
+    def set_super_seed(self):
+        self.super_seeding = True
+
+    def check_complete(self, index):
+        if self.endgamemode and not self.all_requests:
+            self.endgamemode = False
+        if self.endgame_queued_pieces and not self.endgamemode:
+            self.requeue_piece_download()
+        if self.storage.am_I_complete():
+            assert not self.all_requests
+            assert not self.endgamemode
+            for d in [i for i in self.downloads if i.have.complete()]:
+                d.connection.send_have(index)   # be nice, tell the other seed you completed
+                self.add_disconnected_seed(d.connection.get_readable_id())
+                d.connection.close()
+            return True
+        return False
+
+    def too_many_partials(self):
+        return len(self.storage.dirty) > (len(self.downloads)/2)
+
+
+    def cancel_piece_download(self, pieces):
+        if self.endgamemode:
+            if self.endgame_queued_pieces:
+                for piece in pieces:
+                    try:
+                        self.endgame_queued_pieces.remove(piece)
+                    except:
+                        pass
+            new_all_requests = []
+            for index, nb, nl in self.all_requests:
+                if index in pieces:
+                    self.storage.request_lost(index, nb, nl)
+                else:
+                    new_all_requests.append((index, nb, nl))
+            self.all_requests = new_all_requests
+
+        for d in self.downloads:
+            hit = False
+            for index, nb, nl in d.active_requests:
+                if index in pieces:
+                    hit = True
+                    d.connection.send_cancel(index, nb, nl)
+                    if not self.endgamemode:
+                        self.storage.request_lost(index, nb, nl)
+            if hit:
+                d.active_requests = [ r for r in d.active_requests
+                                      if r[0] not in pieces ]
+                d._request_more()
+            if not self.endgamemode and d.choked:
+                d._check_interests()
+
+    def requeue_piece_download(self, pieces = []):
+        if self.endgame_queued_pieces:
+            for piece in pieces:
+                if not piece in self.endgame_queued_pieces:
+                    self.endgame_queued_pieces.append(piece)
+            pieces = self.endgame_queued_pieces
+        if self.endgamemode:
+            if self.all_requests:
+                self.endgame_queued_pieces = pieces
+                return
+            self.endgamemode = False
+            self.endgame_queued_pieces = None
+           
+        ds = [d for d in self.downloads]
+        shuffle(ds)
+        for d in ds:
+            if d.choked:
+                d._check_interests()
+            else:
+                d._request_more()
+
+    def start_endgame(self):
+        assert not self.endgamemode
+        self.endgamemode = True
+        assert not self.all_requests
+        for d in self.downloads:
+            if d.active_requests:
+                assert d.interested and not d.choked
+            for request in d.active_requests:
+                assert not request in self.all_requests
+                self.all_requests.append(request)
+        for d in self.downloads:
+            d.fix_download_endgame()
+
+    def pause(self, flag):
+        self.paused = flag
+        if flag:
+            for d in self.downloads:
+                for index, begin, length in d.active_requests:
+                    d.connection.send_cancel(index, begin, length)
+                d._letgo()
+                d.send_not_interested()
+            if self.endgamemode:
+                self._reset_endgame()
+        else:
+            shuffle(self.downloads)
+            for d in self.downloads:
+                d._check_interests()
+                if d.interested and not d.choked:
+                    d._request_more()
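
For reference, the request throttle at the top of this class (set_download_rate / chunk_requested / queue_limit) is a token bucket: the byte budget refills at download_rate bytes per second, never holds more than five seconds' worth, and queue_limit() turns whatever is unspent into a number of whole chunks. A minimal standalone sketch of that arithmetic (illustrative only: CHUNK_SIZE stands in for Downloader.chunksize, assumed 16 KiB here, and the re-queueing of stalled peers is left out):

    import time

    class RateBudget(object):
        """Sketch of the token-bucket arithmetic behind queue_limit() above.
        CHUNK_SIZE stands in for Downloader.chunksize (assumed 16 KiB);
        the re-queueing of stalled peers is omitted."""

        CHUNK_SIZE = 2 ** 14

        def __init__(self, rate_kb_per_s):
            self.download_rate = rate_kb_per_s * 1000   # bytes/sec, as in set_download_rate()
            self.bytes_requested = 0
            self.last_time = time.time()

        def chunk_requested(self, size):
            self.bytes_requested += size                # spend budget when a block is requested

        def queue_limit(self):
            if not self.download_rate:
                return 10e10                            # no cap configured
            t = time.time()
            # the budget refills at download_rate bytes per second...
            self.bytes_requested -= (t - self.last_time) * self.download_rate
            self.last_time = t
            # ...but never holds more than five seconds' worth
            if -self.bytes_requested > 5 * self.download_rate:
                self.bytes_requested = -5 * self.download_rate
            # whole chunks the unspent budget still allows
            return max(int(-self.bytes_requested / self.CHUNK_SIZE), 0)

    budget = RateBudget(50)          # 50 kB/s cap
    time.sleep(1)
    print(budget.queue_limit())      # roughly 50000/16384, i.e. about 3 chunks after one second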

Added: debtorrent/branches/upstream/current/BitTornado/BT1/DownloaderFeedback.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/DownloaderFeedback.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/DownloaderFeedback.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/DownloaderFeedback.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,154 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from urllib import quote
+from threading import Event
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+INIT_STATE = (('R','R+'),('L','L+'))
+
+class DownloaderFeedback:
+    def __init__(self, choker, httpdl, add_task, upfunc, downfunc,
+            ratemeasure, leftfunc, file_length, finflag, sp, statistics,
+            statusfunc = None, interval = None):
+        self.choker = choker
+        self.httpdl = httpdl
+        self.add_task = add_task
+        self.upfunc = upfunc
+        self.downfunc = downfunc
+        self.ratemeasure = ratemeasure
+        self.leftfunc = leftfunc
+        self.file_length = file_length
+        self.finflag = finflag
+        self.sp = sp
+        self.statistics = statistics
+        self.lastids = []
+        self.spewdata = None
+        self.doneprocessing = Event()
+        self.doneprocessing.set()
+        if statusfunc:
+            self.autodisplay(statusfunc, interval)
+        
+
+    def _rotate(self):
+        cs = self.choker.connections
+        for id in self.lastids:
+            for i in xrange(len(cs)):
+                if cs[i].get_id() == id:
+                    return cs[i:] + cs[:i]
+        return cs
+
+    def spews(self):
+        l = []
+        cs = self._rotate()
+        self.lastids = [c.get_id() for c in cs]
+        for c in cs:
+            a = {}
+            a['id'] = c.get_readable_id()
+            a['ip'] = c.get_ip()
+            a['optimistic'] = (c is self.choker.connections[0])
+            a['direction'] = INIT_STATE[c.is_locally_initiated()][c.is_encrypted()]
+            u = c.get_upload()
+            a['uprate'] = int(u.measure.get_rate())
+            a['uinterested'] = u.is_interested()
+            a['uchoked'] = u.is_choked()
+            d = c.get_download()
+            a['downrate'] = int(d.measure.get_rate())
+            a['dinterested'] = d.is_interested()
+            a['dchoked'] = d.is_choked()
+            a['snubbed'] = d.is_snubbed()
+            a['utotal'] = d.connection.upload.measure.get_total()
+            a['dtotal'] = d.connection.download.measure.get_total()
+            if len(d.connection.download.have) > 0:
+                a['completed'] = float(len(d.connection.download.have)-d.connection.download.have.numfalse)/float(len(d.connection.download.have))
+            else:
+                a['completed'] = 1.0
+            a['speed'] = d.connection.download.peermeasure.get_rate()
+
+            l.append(a)                                               
+
+        for dl in self.httpdl.get_downloads():
+            if dl.goodseed:
+                a = {}
+                a['id'] = 'http seed'
+                a['ip'] = dl.baseurl
+                a['optimistic'] = False
+                a['direction'] = 'L'
+                a['uprate'] = 0
+                a['uinterested'] = False
+                a['uchoked'] = False
+                a['downrate'] = int(dl.measure.get_rate())
+                a['dinterested'] = True
+                a['dchoked'] = not dl.active
+                a['snubbed'] = not dl.active
+                a['utotal'] = None
+                a['dtotal'] = dl.measure.get_total()
+                a['completed'] = 1.0
+                a['speed'] = None
+
+                l.append(a)
+
+        return l
+
+
+    def gather(self, displayfunc = None):
+        s = {'stats': self.statistics.update()}
+        if self.sp.isSet():
+            s['spew'] = self.spews()
+        else:
+            s['spew'] = None
+        s['up'] = self.upfunc()
+        if self.finflag.isSet():
+            s['done'] = self.file_length
+            return s
+        s['down'] = self.downfunc()
+        obtained, desired = self.leftfunc()
+        s['done'] = obtained
+        s['wanted'] = desired
+        if desired > 0:
+            s['frac'] = float(obtained)/desired
+        else:
+            s['frac'] = 1.0
+        if desired == obtained:
+            s['time'] = 0
+        else:
+            s['time'] = self.ratemeasure.get_time_left(desired-obtained)
+        return s        
+
+
+    def display(self, displayfunc):
+        if not self.doneprocessing.isSet():
+            return
+        self.doneprocessing.clear()
+        stats = self.gather()
+        if self.finflag.isSet():
+            displayfunc(dpflag = self.doneprocessing,
+                upRate = stats['up'],
+                statistics = stats['stats'], spew = stats['spew'])
+        elif stats['time'] is not None:
+            displayfunc(dpflag = self.doneprocessing,
+                fractionDone = stats['frac'], sizeDone = stats['done'],
+                downRate = stats['down'], upRate = stats['up'],
+                statistics = stats['stats'], spew = stats['spew'],
+                timeEst = stats['time'])
+        else:
+            displayfunc(dpflag = self.doneprocessing,
+                fractionDone = stats['frac'], sizeDone = stats['done'],
+                downRate = stats['down'], upRate = stats['up'],
+                statistics = stats['stats'], spew = stats['spew'])
+
+
+    def autodisplay(self, displayfunc, interval):
+        self.displayfunc = displayfunc
+        self.interval = interval
+        self._autodisplay()
+
+    def _autodisplay(self):
+        self.add_task(self._autodisplay, self.interval)
+        self.display(self.displayfunc)
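
DownloaderFeedback.display() above clears the doneprocessing Event before invoking the display callback and returns early on later calls until the Event is set again, so the callback is expected to set dpflag once it has consumed the statistics. A minimal sketch of a compatible callback (the keyword names match the calls above; the body itself is illustrative):

    # Keyword names match the display() calls above; the body is illustrative.
    def statusfunc(dpflag=None, fractionDone=None, sizeDone=None,
                   downRate=None, upRate=None, timeEst=None,
                   statistics=None, spew=None):
        if fractionDone is not None:
            print('%5.1f%% done, down %s B/s, up %s B/s'
                  % (fractionDone * 100, downRate, upRate))
        else:
            print('seeding, up %s B/s' % upRate)
        if dpflag is not None:
            dpflag.set()    # lets the next display() run; it returns early otherwise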

Added: debtorrent/branches/upstream/current/BitTornado/BT1/Encrypter.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/Encrypter.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/Encrypter.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/Encrypter.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,657 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from binascii import b2a_hex
+from socket import error as socketerror
+from urllib import quote
+from traceback import print_exc
+from BitTornado.BTcrypto import Crypto
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+    bool = lambda x: not not x
+
+DEBUG = False
+
+MAX_INCOMPLETE = 8
+
+protocol_name = 'BitTorrent protocol'
+option_pattern = chr(0)*8
+
+def toint(s):
+    return long(b2a_hex(s), 16)
+
+def tobinary16(i):
+    return chr((i >> 8) & 0xFF) + chr(i & 0xFF)
+
+hexchars = '0123456789ABCDEF'
+hexmap = []
+for i in xrange(256):
+    hexmap.append(hexchars[(i&0xF0)/16]+hexchars[i&0x0F])
+
+def tohex(s):
+    r = []
+    for c in s:
+        r.append(hexmap[ord(c)])
+    return ''.join(r)
+
+def make_readable(s):
+    if not s:
+        return ''
+    if quote(s).find('%') >= 0:
+        return tohex(s)
+    return '"'+s+'"'
+   
+
+class IncompleteCounter:
+    def __init__(self):
+        self.c = 0
+    def increment(self):
+        self.c += 1
+    def decrement(self):
+        self.c -= 1
+    def toomany(self):
+        return self.c >= MAX_INCOMPLETE
+    
+incompletecounter = IncompleteCounter()
+
+
+# header, options, download id, my id, [length, message]
+
+class Connection:
+    def __init__(self, Encoder, connection, id,
+                 ext_handshake=False, encrypted = None, options = None):
+        self.Encoder = Encoder
+        self.connection = connection
+        self.connecter = Encoder.connecter
+        self.id = id
+        self.locally_initiated = (id != None)
+        self.readable_id = make_readable(id)
+        self.complete = False
+        self.keepalive = lambda: None
+        self.closed = False
+        self.buffer = ''
+        self.bufferlen = None
+        self.log = None
+        self.read = self._read
+        self.write = self._write
+        self.cryptmode = 0
+        self.encrypter = None
+        if self.locally_initiated:
+            incompletecounter.increment()
+            if encrypted:
+                self.encrypted = True
+                self.encrypter = Crypto(True)
+                self.write(self.encrypter.pubkey+self.encrypter.padding())
+            else:
+                self.encrypted = False
+                self.write(chr(len(protocol_name)) + protocol_name + 
+                    option_pattern + self.Encoder.download_id )
+            self.next_len, self.next_func = 1+len(protocol_name), self.read_header
+        elif ext_handshake:
+            self.Encoder.connecter.external_connection_made += 1
+            if encrypted:   # passed an already running encrypter
+                self.encrypter = encrypted
+                self.encrypted = True
+                self._start_crypto()
+                self.next_len, self.next_func = 14, self.read_crypto_block3c
+            else:
+                self.encrypted = False
+                self.options = options
+                self.write(self.Encoder.my_id)
+                self.next_len, self.next_func = 20, self.read_peer_id
+        else:
+            self.encrypted = None       # don't know yet
+            self.next_len, self.next_func = 1+len(protocol_name), self.read_header
+        self.Encoder.raw_server.add_task(self._auto_close, 30)
+
+
+    def _log_start(self):   # only called with DEBUG = True
+        self.log = open('peerlog.'+self.get_ip()+'.txt','a')
+        self.log.write('connected - ')
+        if self.locally_initiated:
+            self.log.write('outgoing\n')
+        else:
+            self.log.write('incoming\n')
+        self._logwritefunc = self.write
+        self.write = self._log_write
+
+    def _log_write(self, s):
+        self.log.write('w:'+b2a_hex(s)+'\n')
+        self._logwritefunc(s)
+        
+
+    def get_ip(self, real=False):
+        return self.connection.get_ip(real)
+
+    def get_id(self):
+        return self.id
+
+    def get_readable_id(self):
+        return self.readable_id
+
+    def is_locally_initiated(self):
+        return self.locally_initiated
+
+    def is_encrypted(self):
+        return bool(self.encrypted)
+
+    def is_flushed(self):
+        return self.connection.is_flushed()
+
+    def _read_header(self, s):
+        if s == chr(len(protocol_name))+protocol_name:
+            return 8, self.read_options
+        return None
+
+    def read_header(self, s):
+        if self._read_header(s):
+            if self.encrypted or self.Encoder.config['crypto_stealth']:
+                return None
+            return 8, self.read_options
+        if self.locally_initiated and not self.encrypted:
+            return None
+        elif not self.Encoder.config['crypto_allowed']:
+            return None
+        if not self.encrypted:
+            self.encrypted = True
+            self.encrypter = Crypto(self.locally_initiated)
+        self._write_buffer(s)
+        return self.encrypter.keylength, self.read_crypto_header
+
+    ################## ENCRYPTION SUPPORT ######################
+
+    def _start_crypto(self):
+        self.encrypter.setrawaccess(self._read,self._write)
+        self.write = self.encrypter.write
+        self.read = self.encrypter.read
+        if self.buffer:
+            self.buffer = self.encrypter.decrypt(self.buffer)
+
+    def _end_crypto(self):
+        self.read = self._read
+        self.write = self._write
+        self.encrypter = None
+
+    def read_crypto_header(self, s):
+        self.encrypter.received_key(s)
+        self.encrypter.set_skey(self.Encoder.download_id)
+        if self.locally_initiated:
+            if self.Encoder.config['crypto_only']:
+                cryptmode = '\x00\x00\x00\x02'    # full stream encryption
+            else:
+                cryptmode = '\x00\x00\x00\x03'    # header or full stream
+            padc = self.encrypter.padding()
+            self.write( self.encrypter.block3a
+                      + self.encrypter.block3b
+                      + self.encrypter.encrypt(
+                            ('\x00'*8)            # VC
+                          + cryptmode             # acceptable crypto modes
+                          + tobinary16(len(padc))
+                          + padc                  # PadC
+                          + '\x00\x00' ) )        # no initial payload data
+            self._max_search = 520
+            return 1, self.read_crypto_block4a
+        self.write(self.encrypter.pubkey+self.encrypter.padding())
+        self._max_search = 520
+        return 0, self.read_crypto_block3a
+
+    def _search_for_pattern(self, s, pat):
+        p = s.find(pat)
+        if p < 0:
+            if len(s) >= len(pat):
+                self._max_search -= len(s)+1-len(pat)
+            if self._max_search < 0:
+                self.close()
+                return False
+            self._write_buffer(s[1-len(pat):])
+            return False
+        self._write_buffer(s[p+len(pat):])
+        return True
+
+    ### INCOMING CONNECTION ###
+
+    def read_crypto_block3a(self, s):
+        if not self._search_for_pattern(s,self.encrypter.block3a):
+            return -1, self.read_crypto_block3a     # wait for more data
+        return len(self.encrypter.block3b), self.read_crypto_block3b
+
+    def read_crypto_block3b(self, s):
+        if s != self.encrypter.block3b:
+            return None
+        self.Encoder.connecter.external_connection_made += 1
+        self._start_crypto()
+        return 14, self.read_crypto_block3c
+
+    def read_crypto_block3c(self, s):
+        if s[:8] != ('\x00'*8):             # check VC
+            return None
+        self.cryptmode = toint(s[8:12]) % 4
+        if self.cryptmode == 0:
+            return None                     # no encryption selected
+        if ( self.cryptmode == 1            # only header encryption
+             and self.Encoder.config['crypto_only'] ):
+            return None
+        padlen = (ord(s[12])<<8)+ord(s[13])
+        if padlen > 512:
+            return None
+        return padlen+2, self.read_crypto_pad3
+
+    def read_crypto_pad3(self, s):
+        s = s[-2:]
+        ialen = (ord(s[0])<<8)+ord(s[1])
+        if ialen > 65535:
+            return None
+        if self.cryptmode == 1:
+            cryptmode = '\x00\x00\x00\x01'    # header only encryption
+        else:
+            cryptmode = '\x00\x00\x00\x02'    # full stream encryption
+        padd = self.encrypter.padding()
+        self.write( ('\x00'*8)            # VC
+                  + cryptmode             # encryption mode
+                  + tobinary16(len(padd))
+                  + padd )                # PadD
+        if ialen:
+            return ialen, self.read_crypto_ia
+        return self.read_crypto_block3done()
+
+    def read_crypto_ia(self, s):
+        if DEBUG:
+            self._log_start()
+            self.log.write('r:'+b2a_hex(s)+'(ia)\n')
+            if self.buffer:
+                self.log.write('r:'+b2a_hex(self.buffer)+'(buffer)\n')
+        return self.read_crypto_block3done(s)
+
+    def read_crypto_block3done(self, ia=''):
+        if DEBUG:
+            if not self.log:
+                self._log_start()
+        if self.cryptmode == 1:     # only handshake encryption
+            assert not self.buffer  # oops; check for exceptions to this
+            self._end_crypto()
+        if ia:
+            self._write_buffer(ia)
+        return 1+len(protocol_name), self.read_encrypted_header
+
+    ### OUTGOING CONNECTION ###
+
+    def read_crypto_block4a(self, s):
+        if not self._search_for_pattern(s,self.encrypter.VC_pattern()):
+            return -1, self.read_crypto_block4a     # wait for more data
+        self._start_crypto()
+        return 6, self.read_crypto_block4b
+
+    def read_crypto_block4b(self, s):
+        self.cryptmode = toint(s[:4]) % 4
+        if self.cryptmode == 1:             # only header encryption
+            if self.Encoder.config['crypto_only']:
+                return None
+        elif self.cryptmode != 2:
+            return None                     # unknown encryption
+        padlen = (ord(s[4])<<8)+ord(s[5])
+        if padlen > 512:
+            return None
+        if padlen:
+            return padlen, self.read_crypto_pad4
+        return self.read_crypto_block4done()
+
+    def read_crypto_pad4(self, s):
+        # discard data
+        return self.read_crypto_block4done()
+
+    def read_crypto_block4done(self):
+        if DEBUG:
+            self._log_start()
+        if self.cryptmode == 1:     # only handshake encryption
+            if not self.buffer:  # oops; check for exceptions to this
+                return None
+            self._end_crypto()
+        self.write(chr(len(protocol_name)) + protocol_name + 
+            option_pattern + self.Encoder.download_id)
+        return 1+len(protocol_name), self.read_encrypted_header
+
+    ### START PROTOCOL OVER ENCRYPTED CONNECTION ###
+
+    def read_encrypted_header(self, s):
+        return self._read_header(s)
+
+    ################################################
+
+    def read_options(self, s):
+        self.options = s
+        return 20, self.read_download_id
+
+    def read_download_id(self, s):
+        if ( s != self.Encoder.download_id
+             or not self.Encoder.check_ip(ip=self.get_ip()) ):
+            return None
+        if not self.locally_initiated:
+            if not self.encrypted:
+                self.Encoder.connecter.external_connection_made += 1
+            self.write(chr(len(protocol_name)) + protocol_name + 
+                option_pattern + self.Encoder.download_id + self.Encoder.my_id)
+        return 20, self.read_peer_id
+
+    def read_peer_id(self, s):
+        if not self.encrypted and self.Encoder.config['crypto_only']:
+            return None     # allows older trackers to ping,
+                            # but won't proceed w/ connections
+        if not self.id:
+            self.id = s
+            self.readable_id = make_readable(s)
+        else:
+            if s != self.id:
+                return None
+        self.complete = self.Encoder.got_id(self)
+        if not self.complete:
+            return None
+        if self.locally_initiated:
+            self.write(self.Encoder.my_id)
+            incompletecounter.decrement()
+        self._switch_to_read2()
+        c = self.Encoder.connecter.connection_made(self)
+        self.keepalive = c.send_keepalive
+        return 4, self.read_len
+
+    def read_len(self, s):
+        l = toint(s)
+        if l > self.Encoder.max_len:
+            return None
+        return l, self.read_message
+
+    def read_message(self, s):
+        if s != '':
+            self.connecter.got_message(self, s)
+        return 4, self.read_len
+
+    def read_dead(self, s):
+        return None
+
+    def _auto_close(self):
+        if not self.complete:
+            self.close()
+
+    def close(self):
+        if not self.closed:
+            self.connection.close()
+            self.sever()
+
+    def sever(self):
+        if self.log:
+            self.log.write('closed\n')
+            self.log.close()
+        self.closed = True
+        del self.Encoder.connections[self.connection]
+        if self.complete:
+            self.connecter.connection_lost(self)
+        elif self.locally_initiated:
+            incompletecounter.decrement()
+
+    def send_message_raw(self, message):
+        self.write(message)
+
+    def _write(self, message):
+        if not self.closed:
+            self.connection.write(message)
+
+    def data_came_in(self, connection, s):
+        self.read(s)
+
+    def _write_buffer(self, s):
+        self.buffer = s+self.buffer
+
+    def _read(self, s):
+        if self.log:
+            self.log.write('r:'+b2a_hex(s)+'\n')
+        self.Encoder.measurefunc(len(s))
+        self.buffer += s
+        while True:
+            if self.closed:
+                return
+            # self.next_len = # of characters function expects
+            # or 0 = all characters in the buffer
+            # or -1 = wait for next read, then all characters in the buffer
+            # not compatible w/ keepalives, switch out after all negotiation complete
+            if self.next_len <= 0:
+                m = self.buffer
+                self.buffer = ''
+            elif len(self.buffer) >= self.next_len:
+                m = self.buffer[:self.next_len]
+                self.buffer = self.buffer[self.next_len:]
+            else:
+                return
+            try:
+                x = self.next_func(m)
+            except:
+                self.next_len, self.next_func = 1, self.read_dead
+                raise
+            if x is None:
+                self.close()
+                return
+            self.next_len, self.next_func = x
+            if self.next_len < 0:  # already checked buffer
+                return             # wait for additional data
+            if self.bufferlen is not None:
+                self._read2('')
+                return
+
+    def _switch_to_read2(self):
+        self._write_buffer = None
+        if self.encrypter:
+            self.encrypter.setrawaccess(self._read2,self._write)
+        else:
+            self.read = self._read2
+        self.bufferlen = len(self.buffer)
+        self.buffer = [self.buffer]
+
+    def _read2(self, s):    # more efficient, requires buffer['',''] & bufferlen
+        if self.log:
+            self.log.write('r:'+b2a_hex(s)+'\n')
+        self.Encoder.measurefunc(len(s))
+        while True:
+            if self.closed:
+                return
+            p = self.next_len-self.bufferlen
+            if self.next_len == 0:
+                m = ''
+            elif s:
+                if p > len(s):
+                    self.buffer.append(s)
+                    self.bufferlen += len(s)
+                    return
+                self.bufferlen = len(s)-p
+                self.buffer.append(s[:p])
+                m = ''.join(self.buffer)
+                if p == len(s):
+                    self.buffer = []
+                else:
+                    self.buffer=[s[p:]]
+                s = ''
+            elif p <= 0:
+                # assert len(self.buffer) == 1
+                s = self.buffer[0]
+                self.bufferlen = len(s)-self.next_len
+                m = s[:self.next_len]
+                if p == 0:
+                    self.buffer = []
+                else:
+                    self.buffer = [s[self.next_len:]]
+                s = ''
+            else:
+                return
+            try:
+                x = self.next_func(m)
+            except:
+                self.next_len, self.next_func = 1, self.read_dead
+                raise
+            if x is None:
+                self.close()
+                return
+            self.next_len, self.next_func = x
+            if self.next_len < 0:  # already checked buffer
+                return             # wait for additional data
+            
+
+    def connection_flushed(self, connection):
+        if self.complete:
+            self.connecter.connection_flushed(self)
+
+    def connection_lost(self, connection):
+        if self.Encoder.connections.has_key(connection):
+            self.sever()
+
+
+class _dummy_banlist:
+    def includes(self, x):
+        return False
+
+class Encoder:
+    def __init__(self, connecter, raw_server, my_id, max_len,
+            schedulefunc, keepalive_delay, download_id, 
+            measurefunc, config, bans=_dummy_banlist() ):
+        self.raw_server = raw_server
+        self.connecter = connecter
+        self.my_id = my_id
+        self.max_len = max_len
+        self.schedulefunc = schedulefunc
+        self.keepalive_delay = keepalive_delay
+        self.download_id = download_id
+        self.measurefunc = measurefunc
+        self.config = config
+        self.connections = {}
+        self.banned = {}
+        self.external_bans = bans
+        self.to_connect = []
+        self.paused = False
+        if self.config['max_connections'] == 0:
+            self.max_connections = 2 ** 30
+        else:
+            self.max_connections = self.config['max_connections']
+        schedulefunc(self.send_keepalives, keepalive_delay)
+
+    def send_keepalives(self):
+        self.schedulefunc(self.send_keepalives, self.keepalive_delay)
+        if self.paused:
+            return
+        for c in self.connections.values():
+            c.keepalive()
+
+    def start_connections(self, list):
+        if not self.to_connect:
+            self.raw_server.add_task(self._start_connection_from_queue)
+        self.to_connect = list
+
+    def _start_connection_from_queue(self):
+        if self.connecter.external_connection_made:
+            max_initiate = self.config['max_initiate']
+        else:
+            max_initiate = int(self.config['max_initiate']*1.5)
+        cons = len(self.connections)
+        if cons >= self.max_connections or cons >= max_initiate:
+            delay = 60
+        elif self.paused or incompletecounter.toomany():
+            delay = 1
+        else:
+            delay = 0
+            dns, id, encrypted = self.to_connect.pop(0)
+            self.start_connection(dns, id, encrypted)
+        if self.to_connect:
+            self.raw_server.add_task(self._start_connection_from_queue, delay)
+
+    def start_connection(self, dns, id, encrypted = None):
+        if ( self.paused
+             or len(self.connections) >= self.max_connections
+             or id == self.my_id
+             or not self.check_ip(ip=dns[0]) ):
+            return True
+        if self.config['crypto_only']:
+            if encrypted is None or encrypted:  # fails on encrypted = 0
+                encrypted = True
+            else:
+                return True
+        for v in self.connections.values():
+            if v is None:
+                continue
+            if id and v.id == id:
+                return True
+            ip = v.get_ip(True)
+            if self.config['security'] and ip != 'unknown' and ip == dns[0]:
+                return True
+        try:
+            c = self.raw_server.start_connection(dns)
+            con = Connection(self, c, id, encrypted = encrypted)
+            self.connections[c] = con
+            c.set_handler(con)
+        except socketerror:
+            return False
+        return True
+
+    def _start_connection(self, dns, id, encrypted = None):
+        def foo(self=self, dns=dns, id=id, encrypted=encrypted):
+            self.start_connection(dns, id, encrypted)
+        self.schedulefunc(foo, 0)
+
+    def check_ip(self, connection=None, ip=None):
+        if not ip:
+            ip = connection.get_ip(True)
+        if self.config['security'] and self.banned.has_key(ip):
+            return False
+        if self.external_bans.includes(ip):
+            return False
+        return True
+
+    def got_id(self, connection):
+        if connection.id == self.my_id:
+            self.connecter.external_connection_made -= 1
+            return False
+        ip = connection.get_ip(True)
+        for v in self.connections.values():
+            if connection is not v:
+                if connection.id == v.id:
+                    if ip == v.get_ip(True):
+                        v.close()
+                    else:
+                        return False
+                if self.config['security'] and ip != 'unknown' and ip == v.get_ip(True):
+                    v.close()
+        return True
+
+    def external_connection_made(self, connection):
+        if self.paused or len(self.connections) >= self.max_connections:
+            connection.close()
+            return False
+        con = Connection(self, connection, None)
+        self.connections[connection] = con
+        connection.set_handler(con)
+        return True
+
+    def externally_handshaked_connection_made(self, connection, options,
+                                              already_read, encrypted = None):
+        if ( self.paused
+             or len(self.connections) >= self.max_connections
+             or not self.check_ip(connection=connection) ):
+            connection.close()
+            return False
+        con = Connection(self, connection, None,
+                ext_handshake = True, encrypted = encrypted, options = options)
+        self.connections[connection] = con
+        connection.set_handler(con)
+        if already_read:
+            con.data_came_in(con, already_read)
+        return True
+
+    def close_all(self):
+        for c in self.connections.values():
+            c.close()
+        self.connections = {}
+
+    def ban(self, ip):
+        self.banned[ip] = 1
+
+    def pause(self, flag):
+        self.paused = flag
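
The read path in Connection._read()/_read2() above is a small state machine: the current handler is fed exactly next_len bytes and returns the (next_len, next_func) pair for the following step, with None meaning drop the connection and a negative length meaning wait for more data. A self-contained toy reader using the same convention, with a simplified buffering loop and MAX_LEN standing in for Encoder.max_len:

    from binascii import b2a_hex

    def toint(s):
        # same helper as in Encrypter.py
        return int(b2a_hex(s), 16)

    class ToyReader(object):
        """Toy length-prefixed reader following the same convention as
        Connection._read(): each handler returns (next_len, next_func),
        or None to drop the connection."""

        MAX_LEN = 2 ** 23     # stand-in for Encoder.max_len

        def __init__(self):
            self.buffer = b''
            self.messages = []
            self.closed = False
            self.next_len, self.next_func = 4, self.read_len

        def read_len(self, s):
            l = toint(s)
            if l > self.MAX_LEN:
                return None                    # oversized: close, as read_len() does
            return l, self.read_message

        def read_message(self, s):
            if s != b'':                       # zero-length messages are keepalives
                self.messages.append(s)
            return 4, self.read_len

        def data_came_in(self, s):
            # simplified version of the buffering loop in _read(): feed the
            # current handler exactly next_len bytes at a time
            self.buffer += s
            while not self.closed and len(self.buffer) >= self.next_len:
                m = self.buffer[:self.next_len]
                self.buffer = self.buffer[self.next_len:]
                x = self.next_func(m)
                if x is None:
                    self.closed = True
                    return
                self.next_len, self.next_func = x

    r = ToyReader()
    r.data_came_in(b'\x00\x00\x00\x05hello\x00\x00\x00\x00')
    print(r.messages)    # one message ('hello'); the trailing keepalive is skipped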

Added: debtorrent/branches/upstream/current/BitTornado/BT1/FileSelector.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/FileSelector.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/FileSelector.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/FileSelector.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,245 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from random import shuffle
+from traceback import print_exc
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+
+class FileSelector:
+    def __init__(self, files, piece_length, bufferdir,
+                 storage, storagewrapper, sched, failfunc):
+        self.files = files
+        self.storage = storage
+        self.storagewrapper = storagewrapper
+        self.sched = sched
+        self.failfunc = failfunc
+        self.downloader = None
+        self.picker = None
+
+        storage.set_bufferdir(bufferdir)
+        
+        self.numfiles = len(files)
+        self.priority = [1] * self.numfiles
+        self.new_priority = None
+        self.new_partials = None
+        self.filepieces = []
+        total = 0L
+        for file, length in files:
+            if not length:
+                self.filepieces.append(())
+            else:
+                pieces = range( int(total/piece_length),
+                                int((total+length-1)/piece_length)+1 )
+                self.filepieces.append(tuple(pieces))
+                total += length
+        self.numpieces = int((total+piece_length-1)/piece_length)
+        self.piece_priority = [1] * self.numpieces
+        
+
+
+    def init_priority(self, new_priority):
+        try:
+            assert len(new_priority) == self.numfiles
+            for v in new_priority:
+                assert type(v) in (type(0),type(0L))
+                assert v >= -1
+                assert v <= 2
+        except:
+#           print_exc()            
+            return False
+        try:
+            files_updated = False
+            for f in xrange(self.numfiles):
+                if new_priority[f] < 0:
+                    self.storage.disable_file(f)
+                    files_updated = True
+            if files_updated:
+                self.storage.reset_file_status()
+            self.new_priority = new_priority
+        except (IOError, OSError), e:
+            self.failfunc("can't open partial file for "
+                          + self.files[f][0] + ': ' + str(e))
+            return False
+        return True
+
+    '''
+    d['priority'] = [file #1 priority [,file #2 priority...] ]
+                    a list of download priorities for each file.
+                    Priority may be -1, 0, 1, 2.  -1 = download disabled,
+                    0 = highest, 1 = normal, 2 = lowest.
+    Also see Storage.pickle and StorageWrapper.pickle for additional keys.
+    '''
+    def unpickle(self, d):
+        if d.has_key('priority'):
+            if not self.init_priority(d['priority']):
+                return
+        pieces = self.storage.unpickle(d)
+        if not pieces:  # don't bother, nothing restoreable
+            return
+        new_piece_priority = self._get_piece_priority_list(self.new_priority)
+        self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
+        self.new_partials = self.storagewrapper.unpickle(d, pieces)
+
+
+    def tie_in(self, picker, cancelfunc, requestmorefunc, rerequestfunc):
+        self.picker = picker
+        self.cancelfunc = cancelfunc
+        self.requestmorefunc = requestmorefunc
+        self.rerequestfunc = rerequestfunc
+
+        if self.new_priority:
+            self.priority = self.new_priority
+            self.new_priority = None
+            self.new_piece_priority = self._set_piece_priority(self.priority)
+
+        if self.new_partials:
+            shuffle(self.new_partials)
+            for p in self.new_partials:
+                self.picker.requested(p)
+        self.new_partials = None
+        
+
+    def _set_files_disabled(self, old_priority, new_priority):
+        old_disabled = [p == -1 for p in old_priority]
+        new_disabled = [p == -1 for p in new_priority]
+        data_to_update = []
+        for f in xrange(self.numfiles):
+            if new_disabled[f] != old_disabled[f]:
+                data_to_update.extend(self.storage.get_piece_update_list(f))
+        buffer = []
+        for piece, start, length in data_to_update:
+            if self.storagewrapper.has_data(piece):
+                data = self.storagewrapper.read_raw(piece, start, length)
+                if data is None:
+                    return False
+                buffer.append((piece, start, data))
+
+        files_updated = False        
+        try:
+            for f in xrange(self.numfiles):
+                if new_disabled[f] and not old_disabled[f]:
+                    self.storage.disable_file(f)
+                    files_updated = True
+                if old_disabled[f] and not new_disabled[f]:
+                    self.storage.enable_file(f)
+                    files_updated = True
+        except (IOError, OSError), e:
+            if new_disabled[f]:
+                msg = "can't open partial file for "
+            else:
+                msg = 'unable to open '
+            self.failfunc(msg + self.files[f][0] + ': ' + str(e))
+            return False
+        if files_updated:
+            self.storage.reset_file_status()
+
+        changed_pieces = {}
+        for piece, start, data in buffer:
+            if not self.storagewrapper.write_raw(piece, start, data):
+                return False
+            data.release()
+            changed_pieces[piece] = 1
+        if not self.storagewrapper.doublecheck_data(changed_pieces):
+            return False
+
+        return True        
+
+
+    def _get_piece_priority_list(self, file_priority_list):
+        l = [-1] * self.numpieces
+        for f in xrange(self.numfiles):
+            if file_priority_list[f] == -1:
+                continue
+            for i in self.filepieces[f]:
+                if l[i] == -1:
+                    l[i] = file_priority_list[f]
+                    continue
+                l[i] = min(l[i],file_priority_list[f])
+        return l
+        
+
+    def _set_piece_priority(self, new_priority):
+        was_complete = self.storagewrapper.am_I_complete()
+        new_piece_priority = self._get_piece_priority_list(new_priority)
+        pieces = range(self.numpieces)
+        shuffle(pieces)
+        new_blocked = []
+        new_unblocked = []
+        for piece in pieces:
+            self.picker.set_priority(piece,new_piece_priority[piece])
+            o = self.piece_priority[piece] == -1
+            n = new_piece_priority[piece] == -1
+            if n and not o:
+                new_blocked.append(piece)
+            if o and not n:
+                new_unblocked.append(piece)
+        if new_blocked:
+            self.cancelfunc(new_blocked)
+        self.storagewrapper.reblock([i == -1 for i in new_piece_priority])
+        if new_unblocked:
+            self.requestmorefunc(new_unblocked)
+        if was_complete and not self.storagewrapper.am_I_complete():
+            self.rerequestfunc()
+
+        return new_piece_priority        
+
+
+    def set_priorities_now(self, new_priority = None):
+        if not new_priority:
+            new_priority = self.new_priority
+            self.new_priority = None    # potential race condition
+            if not new_priority:
+                return
+        old_priority = self.priority
+        self.priority = new_priority
+        if not self._set_files_disabled(old_priority, new_priority):
+            return
+        self.piece_priority = self._set_piece_priority(new_priority)
+
+    def set_priorities(self, new_priority):
+        self.new_priority = new_priority
+        self.sched(self.set_priorities_now)
+        
+    def set_priority(self, f, p):
+        new_priority = self.get_priorities()
+        new_priority[f] = p
+        self.set_priorities(new_priority)
+
+    def get_priorities(self):
+        priority = self.new_priority
+        if not priority:
+            priority = self.priority    # potential race condition
+        return [i for i in priority]
+
+    def __setitem__(self, index, val):
+        self.set_priority(index, val)
+
+    def __getitem__(self, index):
+        try:
+            return self.new_priority[index]
+        except:
+            return self.priority[index]
+
+
+    def finish(self):
+        for f in xrange(self.numfiles):
+            if self.priority[f] == -1:
+                self.storage.delete_file(f)
+
+    def pickle(self):
+        d = {'priority': self.priority}
+        try:
+            s = self.storage.pickle()
+            sw = self.storagewrapper.pickle()
+            for k in s.keys():
+                d[k] = s[k]
+            for k in sw.keys():
+                d[k] = sw[k]
+        except (IOError, OSError):
+            pass
+        return d
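
Per the priority scale documented above (-1 disabled, 0 highest, 2 lowest), _get_piece_priority_list() gives each piece the numerically smallest priority of the enabled files touching it, so a boundary piece shared by two files follows the more urgent of the two. A standalone sketch of that mapping with a made-up two-file layout:

    def piece_priorities(filepieces, file_priority_list, numpieces):
        # standalone version of _get_piece_priority_list(): a piece takes the
        # numerically smallest (most urgent) priority of the enabled files
        # touching it, and stays -1 only if every such file is disabled
        l = [-1] * numpieces
        for f, prio in enumerate(file_priority_list):
            if prio == -1:
                continue                 # disabled file contributes nothing
            for i in filepieces[f]:
                if l[i] == -1:
                    l[i] = prio
                else:
                    l[i] = min(l[i], prio)
        return l

    # made-up layout: two files sharing boundary piece 2, second file disabled
    filepieces = [(0, 1, 2), (2, 3, 4)]
    print(piece_priorities(filepieces, [0, -1], 5))    # -> [0, 0, 0, -1, -1]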

Added: debtorrent/branches/upstream/current/BitTornado/BT1/Filter.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/Filter.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/Filter.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/Filter.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,12 @@
+class Filter:
+    def __init__(self, callback):
+        self.callback = callback
+
+    def check(self, ip, paramslist, headers):
+
+        def params(key, default = None, l = paramslist):
+            if l.has_key(key):
+                return l[key][0]
+            return default
+
+        return None
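
Filter.py is only a hook: check() always returns None, i.e. no objection to the announce. A hypothetical replacement is sketched below; it keeps the same check(ip, paramslist, headers) signature and params() helper, and assumes (this is not shown in the commit) that a non-None return value is reported back to the requester as a rejection reason:

    class PrefixFilter:
        def __init__(self, callback, banned_prefix='10.'):
            self.callback = callback
            self.banned_prefix = banned_prefix

        def check(self, ip, paramslist, headers):
            def params(key, default=None, l=paramslist):
                if key in l:
                    return l[key][0]
                return default

            if ip.startswith(self.banned_prefix):
                return 'requests from %s* are not accepted' % self.banned_prefix
            if params('port') == '0':    # arbitrary illustrative rule
                return 'port 0 is not acceptable'
            return None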

Added: debtorrent/branches/upstream/current/BitTornado/BT1/HTTPDownloader.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/HTTPDownloader.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/HTTPDownloader.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/HTTPDownloader.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,251 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado.CurrentRateMeasure import Measure
+from random import randint
+from urlparse import urlparse
+from httplib import HTTPConnection
+from urllib import quote
+from threading import Thread
+from BitTornado.__init__ import product_name,version_short
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+EXPIRE_TIME = 60 * 60
+
+VERSION = product_name+'/'+version_short
+
+class haveComplete:
+    def complete(self):
+        return True
+    def __getitem__(self, x):
+        return True
+haveall = haveComplete()
+
+class SingleDownload:
+    def __init__(self, downloader, url):
+        self.downloader = downloader
+        self.baseurl = url
+        try:
+            (scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
+        except:
+            self.downloader.errorfunc('cannot parse http seed address: '+url)
+            return
+        if scheme != 'http':
+            self.downloader.errorfunc('http seed url not http: '+url)
+            return
+        try:
+            self.connection = HTTPConnection(self.netloc)
+        except:
+            self.downloader.errorfunc('cannot connect to http seed: '+url)
+            return
+        self.seedurl = path
+        if pars:
+            self.seedurl += ';'+pars
+        self.seedurl += '?'
+        if query:
+            self.seedurl += query+'&'
+        self.seedurl += 'info_hash='+quote(self.downloader.infohash)
+
+        self.measure = Measure(downloader.max_rate_period)
+        self.index = None
+        self.url = ''
+        self.requests = []
+        self.request_size = 0
+        self.endflag = False
+        self.error = None
+        self.retry_period = 30
+        self._retry_period = None
+        self.errorcount = 0
+        self.goodseed = False
+        self.active = False
+        self.cancelled = False
+        self.resched(randint(2,10))
+
+    def resched(self, len = None):
+        if len is None:
+            len = self.retry_period
+        if self.errorcount > 3:
+            len = len * (self.errorcount - 2)
+        self.downloader.rawserver.add_task(self.download, len)
+
+    def _want(self, index):
+        if self.endflag:
+            return self.downloader.storage.do_I_have_requests(index)
+        else:
+            return self.downloader.storage.is_unstarted(index)
+
+    def download(self):
+        self.cancelled = False
+        if self.downloader.picker.am_I_complete():
+            self.downloader.downloads.remove(self)
+            return
+        self.index = self.downloader.picker.next(haveall, self._want)
+        if ( self.index is None and not self.endflag
+                     and not self.downloader.peerdownloader.has_downloaders() ):
+            self.endflag = True
+            self.index = self.downloader.picker.next(haveall, self._want)
+        if self.index is None:
+            self.endflag = True
+            self.resched()
+        else:
+            self.url = ( self.seedurl+'&piece='+str(self.index) )
+            self._get_requests()
+            if self.request_size < self.downloader.storage._piecelen(self.index):
+                self.url += '&ranges='+self._request_ranges()
+            rq = Thread(target = self._request)
+            rq.setDaemon(False)
+            rq.start()
+            self.active = True
+
+    def _request(self):
+        import encodings.ascii
+        import encodings.punycode
+        import encodings.idna
+        
+        self.error = None
+        self.received_data = None
+        try:
+            self.connection.request('GET',self.url, None,
+                                {'User-Agent': VERSION})
+            r = self.connection.getresponse()
+            self.connection_status = r.status
+            self.received_data = r.read()
+        except Exception, e:
+            self.error = 'error accessing http seed: '+str(e)
+            try:
+                self.connection.close()
+            except:
+                pass
+            try:
+                self.connection = HTTPConnection(self.netloc)
+            except:
+                self.connection = None  # will cause an exception and retry next cycle
+        self.downloader.rawserver.add_task(self.request_finished)
+
+    def request_finished(self):
+        self.active = False
+        if self.error is not None:
+            if self.goodseed:
+                self.downloader.errorfunc(self.error)
+            self.errorcount += 1
+        if self.received_data:
+            self.errorcount = 0
+            if not self._got_data():
+                self.received_data = None
+        if not self.received_data:
+            self._release_requests()
+            self.downloader.peerdownloader.piece_flunked(self.index)
+        if self._retry_period:
+            self.resched(self._retry_period)
+            self._retry_period = None
+            return
+        self.resched()
+
+    def _got_data(self):
+        if self.connection_status == 503:   # seed is busy
+            try:
+                self.retry_period = max(int(self.received_data),5)
+            except:
+                pass
+            return False
+        if self.connection_status != 200:
+            self.errorcount += 1
+            return False
+        self._retry_period = 1
+        if len(self.received_data) != self.request_size:
+            if self.goodseed:
+                self.downloader.errorfunc('corrupt data from http seed - redownloading')
+            return False
+        self.measure.update_rate(len(self.received_data))
+        self.downloader.measurefunc(len(self.received_data))
+        if self.cancelled:
+            return False
+        if not self._fulfill_requests():
+            return False
+        if not self.goodseed:
+            self.goodseed = True
+            self.downloader.seedsfound += 1
+        if self.downloader.storage.do_I_have(self.index):
+            self.downloader.picker.complete(self.index)
+            self.downloader.peerdownloader.check_complete(self.index)
+            self.downloader.gotpiecefunc(self.index)
+        return True
+    
+    def _get_requests(self):
+        self.requests = []
+        self.request_size = 0L
+        while self.downloader.storage.do_I_have_requests(self.index):
+            r = self.downloader.storage.new_request(self.index)
+            self.requests.append(r)
+            self.request_size += r[1]
+        self.requests.sort()
+
+    def _fulfill_requests(self):
+        start = 0L
+        success = True
+        while self.requests:
+            begin, length = self.requests.pop(0)
+            if not self.downloader.storage.piece_came_in(self.index, begin,
+                            self.received_data[start:start+length]):
+                success = False
+                break
+            start += length
+        return success
+
+    def _release_requests(self):
+        for begin, length in self.requests:
+            self.downloader.storage.request_lost(self.index, begin, length)
+        self.requests = []
+
+    def _request_ranges(self):
+        s = ''
+        begin, length = self.requests[0]
+        for begin1, length1 in self.requests[1:]:
+            if begin + length == begin1:
+                length += length1
+                continue
+            else:
+                if s:
+                    s += ','
+                s += str(begin)+'-'+str(begin+length-1)
+                begin, length = begin1, length1
+        if s:
+            s += ','
+        s += str(begin)+'-'+str(begin+length-1)
+        return s
+        
+    
+class HTTPDownloader:
+    def __init__(self, storage, picker, rawserver,
+                 finflag, errorfunc, peerdownloader,
+                 max_rate_period, infohash, measurefunc, gotpiecefunc):
+        self.storage = storage
+        self.picker = picker
+        self.rawserver = rawserver
+        self.finflag = finflag
+        self.errorfunc = errorfunc
+        self.peerdownloader = peerdownloader
+        self.infohash = infohash
+        self.max_rate_period = max_rate_period
+        self.gotpiecefunc = gotpiecefunc
+        self.measurefunc = measurefunc
+        self.downloads = []
+        self.seedsfound = 0
+
+    def make_download(self, url):
+        self.downloads.append(SingleDownload(self, url))
+        return self.downloads[-1]
+
+    def get_downloads(self):
+        if self.finflag.isSet():
+            return []
+        return self.downloads
+
+    def cancel_piece_download(self, pieces):
+        for d in self.downloads:
+            if d.active and d.index in pieces:
+                d.cancelled = True
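
An HTTP seed is asked for data with seedurl + '&piece=N', plus '&ranges=a-b,c-d,...' when only part of the piece is still wanted; _request_ranges() builds that list by merging contiguous (begin, length) block requests into inclusive byte ranges. A standalone version of that merge (the sample blocks are illustrative):

    def request_ranges(requests):
        # standalone version of _request_ranges(): merge contiguous
        # (begin, length) block requests into inclusive byte ranges
        requests = sorted(requests)      # _get_requests() sorts before this point
        s = ''
        begin, length = requests[0]
        for begin1, length1 in requests[1:]:
            if begin + length == begin1:         # contiguous: extend the run
                length += length1
                continue
            if s:
                s += ','
            s += '%d-%d' % (begin, begin + length - 1)
            begin, length = begin1, length1
        if s:
            s += ','
        s += '%d-%d' % (begin, begin + length - 1)
        return s

    # three 16 KiB blocks, the first two contiguous
    print(request_ranges([(0, 16384), (16384, 16384), (49152, 16384)]))
    # -> 0-32767,49152-65535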

Added: debtorrent/branches/upstream/current/BitTornado/BT1/NatCheck.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/NatCheck.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/NatCheck.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/NatCheck.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,219 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from socket import error as socketerror
+from traceback import print_exc
+from BitTornado.BTcrypto import Crypto, CRYPTO_OK
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+CHECK_PEER_ID_ENCRYPTED = True
+
+protocol_name = 'BitTorrent protocol'
+
+# header, reserved, download id, my id, [length, message]
+
+class NatCheck:
+    def __init__(self, resultfunc, downloadid, peerid, ip, port, rawserver,
+                 encrypted = False):
+        self.resultfunc = resultfunc
+        self.downloadid = downloadid
+        self.peerid = peerid
+        self.ip = ip
+        self.port = port
+        self.encrypted = encrypted
+        self.closed = False
+        self.buffer = ''
+        self.read = self._read
+        self.write = self._write
+        try:
+            self.connection = rawserver.start_connection((ip, port), self)
+            if encrypted:
+                self._dc = not(CRYPTO_OK and CHECK_PEER_ID_ENCRYPTED)
+                self.encrypter = Crypto(True, disable_crypto = self._dc)
+                self.write(self.encrypter.pubkey+self.encrypter.padding())
+            else:
+                self.encrypter = None
+                self.write(chr(len(protocol_name)) + protocol_name +
+                    (chr(0) * 8) + downloadid)
+        except socketerror:
+            self.answer(False)
+        except IOError:
+            self.answer(False)
+        self.next_len, self.next_func = 1+len(protocol_name), self.read_header
+
+    def answer(self, result):
+        self.closed = True
+        try:
+            self.connection.close()
+        except AttributeError:
+            pass
+        self.resultfunc(result, self.downloadid, self.peerid, self.ip, self.port)
+
+    def _read_header(self, s):
+        if s == chr(len(protocol_name))+protocol_name:
+            return 8, self.read_options
+        return None
+
+    def read_header(self, s):
+        if self._read_header(s):
+            if self.encrypted:
+                return None
+            return 8, self.read_options
+        if not self.encrypted:
+            return None
+        self._write_buffer(s)
+        return self.encrypter.keylength, self.read_crypto_header
+
+    ################## ENCRYPTION SUPPORT ######################
+
+    def _start_crypto(self):
+        self.encrypter.setrawaccess(self._read,self._write)
+        self.write = self.encrypter.write
+        self.read = self.encrypter.read
+        if self.buffer:
+            self.buffer = self.encrypter.decrypt(self.buffer)
+
+    def read_crypto_header(self, s):
+        self.encrypter.received_key(s)
+        self.encrypter.set_skey(self.downloadid)
+        cryptmode = '\x00\x00\x00\x02'    # full stream encryption
+        padc = self.encrypter.padding()
+        self.write( self.encrypter.block3a
+                  + self.encrypter.block3b
+                  + self.encrypter.encrypt(
+                        ('\x00'*8)            # VC
+                      + cryptmode             # acceptable crypto modes
+                      + tobinary16(len(padc))
+                      + padc                  # PadC
+                      + '\x00\x00' ) )        # no initial payload data
+        self._max_search = 520
+        return 1, self.read_crypto_block4a
+
+    def _search_for_pattern(self, s, pat):
+        p = s.find(pat)
+        if p < 0:
+            if len(s) >= len(pat):
+                self._max_search -= len(s)+1-len(pat)
+            if self._max_search < 0:
+                self.answer(False)      # give up; this class has no close() method
+                return False
+            self._write_buffer(s[1-len(pat):])
+            return False
+        self._write_buffer(s[p+len(pat):])
+        return True
+
+    ### OUTGOING CONNECTION ###
+
+    def read_crypto_block4a(self, s):
+        if not self._search_for_pattern(s,self.encrypter.VC_pattern()):
+            return -1, self.read_crypto_block4a     # wait for more data
+        if self._dc:                        # can't or won't go any further
+            self.answer(True)
+            return None
+        self._start_crypto()
+        return 6, self.read_crypto_block4b
+
+    def read_crypto_block4b(self, s):
+        self.cryptmode = toint(s[:4]) % 4
+        if self.cryptmode != 2:
+            return None                     # unknown encryption
+        padlen = (ord(s[4])<<8)+ord(s[5])
+        if padlen > 512:
+            return None
+        if padlen:
+            return padlen, self.read_crypto_pad4
+        return self.read_crypto_block4done()
+
+    def read_crypto_pad4(self, s):
+        # discard data
+        return self.read_crypto_block4done()
+
+    def read_crypto_block4done(self):
+        if DEBUG:
+            self._log_start()
+        if self.cryptmode == 1:     # only handshake encryption
+            if not self.buffer:  # oops; check for exceptions to this
+                return None
+            self._end_crypto()
+        self.write(chr(len(protocol_name)) + protocol_name +
+            option_pattern + self.downloadid)  # no Encoder here; use our own download id
+        return 1+len(protocol_name), self.read_encrypted_header
+
+    ### START PROTOCOL OVER ENCRYPTED CONNECTION ###
+
+    def read_encrypted_header(self, s):
+        return self._read_header(s)
+
+    ################################################
+
+    def read_options(self, s):
+        return 20, self.read_download_id
+
+    def read_download_id(self, s):
+        if s != self.downloadid:
+            return None
+        return 20, self.read_peer_id
+
+    def read_peer_id(self, s):
+        if s != self.peerid:
+            return None
+        self.answer(True)
+        return None
+
+    def _write(self, message):
+        if not self.closed:
+            self.connection.write(message)
+
+    def data_came_in(self, connection, s):
+        self.read(s)
+
+    def _write_buffer(self, s):
+        self.buffer = s+self.buffer
+
+    def _read(self, s):
+        self.buffer += s
+        while True:
+            if self.closed:
+                return
+            # self.next_len = # of characters function expects
+            # or 0 = all characters in the buffer
+            # or -1 = wait for next read, then all characters in the buffer
+            # not compatible w/ keepalives, switch out after all negotiation complete
+            if self.next_len <= 0:
+                m = self.buffer
+                self.buffer = ''
+            elif len(self.buffer) >= self.next_len:
+                m = self.buffer[:self.next_len]
+                self.buffer = self.buffer[self.next_len:]
+            else:
+                return
+            try:
+                x = self.next_func(m)
+            except:
+                if not self.closed:
+                    self.answer(False)
+                return
+            if x is None:
+                if not self.closed:
+                    self.answer(False)
+                return
+            self.next_len, self.next_func = x
+            if self.next_len < 0:  # already checked buffer
+                return             # wait for additional data
+            if self.bufferlen is not None:
+                self._read2('')
+                return
+
+    def connection_lost(self, connection):
+        if not self.closed:
+            self.closed = True
+            self.resultfunc(False, self.downloadid, self.peerid, self.ip, self.port)
+
+    def connection_flushed(self, connection):
+        pass
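
A note on NatCheck.py above: the unencrypted probe sends only the header, eight
reserved bytes and the download id, then expects the full handshake back from
the probed peer (read_header -> read_options -> read_download_id -> read_peer_id).
A minimal sketch of that expected reply layout only; the 20-byte ids below are
invented placeholders, not part of the imported source:

    protocol_name = 'BitTorrent protocol'

    def expected_reply(downloadid, peerid):
        # 1-byte length prefix, 19-byte protocol string, 8 reserved bytes,
        # 20-byte download id (info hash), 20-byte peer id
        assert len(downloadid) == 20 and len(peerid) == 20
        return (chr(len(protocol_name)) + protocol_name
                + chr(0) * 8 + downloadid + peerid)

    reply = expected_reply('\x11' * 20, '\x22' * 20)
    assert len(reply) == 1 + 19 + 8 + 20 + 20   # 68 bytes in total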

Added: debtorrent/branches/upstream/current/BitTornado/BT1/PiecePicker.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/PiecePicker.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/PiecePicker.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/PiecePicker.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,322 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from random import randrange, shuffle
+from BitTornado.clock import clock
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+class PiecePicker:
+    def __init__(self, numpieces,
+                 rarest_first_cutoff = 1, rarest_first_priority_cutoff = 3,
+                 priority_step = 20):
+        self.rarest_first_cutoff = rarest_first_cutoff
+        self.rarest_first_priority_cutoff = rarest_first_priority_cutoff + priority_step
+        self.priority_step = priority_step
+        self.cutoff = rarest_first_priority_cutoff
+        self.numpieces = numpieces
+        self.started = []
+        self.totalcount = 0
+        self.numhaves = [0] * numpieces
+        self.priority = [1] * numpieces
+        self.removed_partials = {}
+        self.crosscount = [numpieces]
+        self.crosscount2 = [numpieces]
+        self.has = [0] * numpieces
+        self.numgot = 0
+        self.done = False
+        self.seed_connections = {}
+        self.past_ips = {}
+        self.seed_time = None
+        self.superseed = False
+        self.seeds_connected = 0
+        self._init_interests()
+
+    def _init_interests(self):
+        self.interests = [[] for x in xrange(self.priority_step)]
+        self.level_in_interests = [self.priority_step] * self.numpieces
+        interests = range(self.numpieces)
+        shuffle(interests)
+        self.pos_in_interests = [0] * self.numpieces
+        for i in xrange(self.numpieces):
+            self.pos_in_interests[interests[i]] = i
+        self.interests.append(interests)
+
+
+    def got_have(self, piece):
+        self.totalcount+=1
+        numint = self.numhaves[piece]
+        self.numhaves[piece] += 1
+        self.crosscount[numint] -= 1
+        if numint+1==len(self.crosscount):
+            self.crosscount.append(0)
+        self.crosscount[numint+1] += 1
+        if not self.done:
+            numintplus = numint+self.has[piece]
+            self.crosscount2[numintplus] -= 1
+            if numintplus+1 == len(self.crosscount2):
+                self.crosscount2.append(0)
+            self.crosscount2[numintplus+1] += 1
+            numint = self.level_in_interests[piece]
+            self.level_in_interests[piece] += 1
+        if self.superseed:
+            self.seed_got_haves[piece] += 1
+            numint = self.level_in_interests[piece]
+            self.level_in_interests[piece] += 1
+        elif self.has[piece] or self.priority[piece] == -1:
+            return
+        if numint == len(self.interests) - 1:
+            self.interests.append([])
+        self._shift_over(piece, self.interests[numint], self.interests[numint + 1])
+
+    def lost_have(self, piece):
+        self.totalcount-=1
+        numint = self.numhaves[piece]
+        self.numhaves[piece] -= 1
+        self.crosscount[numint] -= 1
+        self.crosscount[numint-1] += 1
+        if not self.done:
+            numintplus = numint+self.has[piece]
+            self.crosscount2[numintplus] -= 1
+            self.crosscount2[numintplus-1] += 1
+            numint = self.level_in_interests[piece]
+            self.level_in_interests[piece] -= 1
+        if self.superseed:
+            numint = self.level_in_interests[piece]
+            self.level_in_interests[piece] -= 1
+        elif self.has[piece] or self.priority[piece] == -1:
+            return
+        self._shift_over(piece, self.interests[numint], self.interests[numint - 1])
+
+    def _shift_over(self, piece, l1, l2):
+        assert self.superseed or (not self.has[piece] and self.priority[piece] >= 0)
+        parray = self.pos_in_interests
+        p = parray[piece]
+        assert l1[p] == piece
+        q = l1[-1]
+        l1[p] = q
+        parray[q] = p
+        del l1[-1]
+        newp = randrange(len(l2)+1)
+        if newp == len(l2):
+            parray[piece] = len(l2)
+            l2.append(piece)
+        else:
+            old = l2[newp]
+            parray[old] = len(l2)
+            l2.append(old)
+            l2[newp] = piece
+            parray[piece] = newp
+
+
+    def got_seed(self):
+        self.seeds_connected += 1
+        self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected,0)
+
+    def became_seed(self):
+        self.got_seed()
+        self.totalcount -= self.numpieces
+        self.numhaves = [i-1 for i in self.numhaves]
+        if self.superseed or not self.done:
+            self.level_in_interests = [i-1 for i in self.level_in_interests]
+            if self.interests:
+                del self.interests[0]
+        del self.crosscount[0]
+        if not self.done:
+            del self.crosscount2[0]
+
+    def lost_seed(self):
+        self.seeds_connected -= 1
+        self.cutoff = max(self.rarest_first_priority_cutoff-self.seeds_connected,0)
+
+
+    def requested(self, piece):
+        if piece not in self.started:
+            self.started.append(piece)
+
+    def _remove_from_interests(self, piece, keep_partial = False):
+        l = self.interests[self.level_in_interests[piece]]
+        p = self.pos_in_interests[piece]
+        assert l[p] == piece
+        q = l[-1]
+        l[p] = q
+        self.pos_in_interests[q] = p
+        del l[-1]
+        try:
+            self.started.remove(piece)
+            if keep_partial:
+                self.removed_partials[piece] = 1
+        except ValueError:
+            pass
+
+    def complete(self, piece):
+        assert not self.has[piece]
+        self.has[piece] = 1
+        self.numgot += 1
+        if self.numgot == self.numpieces:
+            self.done = True
+            self.crosscount2 = self.crosscount
+        else:
+            numhaves = self.numhaves[piece]
+            self.crosscount2[numhaves] -= 1
+            if numhaves+1 == len(self.crosscount2):
+                self.crosscount2.append(0)
+            self.crosscount2[numhaves+1] += 1
+        self._remove_from_interests(piece)
+
+
+    def next(self, haves, wantfunc, complete_first = False):
+        cutoff = self.numgot < self.rarest_first_cutoff
+        complete_first = (complete_first or cutoff) and not haves.complete()
+        best = None
+        bestnum = 2 ** 30
+        for i in self.started:
+            if haves[i] and wantfunc(i):
+                if self.level_in_interests[i] < bestnum:
+                    best = i
+                    bestnum = self.level_in_interests[i]
+        if best is not None:
+            if complete_first or (cutoff and len(self.interests) > self.cutoff):
+                return best
+        if haves.complete():
+            r = [ (0, min(bestnum,len(self.interests))) ]
+        elif cutoff and len(self.interests) > self.cutoff:
+            r = [ (self.cutoff, min(bestnum,len(self.interests))),
+                      (0, self.cutoff) ]
+        else:
+            r = [ (0, min(bestnum,len(self.interests))) ]
+        for lo,hi in r:
+            for i in xrange(lo,hi):
+                for j in self.interests[i]:
+                    if haves[j] and wantfunc(j):
+                        return j
+        if best is not None:
+            return best
+        return None
+
+
+    def am_I_complete(self):
+        return self.done
+    
+    def bump(self, piece):
+        l = self.interests[self.level_in_interests[piece]]
+        pos = self.pos_in_interests[piece]
+        del l[pos]
+        l.append(piece)
+        for i in range(pos,len(l)):
+            self.pos_in_interests[l[i]] = i
+        try:
+            self.started.remove(piece)
+        except:
+            pass
+
+    def set_priority(self, piece, p):
+        if self.superseed:
+            return False    # don't muck with this if you're a superseed
+        oldp = self.priority[piece]
+        if oldp == p:
+            return False
+        self.priority[piece] = p
+        if p == -1:
+            # when setting priority -1,
+            # make sure to cancel any downloads for this piece
+            if not self.has[piece]:
+                self._remove_from_interests(piece, True)
+            return True
+        if oldp == -1:
+            level = self.numhaves[piece] + (self.priority_step * p)
+            self.level_in_interests[piece] = level
+            if self.has[piece]:
+                return True
+            while len(self.interests) < level+1:
+                self.interests.append([])
+            l2 = self.interests[level]
+            parray = self.pos_in_interests
+            newp = randrange(len(l2)+1)
+            if newp == len(l2):
+                parray[piece] = len(l2)
+                l2.append(piece)
+            else:
+                old = l2[newp]
+                parray[old] = len(l2)
+                l2.append(old)
+                l2[newp] = piece
+                parray[piece] = newp
+            if self.removed_partials.has_key(piece):
+                del self.removed_partials[piece]
+                self.started.append(piece)
+            # now go to downloader and try requesting more
+            return True
+        numint = self.level_in_interests[piece]
+        newint = numint + ((p - oldp) * self.priority_step)
+        self.level_in_interests[piece] = newint
+        if self.has[piece]:
+            return False
+        while len(self.interests) < newint+1:
+            self.interests.append([])
+        self._shift_over(piece, self.interests[numint], self.interests[newint])
+        return False
+
+    def is_blocked(self, piece):
+        return self.priority[piece] < 0
+
+
+    def set_superseed(self):
+        assert self.done
+        self.superseed = True
+        self.seed_got_haves = [0] * self.numpieces
+        self._init_interests()  # assume everyone is disconnected
+
+    def next_have(self, connection, looser_upload):
+        if self.seed_time is None:
+            self.seed_time = clock()
+            return None
+        if clock() < self.seed_time+10:  # wait 10 seconds after seeing the first peers
+            return None                 # to give time to grab have lists
+        if not connection.upload.super_seeding:
+            return None
+        olddl = self.seed_connections.get(connection)
+        if olddl is None:
+            ip = connection.get_ip()
+            olddl = self.past_ips.get(ip)
+            if olddl is not None:                           # peer reconnected
+                self.seed_connections[connection] = olddl
+                if not looser_upload:
+                    self.seed_got_haves[olddl] -= 1         # penalize
+        if olddl is not None:
+            if looser_upload:
+                num = 1     # send a new have even if it hasn't spread that piece elsewhere
+            else:
+                num = 2
+            if self.seed_got_haves[olddl] < num:
+                return None
+            if not connection.upload.was_ever_interested:   # it never downloaded it?
+                connection.upload.skipped_count += 1
+                if connection.upload.skipped_count >= 3:    # probably another stealthed seed
+                    return -1                               # signal to close it
+        for tier in self.interests:
+            for piece in tier:
+                if not connection.download.have[piece]:
+                    seedint = self.level_in_interests[piece]
+                    self.level_in_interests[piece] += 1  # tweak it up one, so you don't duplicate effort
+                    if seedint == len(self.interests) - 1:
+                        self.interests.append([])
+                    self._shift_over(piece,
+                                self.interests[seedint], self.interests[seedint + 1])
+                    self.seed_got_haves[piece] = 0       # reset this
+                    self.seed_connections[connection] = piece
+                    connection.upload.seed_have_list.append(piece)
+                    return piece
+        return -1       # something screwy; terminate connection
+
+    def lost_peer(self, connection):
+        olddl = self.seed_connections.get(connection)
+        if olddl is None:
+            return
+        del self.seed_connections[connection]
+        self.past_ips[connection.get_ip()] = olddl
+        if self.seed_got_haves[olddl] == 1:
+            self.seed_got_haves[olddl] = 0
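
A note on PiecePicker.py above: got_have() moves a piece one "interests" level
up per announcement, and next() scans the lower (rarer) levels first, which is
what makes the selection rarest-first. A minimal usage sketch, assuming the
BitTornado tree imported here is on the Python path; the FakeHaves helper is
invented for illustration and stands in for a peer's have-bitfield as next()
uses it:

    from BitTornado.BT1.PiecePicker import PiecePicker

    class FakeHaves:
        # just enough of a have-bitfield for next(): indexing and complete()
        def __init__(self, pieces, numpieces):
            self.bits = [i in pieces for i in xrange(numpieces)]
        def __getitem__(self, i):
            return self.bits[i]
        def complete(self):
            return False not in self.bits

    picker = PiecePicker(8)
    picker.got_have(2)       # piece 2 announced by one peer
    picker.got_have(5)       # piece 5 announced by two peers
    picker.got_have(5)
    peer_a = FakeHaves([2, 5], 8)
    # peer_a offers both pieces; the rarer piece 2 is picked first
    assert picker.next(peer_a, lambda i: True) == 2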

Added: debtorrent/branches/upstream/current/BitTornado/BT1/Rerequester.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/Rerequester.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/Rerequester.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/Rerequester.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,462 @@
+# Written by Bram Cohen
+# modified for multitracker operation by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado.zurllib import urlopen, quote
+from urlparse import urlparse, urlunparse
+from socket import gethostbyname
+from btformats import check_peers
+from BitTornado.bencode import bdecode
+from threading import Thread, Lock
+from cStringIO import StringIO
+from traceback import print_exc
+from socket import error, gethostbyname
+from random import shuffle
+from sha import sha
+from time import time
+try:
+    from os import getpid
+except ImportError:
+    def getpid():
+        return 1
+    
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
+keys = {}
+basekeydata = str(getpid()) + repr(time()) + 'tracker'
+
+def add_key(tracker):
+    key = ''
+    for i in sha(basekeydata+tracker).digest()[-6:]:
+        key += mapbase64[ord(i) & 0x3F]
+    keys[tracker] = key
+
+def get_key(tracker):
+    try:
+        return "&key="+keys[tracker]
+    except:
+        add_key(tracker)
+        return "&key="+keys[tracker]
+
+class fakeflag:
+    def __init__(self, state=False):
+        self.state = state
+    def wait(self):
+        pass
+    def isSet(self):
+        return self.state
+
+class Rerequester:
+    def __init__( self, port, myid, infohash, trackerlist, config,
+                  sched, externalsched, errorfunc, excfunc, connect,
+                  howmany, amount_left, up, down, upratefunc, downratefunc,
+                  doneflag, unpauseflag = fakeflag(True),
+                  seededfunc = None, force_rapid_update = False ):
+
+        self.sched = sched
+        self.externalsched = externalsched
+        self.errorfunc = errorfunc
+        self.excfunc = excfunc
+        self.connect = connect
+        self.howmany = howmany
+        self.amount_left = amount_left
+        self.up = up
+        self.down = down
+        self.upratefunc = upratefunc
+        self.downratefunc = downratefunc
+        self.doneflag = doneflag
+        self.unpauseflag = unpauseflag
+        self.seededfunc = seededfunc
+        self.force_rapid_update = force_rapid_update
+
+        self.ip = config.get('ip','')
+        self.minpeers = config['min_peers']
+        self.maxpeers = config['max_initiate']
+        self.interval = config['rerequest_interval']
+        self.timeout = config['http_timeout']
+
+        newtrackerlist = []        
+        for tier in trackerlist:
+            if len(tier)>1:
+                shuffle(tier)
+            newtrackerlist += [tier]
+        self.trackerlist = newtrackerlist
+
+        self.lastsuccessful = ''
+        self.rejectedmessage = 'rejected by tracker - '
+
+        self.url = ('info_hash=%s&peer_id=%s' %
+            (quote(infohash), quote(myid)))
+        if not config.get('crypto_allowed'):
+            self.url += "&port="
+        else:
+            self.url += "&supportcrypto=1"
+            if not config.get('crypto_only'):
+                self.url += "&port="
+            else:
+                self.url += "&requirecrypto=1"
+                if not config.get('crypto_stealth'):
+                    self.url += "&port="
+                else:
+                    self.url += "&port=0&cryptoport="
+        self.url += str(port)
+
+        seed_id = config.get('dedicated_seed_id')
+        if seed_id:
+            self.url += '&seed_id='+quote(seed_id)
+        if self.seededfunc:
+            self.url += '&check_seeded=1'
+
+        self.last = None
+        self.trackerid = None
+        self.announce_interval = 30 * 60
+        self.last_failed = True
+        self.never_succeeded = True
+        self.errorcodes = {}
+        self.lock = SuccessLock()
+        self.special = None
+        self.stopped = False
+
+    def start(self):
+        self.sched(self.c, self.interval/2)
+        self.d(0)
+
+    def c(self):
+        if self.stopped:
+            return
+        if not self.unpauseflag.isSet() and (
+            self.howmany() < self.minpeers or self.force_rapid_update ):
+            self.announce(3, self._c)
+        else:
+            self._c()
+
+    def _c(self):
+        self.sched(self.c, self.interval)
+
+    def d(self, event = 3):
+        if self.stopped:
+            return
+        if not self.unpauseflag.isSet():
+            self._d()
+            return
+        self.announce(event, self._d)
+
+    def _d(self):
+        if self.never_succeeded:
+            self.sched(self.d, 60)  # retry in 60 seconds
+        elif self.force_rapid_update:
+            return
+        else:
+            self.sched(self.d, self.announce_interval)
+
+
+    def hit(self, event = 3):
+        if not self.unpauseflag.isSet() and (
+            self.howmany() < self.minpeers or self.force_rapid_update ):
+            self.announce(event)
+
+    def announce(self, event = 3, callback = lambda: None, specialurl = None):
+
+        if specialurl is not None:
+            s = self.url+'&uploaded=0&downloaded=0&left=1'   # don't add to statistics
+            if self.howmany() >= self.maxpeers:
+                s += '&numwant=0'
+            else:
+                s += '&no_peer_id=1&compact=1'
+            self.last_failed = True         # force true, so will display an error
+            self.special = specialurl
+            self.rerequest(s, callback)
+            return
+        
+        else:
+            s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
+                (self.url, str(self.up()), str(self.down()), 
+                str(self.amount_left())))
+        if self.last is not None:
+            s += '&last=' + quote(str(self.last))
+        if self.trackerid is not None:
+            s += '&trackerid=' + quote(str(self.trackerid))
+        if self.howmany() >= self.maxpeers:
+            s += '&numwant=0'
+        else:
+            s += '&no_peer_id=1&compact=1'
+        if event != 3:
+            s += '&event=' + ['started', 'completed', 'stopped'][event]
+        if event == 2:
+            self.stopped = True
+        self.rerequest(s, callback)
+
+
+    def snoop(self, peers, callback = lambda: None):  # tracker call support
+        self.rerequest(self.url
+            +'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant='
+            +str(peers), callback)
+
+
+    def rerequest(self, s, callback):
+        if not self.lock.isfinished():  # still waiting for prior cycle to complete??
+            def retry(self = self, s = s, callback = callback):
+                self.rerequest(s, callback)
+            self.sched(retry,5)         # retry in 5 seconds
+            return
+        self.lock.reset()
+        rq = Thread(target = self._rerequest, args = [s, callback])
+        rq.setDaemon(False)
+        rq.start()
+
+    def _rerequest(self, s, callback):
+        try:
+            def fail (self = self, callback = callback):
+                self._fail(callback)
+            if self.ip:
+                try:
+                    s += '&ip=' + gethostbyname(self.ip)
+                except:
+                    self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip
+                    self.externalsched(fail)
+            self.errorcodes = {}
+            if self.special is None:
+                for t in range(len(self.trackerlist)):
+                    for tr in range(len(self.trackerlist[t])):
+                        tracker  = self.trackerlist[t][tr]
+                        if self.rerequest_single(tracker, s, callback):
+                            if not self.last_failed and tr != 0:
+                                del self.trackerlist[t][tr]
+                                self.trackerlist[t] = [tracker] + self.trackerlist[t]
+                            return
+            else:
+                tracker = self.special
+                self.special = None
+                if self.rerequest_single(tracker, s, callback):
+                    return
+            # no success from any tracker
+            self.externalsched(fail)
+        except:
+            self.exception(callback)
+
+
+    def _fail(self, callback):
+        if ( (self.upratefunc() < 100 and self.downratefunc() < 100)
+             or not self.amount_left() ):
+            for f in ['rejected', 'bad_data', 'troublecode']:
+                if self.errorcodes.has_key(f):
+                    r = self.errorcodes[f]
+                    break
+            else:
+                r = 'Problem connecting to tracker - unspecified error'
+            self.errorfunc(r)
+
+        self.last_failed = True
+        self.lock.give_up()
+        self.externalsched(callback)
+
+
+    def rerequest_single(self, t, s, callback):
+        l = self.lock.set()
+        rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback])
+        rq.setDaemon(False)
+        rq.start()
+        self.lock.wait()
+        if self.lock.success:
+            self.lastsuccessful = t
+            self.last_failed = False
+            self.never_succeeded = False
+            return True
+        if not self.last_failed and self.lastsuccessful == t:
+            # if the last tracker hit was successful, and you've just tried the tracker
+            # you'd contacted before, don't go any further, just fail silently.
+            self.last_failed = True
+            self.externalsched(callback)
+            self.lock.give_up()
+            return True
+        return False    # returns true if it wants rerequest() to exit
+
+
+    def _rerequest_single(self, t, s, l, callback):
+        try:        
+            closer = [None]
+            def timedout(self = self, l = l, closer = closer):
+                if self.lock.trip(l):
+                    self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded'
+                    self.lock.unwait(l)
+                try:
+                    closer[0]()
+                except:
+                    pass
+                    
+            self.externalsched(timedout, self.timeout)
+
+            err = None
+            try:
+                url,q = t.split('?',1)
+                q += '&'+s
+            except:
+                url = t
+                q = s
+            try:
+                h = urlopen(url+'?'+q)
+                closer[0] = h.close
+                data = h.read()
+            except (IOError, error), e:
+                err = 'Problem connecting to tracker - ' + str(e)
+            except:
+                err = 'Problem connecting to tracker'
+            try:
+                h.close()
+            except:
+                pass
+            if err:        
+                if self.lock.trip(l):
+                    self.errorcodes['troublecode'] = err
+                    self.lock.unwait(l)
+                return
+
+            if data == '':
+                if self.lock.trip(l):
+                    self.errorcodes['troublecode'] = 'no data from tracker'
+                    self.lock.unwait(l)
+                return
+            
+            try:
+                r = bdecode(data, sloppy=1)
+                check_peers(r)
+            except ValueError, e:
+                if self.lock.trip(l):
+                    self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e)
+                    self.lock.unwait(l)
+                return
+            
+            if r.has_key('failure reason'):
+                if self.lock.trip(l):
+                    self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason']
+                    self.lock.unwait(l)
+                return
+                
+            if self.lock.trip(l, True):     # success!
+                self.lock.unwait(l)
+            else:
+                callback = lambda: None     # attempt timed out, don't do a callback
+
+            # even if the attempt timed out, go ahead and process data
+            def add(self = self, r = r, callback = callback):
+                self.postrequest(r, callback)
+            self.externalsched(add)
+        except:
+            self.exception(callback)
+
+
+    def postrequest(self, r, callback):
+        if r.has_key('warning message'):
+            self.errorfunc('warning from tracker - ' + r['warning message'])
+        self.announce_interval = r.get('interval', self.announce_interval)
+        self.interval = r.get('min interval', self.interval)
+        self.trackerid = r.get('tracker id', self.trackerid)
+        self.last = r.get('last')
+#        ps = len(r['peers']) + self.howmany()
+        p = r['peers']
+        peers = []
+        if type(p) == type(''):
+            lenpeers = len(p)/6
+        else:
+            lenpeers = len(p)
+        cflags = r.get('crypto_flags')
+        if type(cflags) != type('') or len(cflags) != lenpeers:
+            cflags = None
+        if cflags is None:
+            cflags = [None for i in xrange(lenpeers)]
+        else:
+            cflags = [ord(x) for x in cflags]
+        if type(p) == type(''):
+            for x in xrange(0, len(p), 6):
+                ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
+                port = (ord(p[x+4]) << 8) | ord(p[x+5])
+                peers.append(((ip, port), 0, cflags[int(x/6)]))
+        else:
+            for i in xrange(len(p)):
+                x = p[i]
+                peers.append(((x['ip'].strip(), x['port']),
+                              x.get('peer id',0), cflags[i]))
+        ps = len(peers) + self.howmany()
+        if ps < self.maxpeers:
+            if self.doneflag.isSet():
+                if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
+                    self.last = None
+            else:
+                if r.get('num peers', 1000) > ps * 1.2:
+                    self.last = None
+        if self.seededfunc and r.get('seeded'):
+            self.seededfunc()
+        elif peers:
+            shuffle(peers)
+            self.connect(peers)
+        callback()
+
+    def exception(self, callback):
+        data = StringIO()
+        print_exc(file = data)
+        def r(s = data.getvalue(), callback = callback):
+            if self.excfunc:
+                self.excfunc(s)
+            else:
+                print s
+            callback()
+        self.externalsched(r)
+
+
+class SuccessLock:
+    def __init__(self):
+        self.lock = Lock()
+        self.pause = Lock()
+        self.code = 0L
+        self.success = False
+        self.finished = True
+
+    def reset(self):
+        self.success = False
+        self.finished = False
+
+    def set(self):
+        self.lock.acquire()
+        if not self.pause.locked():
+            self.pause.acquire()
+        self.first = True
+        self.code += 1L
+        self.lock.release()
+        return self.code
+
+    def trip(self, code, s = False):
+        self.lock.acquire()
+        try:
+            if code == self.code and not self.finished:
+                r = self.first
+                self.first = False
+                if s:
+                    self.finished = True
+                    self.success = True
+                return r
+        finally:
+            self.lock.release()
+
+    def give_up(self):
+        self.lock.acquire()
+        self.success = False
+        self.finished = True
+        self.lock.release()
+
+    def wait(self):
+        self.pause.acquire()
+
+    def unwait(self, code):
+        if code == self.code and self.pause.locked():
+            self.pause.release()
+
+    def isfinished(self):
+        self.lock.acquire()
+        x = self.finished
+        self.lock.release()
+        return x    
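
A note on Rerequester.py above: add_key()/get_key() derive a stable 6-character
"key" parameter per tracker URL from the process id and start time, so the
tracker can recognise the client across announces within one run. A small
usage sketch, assuming Python 2 and the imported BitTornado tree on the path;
the tracker URL is an invented placeholder:

    from BitTornado.BT1.Rerequester import get_key

    k1 = get_key('http://tracker.example.invalid/announce')
    k2 = get_key('http://tracker.example.invalid/announce')
    assert k1 == k2                        # cached per tracker URL
    assert k1.startswith('&key=')
    assert len(k1) == len('&key=') + 6     # six characters from mapbase64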

Added: debtorrent/branches/upstream/current/BitTornado/BT1/Statistics.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/Statistics.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/Statistics.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/Statistics.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,177 @@
+# Written by Edward Keyes
+# see LICENSE.txt for license information
+
+from threading import Event
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+class Statistics_Response:
+    pass    # empty class
+
+
+class Statistics:
+    def __init__(self, upmeasure, downmeasure, connecter, httpdl,
+                 ratelimiter, rerequest_lastfailed, fdatflag):
+        self.upmeasure = upmeasure
+        self.downmeasure = downmeasure
+        self.connecter = connecter
+        self.httpdl = httpdl
+        self.ratelimiter = ratelimiter
+        self.downloader = connecter.downloader
+        self.picker = connecter.downloader.picker
+        self.storage = connecter.downloader.storage
+        self.torrentmeasure = connecter.downloader.totalmeasure
+        self.rerequest_lastfailed = rerequest_lastfailed
+        self.fdatflag = fdatflag
+        self.fdatactive = False
+        self.piecescomplete = None
+        self.placesopen = None
+        self.storage_totalpieces = len(self.storage.hashes)
+
+
+    def set_dirstats(self, files, piece_length):
+        self.piecescomplete = 0
+        self.placesopen = 0
+        self.filelistupdated = Event()
+        self.filelistupdated.set()
+        frange = xrange(len(files))
+        self.filepieces = [[] for x in frange]
+        self.filepieces2 = [[] for x in frange]
+        self.fileamtdone = [0.0 for x in frange]
+        self.filecomplete = [False for x in frange]
+        self.fileinplace = [False for x in frange]
+        start = 0L
+        for i in frange:
+            l = files[i][1]
+            if l == 0:
+                self.fileamtdone[i] = 1.0
+                self.filecomplete[i] = True
+                self.fileinplace[i] = True
+            else:
+                fp = self.filepieces[i]
+                fp2 = self.filepieces2[i]
+                for piece in range(int(start/piece_length),
+                                   int((start+l-1)/piece_length)+1):
+                    fp.append(piece)
+                    fp2.append(piece)
+                start += l
+
+
+    def update(self):
+        s = Statistics_Response()
+        s.upTotal = self.upmeasure.get_total()
+        s.downTotal = self.downmeasure.get_total()
+        s.last_failed = self.rerequest_lastfailed()
+        s.external_connection_made = self.connecter.external_connection_made
+        if s.downTotal > 0:
+            s.shareRating = float(s.upTotal)/s.downTotal
+        elif s.upTotal == 0:
+            s.shareRating = 0.0
+        else:
+            s.shareRating = -1.0
+        s.torrentRate = self.torrentmeasure.get_rate()
+        s.torrentTotal = self.torrentmeasure.get_total()
+        s.numSeeds = self.picker.seeds_connected
+        s.numOldSeeds = self.downloader.num_disconnected_seeds()
+        s.numPeers = len(self.downloader.downloads)-s.numSeeds
+        s.numCopies = 0.0
+        for i in self.picker.crosscount:
+            if i==0:
+                s.numCopies+=1
+            else:
+                s.numCopies+=1-float(i)/self.picker.numpieces
+                break
+        if self.picker.done:
+            s.numCopies2 = s.numCopies + 1
+        else:
+            s.numCopies2 = 0.0
+            for i in self.picker.crosscount2:
+                if i==0:
+                    s.numCopies2+=1
+                else:
+                    s.numCopies2+=1-float(i)/self.picker.numpieces
+                    break
+        s.discarded = self.downloader.discarded
+        s.numSeeds += self.httpdl.seedsfound
+        s.numOldSeeds += self.httpdl.seedsfound
+        if s.numPeers == 0 or self.picker.numpieces == 0:
+            s.percentDone = 0.0
+        else:
+            s.percentDone = 100.0*(float(self.picker.totalcount)/self.picker.numpieces)/s.numPeers
+
+        s.backgroundallocating = self.storage.bgalloc_active
+        s.storage_totalpieces = len(self.storage.hashes)
+        s.storage_active = len(self.storage.stat_active)
+        s.storage_new = len(self.storage.stat_new)
+        s.storage_dirty = len(self.storage.dirty)
+        numdownloaded = self.storage.stat_numdownloaded
+        s.storage_justdownloaded = numdownloaded
+        s.storage_numcomplete = self.storage.stat_numfound + numdownloaded
+        s.storage_numflunked = self.storage.stat_numflunked
+        s.storage_isendgame = self.downloader.endgamemode
+
+        s.peers_kicked = self.downloader.kicked.items()
+        s.peers_banned = self.downloader.banned.items()
+
+        try:
+            s.upRate = int(self.ratelimiter.upload_rate/1000)
+            assert s.upRate < 5000
+        except:
+            s.upRate = 0
+        s.upSlots = self.ratelimiter.slots
+
+        if self.piecescomplete is None:     # not a multi-file torrent
+            return s
+        
+        if self.fdatflag.isSet():
+            if not self.fdatactive:
+                self.fdatactive = True
+        else:
+            self.fdatactive = False
+
+        if self.piecescomplete != self.picker.numgot:
+            for i in xrange(len(self.filecomplete)):
+                if self.filecomplete[i]:
+                    continue
+                oldlist = self.filepieces[i]
+                newlist = [ piece
+                            for piece in oldlist
+                            if not self.storage.have[piece] ]
+                if len(newlist) != len(oldlist):
+                    self.filepieces[i] = newlist
+                    self.fileamtdone[i] = (
+                        (len(self.filepieces2[i])-len(newlist))
+                         /float(len(self.filepieces2[i])) )
+                    if not newlist:
+                        self.filecomplete[i] = True
+                    self.filelistupdated.set()
+
+            self.piecescomplete = self.picker.numgot
+
+        if ( self.filelistupdated.isSet()
+                 or self.placesopen != len(self.storage.places) ):
+            for i in xrange(len(self.filecomplete)):
+                if not self.filecomplete[i] or self.fileinplace[i]:
+                    continue
+                while self.filepieces2[i]:
+                    piece = self.filepieces2[i][-1]
+                    if self.storage.places[piece] != piece:
+                        break
+                    del self.filepieces2[i][-1]
+                if not self.filepieces2[i]:
+                    self.fileinplace[i] = True
+                    self.storage.set_file_readonly(i)
+                    self.filelistupdated.set()
+
+            self.placesopen = len(self.storage.places)
+
+        s.fileamtdone = self.fileamtdone
+        s.filecomplete = self.filecomplete
+        s.fileinplace = self.fileinplace
+        s.filelistupdated = self.filelistupdated
+
+        return s
+
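
A note on Statistics.py above: the numCopies loop in update() turns
PiecePicker.crosscount (crosscount[k] = number of pieces that exactly k
connected peers have) into the familiar "distributed copies" figure. A worked
example with invented numbers: four pieces, none missing entirely, one held by
a single peer and the other three held by two peers:

    crosscount = [0, 1, 3]
    numpieces = 4

    copies = 0.0
    for count in crosscount:
        if count == 0:
            copies += 1                   # every piece reaches this level
        else:
            copies += 1 - float(count) / numpieces
            break                         # stop at the first incomplete level
    assert copies == 1.75                 # one full copy plus 3/4 of another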

Added: debtorrent/branches/upstream/current/BitTornado/BT1/Storage.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/Storage.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/Storage.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/Storage.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,584 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.piecebuffer import BufferPool
+from threading import Lock
+from time import time, strftime, localtime
+import os
+from os.path import exists, getsize, getmtime, basename
+from traceback import print_exc
+try:
+    from os import fsync
+except ImportError:
+    fsync = lambda x: None
+from bisect import bisect
+    
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+MAXREADSIZE = 32768
+MAXLOCKSIZE = 1000000000L
+MAXLOCKRANGE = 3999999999L   # only lock first 4 gig of file
+
+_pool = BufferPool()
+PieceBuffer = _pool.new
+
+def dummy_status(fractionDone = None, activity = None):
+    pass
+
+class Storage:
+    def __init__(self, files, piece_length, doneflag, config,
+                 disabled_files = None):
+        # can raise IOError and ValueError
+        self.files = files
+        self.piece_length = piece_length
+        self.doneflag = doneflag
+        self.disabled = [False] * len(files)
+        self.file_ranges = []
+        self.disabled_ranges = []
+        self.working_ranges = []
+        numfiles = 0
+        total = 0L
+        so_far = 0L
+        self.handles = {}
+        self.whandles = {}
+        self.tops = {}
+        self.sizes = {}
+        self.mtimes = {}
+        if config.get('lock_files', True):
+            self.lock_file, self.unlock_file = self._lock_file, self._unlock_file
+        else:
+            self.lock_file, self.unlock_file = lambda x1,x2: None, lambda x1,x2: None
+        self.lock_while_reading = config.get('lock_while_reading', False)
+        self.lock = Lock()
+
+        if not disabled_files:
+            disabled_files = [False] * len(files)
+
+        for i in xrange(len(files)):
+            file, length = files[i]
+            if doneflag.isSet():    # bail out if doneflag is set
+                return
+            self.disabled_ranges.append(None)
+            if length == 0:
+                self.file_ranges.append(None)
+                self.working_ranges.append([])
+            else:
+                range = (total, total + length, 0, file)
+                self.file_ranges.append(range)
+                self.working_ranges.append([range])
+                numfiles += 1
+                total += length
+                if disabled_files[i]:
+                    l = 0
+                else:
+                    if exists(file):
+                        l = getsize(file)
+                        if l > length:
+                            h = open(file, 'rb+')
+                            h.truncate(length)
+                            h.flush()
+                            h.close()
+                            l = length
+                    else:
+                        l = 0
+                        h = open(file, 'wb+')
+                        h.flush()
+                        h.close()
+                    self.mtimes[file] = getmtime(file)
+                self.tops[file] = l
+                self.sizes[file] = length
+                so_far += l
+
+        self.total_length = total
+        self._reset_ranges()
+
+        self.max_files_open = config['max_files_open']
+        if self.max_files_open > 0 and numfiles > self.max_files_open:
+            self.handlebuffer = []
+        else:
+            self.handlebuffer = None
+
+
+    if os.name == 'nt':
+        def _lock_file(self, name, f):
+            import msvcrt
+            for p in range(0, min(self.sizes[name],MAXLOCKRANGE), MAXLOCKSIZE):
+                f.seek(p)
+                msvcrt.locking(f.fileno(), msvcrt.LK_LOCK,
+                               min(MAXLOCKSIZE,self.sizes[name]-p))
+
+        def _unlock_file(self, name, f):
+            import msvcrt
+            for p in range(0, min(self.sizes[name],MAXLOCKRANGE), MAXLOCKSIZE):
+                f.seek(p)
+                msvcrt.locking(f.fileno(), msvcrt.LK_UNLCK,
+                               min(MAXLOCKSIZE,self.sizes[name]-p))
+
+    elif os.name == 'posix':
+        def _lock_file(self, name, f):
+            import fcntl
+            fcntl.flock(f.fileno(), fcntl.LOCK_EX)
+
+        def _unlock_file(self, name, f):
+            import fcntl
+            fcntl.flock(f.fileno(), fcntl.LOCK_UN)
+
+    else:
+        def _lock_file(self, name, f):
+            pass
+        def _unlock_file(self, name, f):
+            pass
+
+
+    def was_preallocated(self, pos, length):
+        for file, begin, end in self._intervals(pos, length):
+            if self.tops.get(file, 0) < end:
+                return False
+        return True
+
+
+    def _sync(self, file):
+        self._close(file)
+        if self.handlebuffer:
+            self.handlebuffer.remove(file)
+
+    def sync(self):
+        # may raise IOError or OSError
+        for file in self.whandles.keys():
+            self._sync(file)
+
+
+    def set_readonly(self, f=None):
+        if f is None:
+            self.sync()
+            return
+        file = self.files[f][0]
+        if self.whandles.has_key(file):
+            self._sync(file)
+            
+
+    def get_total_length(self):
+        return self.total_length
+
+
+    def _open(self, file, mode):
+        if self.mtimes.has_key(file):
+            try:
+                if self.handlebuffer is not None:
+                    assert getsize(file) == self.tops[file]
+                    newmtime = getmtime(file)
+                    oldmtime = self.mtimes[file]
+                    assert newmtime <= oldmtime+1
+                    assert newmtime >= oldmtime-1
+            except:
+                if DEBUG:
+                    print ( file+' modified: '
+                            +strftime('(%x %X)',localtime(self.mtimes[file]))
+                            +strftime(' != (%x %X) ?',localtime(getmtime(file))) )
+                raise IOError('modified during download')
+        try:
+            return open(file, mode)
+        except:
+            if DEBUG:
+                print_exc()
+            raise
+
+
+    def _close(self, file):
+        f = self.handles[file]
+        del self.handles[file]
+        if self.whandles.has_key(file):
+            del self.whandles[file]
+            f.flush()
+            self.unlock_file(file, f)
+            f.close()
+            self.tops[file] = getsize(file)
+            self.mtimes[file] = getmtime(file)
+        else:
+            if self.lock_while_reading:
+                self.unlock_file(file, f)
+            f.close()
+
+
+    def _close_file(self, file):
+        if not self.handles.has_key(file):
+            return
+        self._close(file)
+        if self.handlebuffer:
+            self.handlebuffer.remove(file)
+        
+
+    def _get_file_handle(self, file, for_write):
+        if self.handles.has_key(file):
+            if for_write and not self.whandles.has_key(file):
+                self._close(file)
+                try:
+                    f = self._open(file, 'rb+')
+                    self.handles[file] = f
+                    self.whandles[file] = 1
+                    self.lock_file(file, f)
+                except (IOError, OSError), e:
+                    if DEBUG:
+                        print_exc()
+                    raise IOError('unable to reopen '+file+': '+str(e))
+
+            if self.handlebuffer:
+                if self.handlebuffer[-1] != file:
+                    self.handlebuffer.remove(file)
+                    self.handlebuffer.append(file)
+            elif self.handlebuffer is not None:
+                self.handlebuffer.append(file)
+        else:
+            try:
+                if for_write:
+                    f = self._open(file, 'rb+')
+                    self.handles[file] = f
+                    self.whandles[file] = 1
+                    self.lock_file(file, f)
+                else:
+                    f = self._open(file, 'rb')
+                    self.handles[file] = f
+                    if self.lock_while_reading:
+                        self.lock_file(file, f)
+            except (IOError, OSError), e:
+                if DEBUG:
+                    print_exc()
+                raise IOError('unable to open '+file+': '+str(e))
+            
+            if self.handlebuffer is not None:
+                self.handlebuffer.append(file)
+                if len(self.handlebuffer) > self.max_files_open:
+                    self._close(self.handlebuffer.pop(0))
+
+        return self.handles[file]
+
+
+    def _reset_ranges(self):
+        self.ranges = []
+        for l in self.working_ranges:
+            self.ranges.extend(l)
+            self.begins = [i[0] for i in self.ranges]
+
+    def _intervals(self, pos, amount):
+        r = []
+        stop = pos + amount
+        p = bisect(self.begins, pos) - 1
+        while p < len(self.ranges):
+            begin, end, offset, file = self.ranges[p]
+            if begin >= stop:
+                break
+            r.append(( file,
+                       offset + max(pos, begin) - begin,
+                       offset + min(end, stop) - begin   ))
+            p += 1
+        return r
+
+
+    def read(self, pos, amount, flush_first = False):
+        r = PieceBuffer()
+        for file, pos, end in self._intervals(pos, amount):
+            if DEBUG:
+                print 'reading '+file+' from '+str(pos)+' to '+str(end)
+            self.lock.acquire()
+            h = self._get_file_handle(file, False)
+            if flush_first and self.whandles.has_key(file):
+                h.flush()
+                fsync(h)
+            h.seek(pos)
+            while pos < end:
+                length = min(end-pos, MAXREADSIZE)
+                data = h.read(length)
+                if len(data) != length:
+                    raise IOError('error reading data from '+file)
+                r.append(data)
+                pos += length
+            self.lock.release()
+        return r
+
+    def write(self, pos, s):
+        # might raise an IOError
+        total = 0
+        for file, begin, end in self._intervals(pos, len(s)):
+            if DEBUG:
+                print 'writing '+file+' from '+str(pos)+' to '+str(end)
+            self.lock.acquire()
+            h = self._get_file_handle(file, True)
+            h.seek(begin)
+            h.write(s[total: total + end - begin])
+            self.lock.release()
+            total += end - begin
+
+    def top_off(self):
+        for begin, end, offset, file in self.ranges:
+            l = offset + end - begin
+            if l > self.tops.get(file, 0):
+                self.lock.acquire()
+                h = self._get_file_handle(file, True)
+                h.seek(l-1)
+                h.write(chr(0xFF))
+                self.lock.release()
+
+    def flush(self):
+        # may raise IOError or OSError
+        for file in self.whandles.keys():
+            self.lock.acquire()
+            self.handles[file].flush()
+            self.lock.release()
+
+    def close(self):
+        for file, f in self.handles.items():
+            try:
+                self.unlock_file(file, f)
+            except:
+                pass
+            try:
+                f.close()
+            except:
+                pass
+        self.handles = {}
+        self.whandles = {}
+        self.handlebuffer = None
+
+
+    def _get_disabled_ranges(self, f):
+        if not self.file_ranges[f]:
+            return ((),(),())
+        r = self.disabled_ranges[f]
+        if r:
+            return r
+        start, end, offset, file = self.file_ranges[f]
+        if DEBUG:
+            print 'calculating disabled range for '+self.files[f][0]
+            print 'bytes: '+str(start)+'-'+str(end)
+            print 'file spans pieces '+str(int(start/self.piece_length))+'-'+str(int((end-1)/self.piece_length)+1)
+        pieces = range( int(start/self.piece_length),
+                        int((end-1)/self.piece_length)+1 )
+        offset = 0
+        disabled_files = []
+        if len(pieces) == 1:
+            if ( start % self.piece_length == 0
+                 and end % self.piece_length == 0 ):   # happens to be a single,
+                                                       # perfect piece
+                working_range = [(start, end, offset, file)]
+                update_pieces = []
+            else:
+                midfile = os.path.join(self.bufferdir,str(f))
+                working_range = [(start, end, 0, midfile)]
+                disabled_files.append((midfile, start, end))
+                length = end - start
+                self.sizes[midfile] = length
+                piece = pieces[0]
+                update_pieces = [(piece, start-(piece*self.piece_length), length)]
+        else:
+            update_pieces = []
+            if start % self.piece_length != 0:  # doesn't begin on an even piece boundary
+                end_b = pieces[1]*self.piece_length
+                startfile = os.path.join(self.bufferdir,str(f)+'b')
+                working_range_b = [ ( start, end_b, 0, startfile ) ]
+                disabled_files.append((startfile, start, end_b))
+                length = end_b - start
+                self.sizes[startfile] = length
+                offset = length
+                piece = pieces.pop(0)
+                update_pieces.append((piece, start-(piece*self.piece_length), length))
+            else:
+                working_range_b = []
+            if f  != len(self.files)-1 and end % self.piece_length != 0:
+                                                # doesn't end on an even piece boundary
+                start_e = pieces[-1] * self.piece_length
+                endfile = os.path.join(self.bufferdir,str(f)+'e')
+                working_range_e = [ ( start_e, end, 0, endfile ) ]
+                disabled_files.append((endfile, start_e, end))
+                length = end - start_e
+                self.sizes[endfile] = length
+                piece = pieces.pop(-1)
+                update_pieces.append((piece, 0, length))
+            else:
+                working_range_e = []
+            if pieces:
+                working_range_m = [ ( pieces[0]*self.piece_length,
+                                      (pieces[-1]+1)*self.piece_length,
+                                      offset, file ) ]
+            else:
+                working_range_m = []
+            working_range = working_range_b + working_range_m + working_range_e
+
+        if DEBUG:            
+            print str(working_range)
+            print str(update_pieces)
+        r = (tuple(working_range), tuple(update_pieces), tuple(disabled_files))
+        self.disabled_ranges[f] = r
+        return r
+        
+
+    def set_bufferdir(self, dir):
+        self.bufferdir = dir
+
+    def enable_file(self, f):
+        if not self.disabled[f]:
+            return
+        self.disabled[f] = False
+        r = self.file_ranges[f]
+        if not r:
+            return
+        file = r[3]
+        if not exists(file):
+            h = open(file, 'wb+')
+            h.flush()
+            h.close()
+        if not self.tops.has_key(file):
+            self.tops[file] = getsize(file)
+        if not self.mtimes.has_key(file):
+            self.mtimes[file] = getmtime(file)
+        self.working_ranges[f] = [r]
+
+    def disable_file(self, f):
+        if self.disabled[f]:
+            return
+        self.disabled[f] = True
+        r = self._get_disabled_ranges(f)
+        if not r:
+            return
+        for file, begin, end in r[2]:
+            if not os.path.isdir(self.bufferdir):
+                os.makedirs(self.bufferdir)
+            if not exists(file):
+                h = open(file, 'wb+')
+                h.flush()
+                h.close()
+            if not self.tops.has_key(file):
+                self.tops[file] = getsize(file)
+            if not self.mtimes.has_key(file):
+                self.mtimes[file] = getmtime(file)
+        self.working_ranges[f] = r[0]
+
+    reset_file_status = _reset_ranges
+
+
+    def get_piece_update_list(self, f):
+        return self._get_disabled_ranges(f)[1]
+
+
+    def delete_file(self, f):
+        try:
+            os.remove(self.files[f][0])
+        except:
+            pass
+
+
+    '''
+    Pickled data format:
+
+    d['files'] = [ file #, size, mtime {, file #, size, mtime...} ]
+                    the file's index in the torrent, followed by the size and
+                    last modification time of that file.  Missing files are
+                    either empty or disabled.
+    d['partial files'] = [ name, size, mtime... ]
+                    Names, sizes and last modification times of files containing
+                    partial piece data.  Filenames go by the following convention:
+                    {file #, 0-based}{nothing, "b" or "e"}
+                    e.g.: "0e" "3" "4b" "4e"
+                    Here "b" denotes the partial data for the first piece in
+                    the file, "e" the last piece, and no letter means that the
+                    file is disabled but smaller than one piece, with all of
+                    its data cached inside so that adjacent files can still
+                    be verified.
+    '''
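+    # For illustration only (hypothetical values, not output of this code):
+    # a torrent in which files 0 and 1 are enabled, and file 4 is disabled
+    # with partial-piece buffers, could pickle as
+    #   { 'files': [0, 1048576, 1178000000,  1, 524288, 1178000000],
+    #     'partial files': ['4b', 12000, 1178000000,  '4e', 8000, 1178000000] }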
+    def pickle(self):
+        files = []
+        pfiles = []
+        for i in xrange(len(self.files)):
+            if not self.files[i][1]:    # length == 0
+                continue
+            if self.disabled[i]:
+                for file, start, end in self._get_disabled_ranges(i)[2]:
+                    pfiles.extend([basename(file),getsize(file),int(getmtime(file))])
+                continue
+            file = self.files[i][0]
+            files.extend([i,getsize(file),int(getmtime(file))])
+        return {'files': files, 'partial files': pfiles}
+
+
+    def unpickle(self, data):
+        # assume all previously-disabled files have already been disabled
+        try:
+            files = {}
+            pfiles = {}
+            l = data['files']
+            assert len(l) % 3 == 0
+            l = [l[x:x+3] for x in xrange(0,len(l),3)]
+            for f, size, mtime in l:
+                files[f] = (size, mtime)
+            l = data.get('partial files',[])
+            assert len(l) % 3 == 0
+            l = [l[x:x+3] for x in xrange(0,len(l),3)]
+            for file, size, mtime in l:
+                pfiles[file] = (size, mtime)
+
+            valid_pieces = {}
+            for i in xrange(len(self.files)):
+                if self.disabled[i]:
+                    continue
+                r = self.file_ranges[i]
+                if not r:
+                    continue
+                start, end, offset, file = r
+                if DEBUG:
+                    print 'adding '+file
+                for p in xrange( int(start/self.piece_length),
+                                 int((end-1)/self.piece_length)+1 ):
+                    valid_pieces[p] = 1
+
+            if DEBUG:
+                print valid_pieces.keys()
+            
+            def test(old, size, mtime):
+                oldsize, oldmtime = old
+                if size != oldsize:
+                    return False
+                if mtime > oldmtime+1:
+                    return False
+                if mtime < oldmtime-1:
+                    return False
+                return True
+
+            for i in xrange(len(self.files)):
+                if self.disabled[i]:
+                    for file, start, end in self._get_disabled_ranges(i)[2]:
+                        f1 = basename(file)
+                        if ( not pfiles.has_key(f1)
+                             or not test(pfiles[f1],getsize(file),getmtime(file)) ):
+                            if DEBUG:
+                                print 'removing '+file
+                            for p in xrange( int(start/self.piece_length),
+                                             int((end-1)/self.piece_length)+1 ):
+                                if valid_pieces.has_key(p):
+                                    del valid_pieces[p]
+                    continue
+                file, size = self.files[i]
+                if not size:
+                    continue
+                if ( not files.has_key(i)
+                     or not test(files[i],getsize(file),getmtime(file)) ):
+                    start, end, offset, file = self.file_ranges[i]
+                    if DEBUG:
+                        print 'removing '+file
+                    for p in xrange( int(start/self.piece_length),
+                                     int((end-1)/self.piece_length)+1 ):
+                        if valid_pieces.has_key(p):
+                            del valid_pieces[p]
+        except:
+            if DEBUG:
+                print_exc()
+            return []
+
+        if DEBUG:
+            print valid_pieces.keys()                        
+        return valid_pieces.keys()
+

Added: debtorrent/branches/upstream/current/BitTornado/BT1/StorageWrapper.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/StorageWrapper.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/StorageWrapper.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/StorageWrapper.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1045 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.bitfield import Bitfield
+from sha import sha
+from BitTornado.clock import clock
+from traceback import print_exc
+from random import randrange
+try:
+    True
+except:
+    True = 1
+    False = 0
+try:
+    from bisect import insort
+except:
+    def insort(l, item):
+        l.append(item)
+        l.sort()
+
+DEBUG = False
+
+STATS_INTERVAL = 0.2
+
+def dummy_status(fractionDone = None, activity = None):
+    pass
+
+class Olist:
+    def __init__(self, l = []):
+        self.d = {}
+        for i in l:
+            self.d[i] = 1
+    def __len__(self):
+        return len(self.d)
+    def includes(self, i):
+        return self.d.has_key(i)
+    def add(self, i):
+        self.d[i] = 1
+    def extend(self, l):
+        for i in l:
+            self.d[i] = 1
+    def pop(self, n=0):
+        # assert self.d
+        k = self.d.keys()
+        if n == 0:
+            i = min(k)
+        elif n == -1:
+            i = max(k)
+        else:
+            k.sort()
+            i = k[n]
+        del self.d[i]
+        return i
+    def remove(self, i):
+        if self.d.has_key(i):
+            del self.d[i]
+
+class fakeflag:
+    def __init__(self, state=False):
+        self.state = state
+    def wait(self):
+        pass
+    def isSet(self):
+        return self.state
+
+
+class StorageWrapper:
+    def __init__(self, storage, request_size, hashes, 
+            piece_size, finished, failed, 
+            statusfunc = dummy_status, flag = fakeflag(), check_hashes = True,
+            data_flunked = lambda x: None, backfunc = None,
+            config = {}, unpauseflag = fakeflag(True) ):
+        self.storage = storage
+        self.request_size = long(request_size)
+        self.hashes = hashes
+        self.piece_size = long(piece_size)
+        self.piece_length = long(piece_size)
+        self.finished = finished
+        self.failed = failed
+        self.statusfunc = statusfunc
+        self.flag = flag
+        self.check_hashes = check_hashes
+        self.data_flunked = data_flunked
+        self.backfunc = backfunc
+        self.config = config
+        self.unpauseflag = unpauseflag
+        
+        self.alloc_type = config.get('alloc_type','normal')
+        self.double_check = config.get('double_check', 0)
+        self.triple_check = config.get('triple_check', 0)
+        if self.triple_check:
+            self.double_check = True
+        self.bgalloc_enabled = False
+        self.bgalloc_active = False
+        self.total_length = storage.get_total_length()
+        self.amount_left = self.total_length
+        if self.total_length <= self.piece_size * (len(hashes) - 1):
+            raise ValueError, 'bad data in responsefile - total too small'
+        if self.total_length > self.piece_size * len(hashes):
+            raise ValueError, 'bad data in responsefile - total too big'
+        self.numactive = [0] * len(hashes)
+        self.inactive_requests = [1] * len(hashes)
+        self.amount_inactive = self.total_length
+        self.amount_obtained = 0
+        self.amount_desired = self.total_length
+        self.have = Bitfield(len(hashes))
+        self.have_cloaked_data = None
+        self.blocked = [False] * len(hashes)
+        self.blocked_holes = []
+        self.blocked_movein = Olist()
+        self.blocked_moveout = Olist()
+        self.waschecked = [False] * len(hashes)
+        self.places = {}
+        self.holes = []
+        self.stat_active = {}
+        self.stat_new = {}
+        self.dirty = {}
+        self.stat_numflunked = 0
+        self.stat_numdownloaded = 0
+        self.stat_numfound = 0
+        self.download_history = {}
+        self.failed_pieces = {}
+        self.out_of_place = 0
+        self.write_buf_max = config['write_buffer_size']*1048576L
+        self.write_buf_size = 0L
+        self.write_buf = {}   # structure:  piece: [(start, data), ...]
+        self.write_buf_list = []
+
+        self.initialize_tasks = [
+            ['checking existing data', 0, self.init_hashcheck, self.hashcheckfunc],
+            ['moving data', 1, self.init_movedata, self.movedatafunc],
+            ['allocating disk space', 1, self.init_alloc, self.allocfunc] ]
+
+        self.backfunc(self._bgalloc,0.1)
+        self.backfunc(self._bgsync,max(self.config['auto_flush']*60,60))
+
+    def _bgsync(self):
+        if self.config['auto_flush']:
+            self.sync()
+        self.backfunc(self._bgsync,max(self.config['auto_flush']*60,60))
+
+
+    def old_style_init(self):
+        while self.initialize_tasks:
+            msg, done, init, next = self.initialize_tasks.pop(0)
+            if init():
+                self.statusfunc(activity = msg, fractionDone = done)
+                t = clock() + STATS_INTERVAL
+                x = 0
+                while x is not None:
+                    if t < clock():
+                        t = clock() + STATS_INTERVAL
+                        self.statusfunc(fractionDone = x)
+                    self.unpauseflag.wait()
+                    if self.flag.isSet():
+                        return False
+                    x = next()
+
+        self.statusfunc(fractionDone = 0)
+        return True
+
+
+    def initialize(self, donefunc, statusfunc = None):
+        self.initialize_done = donefunc
+        if statusfunc is None:
+            statusfunc = self.statusfunc
+        self.initialize_status = statusfunc
+        self.initialize_next = None
+            
+        self.backfunc(self._initialize)
+
+    def _initialize(self):
+        if not self.unpauseflag.isSet():
+            self.backfunc(self._initialize, 1)
+            return
+        
+        if self.initialize_next:
+            x = self.initialize_next()
+            if x is None:
+                self.initialize_next = None
+            else:
+                self.initialize_status(fractionDone = x)
+        else:
+            if not self.initialize_tasks:
+                self.initialize_done()
+                return
+            msg, done, init, next = self.initialize_tasks.pop(0)
+            if init():
+                self.initialize_status(activity = msg, fractionDone = done)
+                self.initialize_next = next
+
+        self.backfunc(self._initialize)
+
+
+    def init_hashcheck(self):
+        if self.flag.isSet():
+            return False
+        self.check_list = []
+        if len(self.hashes) == 0 or self.amount_left == 0:
+            self.check_total = 0
+            self.finished()
+            return False
+
+        self.check_targets = {}
+        got = {}
+        for p,v in self.places.items():
+            assert not got.has_key(v)
+            got[v] = 1
+        for i in xrange(len(self.hashes)):
+            if self.places.has_key(i):  # restored from pickled
+                self.check_targets[self.hashes[i]] = []
+                if self.places[i] == i:
+                    continue
+                else:
+                    assert not got.has_key(i)
+                    self.out_of_place += 1
+            if got.has_key(i):
+                continue
+            if self._waspre(i):
+                if self.blocked[i]:
+                    self.places[i] = i
+                else:
+                    self.check_list.append(i)
+                continue
+            if not self.check_hashes:
+                self.failed('told file complete on start-up, but data is missing')
+                return False
+            self.holes.append(i)
+            if self.blocked[i] or self.check_targets.has_key(self.hashes[i]):
+                self.check_targets[self.hashes[i]] = [] # in case of a hash collision, discard
+            else:
+                self.check_targets[self.hashes[i]] = [i]
+        self.check_total = len(self.check_list)
+        self.check_numchecked = 0.0
+        self.lastlen = self._piecelen(len(self.hashes) - 1)
+        self.numchecked = 0.0
+        return self.check_total > 0
+
+    def _markgot(self, piece, pos):
+        if DEBUG:
+            print str(piece)+' at '+str(pos)
+        self.places[piece] = pos
+        self.have[piece] = True
+        len = self._piecelen(piece)
+        self.amount_obtained += len
+        self.amount_left -= len
+        self.amount_inactive -= len
+        self.inactive_requests[piece] = None
+        self.waschecked[piece] = self.check_hashes
+        self.stat_numfound += 1
+
+    def hashcheckfunc(self):
+        if self.flag.isSet():
+            return None
+        if not self.check_list:
+            return None
+        
+        i = self.check_list.pop(0)
+        if not self.check_hashes:
+            self._markgot(i, i)
+        else:
+            d1 = self.read_raw(i,0,self.lastlen)
+            if d1 is None:
+                return None
+            sh = sha(d1[:])
+            d1.release()
+            sp = sh.digest()
+            d2 = self.read_raw(i,self.lastlen,self._piecelen(i)-self.lastlen)
+            if d2 is None:
+                return None
+            sh.update(d2[:])
+            d2.release()
+            s = sh.digest()
+            if s == self.hashes[i]:
+                self._markgot(i, i)
+            elif ( self.check_targets.get(s)
+                   and self._piecelen(i) == self._piecelen(self.check_targets[s][-1]) ):
+                self._markgot(self.check_targets[s].pop(), i)
+                self.out_of_place += 1
+            elif ( not self.have[-1] and sp == self.hashes[-1]
+                   and (i == len(self.hashes) - 1
+                        or not self._waspre(len(self.hashes) - 1)) ):
+                self._markgot(len(self.hashes) - 1, i)
+                self.out_of_place += 1
+            else:
+                self.places[i] = i
+        self.numchecked += 1
+        if self.amount_left == 0:
+            self.finished()
+        return (self.numchecked / self.check_total)
+
+
+    def init_movedata(self):
+        if self.flag.isSet():
+            return False
+        if self.alloc_type != 'sparse':
+            return False
+        self.storage.top_off()  # sets file lengths to their final size
+        self.movelist = []
+        if self.out_of_place == 0:
+            for i in self.holes:
+                self.places[i] = i
+            self.holes = []
+            return False
+        self.tomove = float(self.out_of_place)
+        for i in xrange(len(self.hashes)):
+            if not self.places.has_key(i):
+                self.places[i] = i
+            elif self.places[i] != i:
+                self.movelist.append(i)
+        self.holes = []
+        return True
+
+    def movedatafunc(self):
+        if self.flag.isSet():
+            return None
+        if not self.movelist:
+            return None
+        i = self.movelist.pop(0)
+        old = self.read_raw(self.places[i], 0, self._piecelen(i))
+        if old is None:
+            return None
+        if not self.write_raw(i, 0, old):
+            return None
+        if self.double_check and self.have[i]:
+            if self.triple_check:
+                old.release()
+                old = self.read_raw( i, 0, self._piecelen(i),
+                                            flush_first = True )
+                if old is None:
+                    return None
+            if sha(old[:]).digest() != self.hashes[i]:
+                self.failed('download corrupted; please restart and resume')
+                return None
+        old.release()
+
+        self.places[i] = i
+        self.tomove -= 1
+        return (self.tomove / self.out_of_place)
+
+        
+    def init_alloc(self):
+        if self.flag.isSet():
+            return False
+        if not self.holes:
+            return False
+        self.numholes = float(len(self.holes))
+        self.alloc_buf = chr(0xFF) * self.piece_size
+        if self.alloc_type == 'pre-allocate':
+            self.bgalloc_enabled = True
+            return True
+        if self.alloc_type == 'background':
+            self.bgalloc_enabled = True
+        if self.blocked_moveout:
+            return True
+        return False
+
+
+    def _allocfunc(self):
+        while self.holes:
+            n = self.holes.pop(0)
+            if self.blocked[n]: # assume not self.blocked[index]
+                if not self.blocked_movein:
+                    self.blocked_holes.append(n)
+                    continue
+                if not self.places.has_key(n):
+                    b = self.blocked_movein.pop(0)
+                    oldpos = self._move_piece(b, n)
+                    self.places[oldpos] = oldpos
+                    return None
+            if self.places.has_key(n):
+                oldpos = self._move_piece(n, n)
+                self.places[oldpos] = oldpos
+                return None
+            return n
+        return None
+
+    def allocfunc(self):
+        if self.flag.isSet():
+            return None
+        
+        if self.blocked_moveout:
+            self.bgalloc_active = True
+            n = self._allocfunc()
+            if n is not None:
+                if self.blocked_moveout.includes(n):
+                    self.blocked_moveout.remove(n)
+                    b = n
+                else:
+                    b = self.blocked_moveout.pop(0)
+                oldpos = self._move_piece(b,n)
+                self.places[oldpos] = oldpos
+            return len(self.holes) / self.numholes
+
+        if self.holes and self.bgalloc_enabled:
+            self.bgalloc_active = True
+            n = self._allocfunc()
+            if n is not None:
+                self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)])
+                self.places[n] = n
+            return len(self.holes) / self.numholes
+
+        self.bgalloc_active = False
+        return None
+
+    def bgalloc(self):
+        if self.bgalloc_enabled:
+            if not self.holes and not self.blocked_moveout and self.backfunc:
+                self.backfunc(self.storage.flush)
+                # force a flush whenever the "finish allocation" button is hit
+        self.bgalloc_enabled = True
+        return False
+
+    def _bgalloc(self):
+        self.allocfunc()
+        if self.config.get('alloc_rate',0) < 0.1:
+            self.config['alloc_rate'] = 0.1
+        self.backfunc( self._bgalloc,
+              float(self.piece_size)/(self.config['alloc_rate']*1048576) )
+
+
+    def _waspre(self, piece):
+        return self.storage.was_preallocated(piece * self.piece_size, self._piecelen(piece))
+
+    def _piecelen(self, piece):
+        if piece < len(self.hashes) - 1:
+            return self.piece_size
+        else:
+            return self.total_length - (piece * self.piece_size)
+
+    def get_amount_left(self):
+        return self.amount_left
+
+    def do_I_have_anything(self):
+        return self.amount_left < self.total_length
+
+    def _make_inactive(self, index):
+        length = self._piecelen(index)
+        l = []
+        x = 0
+        while x + self.request_size < length:
+            l.append((x, self.request_size))
+            x += self.request_size
+        l.append((x, length - x))
+        self.inactive_requests[index] = l
+
+    def is_endgame(self):
+        return not self.amount_inactive
+
+    def am_I_complete(self):
+        return self.amount_obtained == self.amount_desired
+
+    def reset_endgame(self, requestlist):
+        for index, begin, length in requestlist:
+            self.request_lost(index, begin, length)
+
+    def get_have_list(self):
+        return self.have.tostring()
+
+    def get_have_list_cloaked(self):
+        if self.have_cloaked_data is None:
+            newhave = Bitfield(copyfrom = self.have)
+            unhaves = []
+            n = min(randrange(2,5),len(self.hashes))    # between 2-4 unless torrent is small
+            while len(unhaves) < n:
+                unhave = randrange(min(32,len(self.hashes)))    # all in first 4 bytes
+                if not unhave in unhaves:
+                    unhaves.append(unhave)
+                    newhave[unhave] = False
+            self.have_cloaked_data = (newhave.tostring(), unhaves)
+        return self.have_cloaked_data
+
+    def do_I_have(self, index):
+        return self.have[index]
+
+    def do_I_have_requests(self, index):
+        return not not self.inactive_requests[index]
+
+    def is_unstarted(self, index):
+        return ( not self.have[index] and not self.numactive[index]
+                 and not self.dirty.has_key(index) )
+
+    def get_hash(self, index):
+        return self.hashes[index]
+
+    def get_stats(self):
+        return self.amount_obtained, self.amount_desired
+
+    def new_request(self, index):
+        # returns (begin, length)
+        if self.inactive_requests[index] == 1:
+            self._make_inactive(index)
+        self.numactive[index] += 1
+        self.stat_active[index] = 1
+        if not self.dirty.has_key(index):
+            self.stat_new[index] = 1
+        rs = self.inactive_requests[index]
+#        r = min(rs)
+#        rs.remove(r)
+        r = rs.pop(0)
+        self.amount_inactive -= r[1]
+        return r
+
+
+    def write_raw(self, index, begin, data):
+        try:
+            self.storage.write(self.piece_size * index + begin, data)
+            return True
+        except IOError, e:
+            self.failed('IO Error: ' + str(e))
+            return False
+
+
+    def _write_to_buffer(self, piece, start, data):
+        if not self.write_buf_max:
+            return self.write_raw(self.places[piece], start, data)
+        self.write_buf_size += len(data)
+        while self.write_buf_size > self.write_buf_max:
+            old = self.write_buf_list.pop(0)
+            if not self._flush_buffer(old, True):
+                return False
+        if self.write_buf.has_key(piece):
+            self.write_buf_list.remove(piece)
+        else:
+            self.write_buf[piece] = []
+        self.write_buf_list.append(piece)
+        self.write_buf[piece].append((start,data))
+        return True
+
+    def _flush_buffer(self, piece, popped = False):
+        if not self.write_buf.has_key(piece):
+            return True
+        if not popped:
+            self.write_buf_list.remove(piece)
+        l = self.write_buf[piece]
+        del self.write_buf[piece]
+        l.sort()
+        for start, data in l:
+            self.write_buf_size -= len(data)
+            if not self.write_raw(self.places[piece], start, data):
+                return False
+        return True
+
+    def sync(self):
+        spots = {}
+        for p in self.write_buf_list:
+            spots[self.places[p]] = p
+        l = spots.keys()
+        l.sort()
+        for i in l:
+            try:
+                self._flush_buffer(spots[i])
+            except:
+                pass
+        try:
+            self.storage.sync()
+        except IOError, e:
+            self.failed('IO Error: ' + str(e))
+        except OSError, e:
+            self.failed('OS Error: ' + str(e))
+
+
+    def _move_piece(self, index, newpos):
+        oldpos = self.places[index]
+        if DEBUG:
+            print 'moving '+str(index)+' from '+str(oldpos)+' to '+str(newpos)
+        assert oldpos != index
+        assert oldpos != newpos
+        assert index == newpos or not self.places.has_key(newpos)
+        old = self.read_raw(oldpos, 0, self._piecelen(index))
+        if old is None:
+            return -1
+        if not self.write_raw(newpos, 0, old):
+            return -1
+        self.places[index] = newpos
+        if self.have[index] and (
+                self.triple_check or (self.double_check and index == newpos) ):
+            if self.triple_check:
+                old.release()
+                old = self.read_raw(newpos, 0, self._piecelen(index),
+                                    flush_first = True)
+                if old is None:
+                    return -1
+            if sha(old[:]).digest() != self.hashes[index]:
+                self.failed('download corrupted; please restart and resume')
+                return -1
+        old.release()
+
+        if self.blocked[index]:
+            self.blocked_moveout.remove(index)
+            if self.blocked[newpos]:
+                self.blocked_movein.remove(index)
+            else:
+                self.blocked_movein.add(index)
+        else:
+            self.blocked_movein.remove(index)
+            if self.blocked[newpos]:
+                self.blocked_moveout.add(index)
+            else:
+                self.blocked_moveout.remove(index)
+                    
+        return oldpos
+            
+    def _clear_space(self, index):
+        h = self.holes.pop(0)
+        n = h
+        if self.blocked[n]: # assume not self.blocked[index]
+            if not self.blocked_movein:
+                self.blocked_holes.append(n)
+                return True    # repeat
+            if not self.places.has_key(n):
+                b = self.blocked_movein.pop(0)
+                oldpos = self._move_piece(b, n)
+                if oldpos < 0:
+                    return False
+                n = oldpos
+        if self.places.has_key(n):
+            oldpos = self._move_piece(n, n)
+            if oldpos < 0:
+                return False
+            n = oldpos
+        if index == n or index in self.holes:
+            if n == h:
+                self.write_raw(n, 0, self.alloc_buf[:self._piecelen(n)])
+            self.places[index] = n
+            if self.blocked[n]:
+                # because n may be a spot cleared 10 lines above, it's possible
+                # for it to be blocked.  While that spot could be left cleared
+                # and a new spot allocated, this condition might occur several
+                # times in a row, resulting in a significant amount of disk I/O,
+                # delaying the operation of the engine.  Rather than do this,
+                # queue the piece to be moved out again, which will be performed
+                # by the background allocator, with which data movement is
+                # automatically limited.
+                self.blocked_moveout.add(index)
+            return False
+        for p, v in self.places.items():
+            if v == index:
+                break
+        else:
+            self.failed('download corrupted; please restart and resume')
+            return False
+        self._move_piece(p, n)
+        self.places[index] = index
+        return False
+
+
+    def piece_came_in(self, index, begin, piece, source = None):
+        assert not self.have[index]
+        
+        if not self.places.has_key(index):
+            while self._clear_space(index):
+                pass
+            if DEBUG:
+                print 'new place for '+str(index)+' at '+str(self.places[index])
+        if self.flag.isSet():
+            return
+
+        if self.failed_pieces.has_key(index):
+            old = self.read_raw(self.places[index], begin, len(piece))
+            if old is None:
+                return True
+            if old[:].tostring() != piece:
+                try:
+                    self.failed_pieces[index][self.download_history[index][begin]] = 1
+                except:
+                    self.failed_pieces[index][None] = 1
+            old.release()
+        self.download_history.setdefault(index,{})[begin] = source
+        
+        if not self._write_to_buffer(index, begin, piece):
+            return True
+        
+        self.amount_obtained += len(piece)
+        self.dirty.setdefault(index,[]).append((begin, len(piece)))
+        self.numactive[index] -= 1
+        assert self.numactive[index] >= 0
+        if not self.numactive[index]:
+            del self.stat_active[index]
+        if self.stat_new.has_key(index):
+            del self.stat_new[index]
+
+        if self.inactive_requests[index] or self.numactive[index]:
+            return True
+        
+        del self.dirty[index]
+        if not self._flush_buffer(index):
+            return True
+        length = self._piecelen(index)
+        data = self.read_raw(self.places[index], 0, length,
+                                 flush_first = self.triple_check)
+        if data is None:
+            return True
+        hash = sha(data[:]).digest()
+        data.release()
+        if hash != self.hashes[index]:
+
+            self.amount_obtained -= length
+            self.data_flunked(length, index)
+            self.inactive_requests[index] = 1
+            self.amount_inactive += length
+            self.stat_numflunked += 1
+
+            self.failed_pieces[index] = {}
+            allsenders = {}
+            for d in self.download_history[index].values():
+                allsenders[d] = 1
+            if len(allsenders) == 1:
+                culprit = allsenders.keys()[0]
+                if culprit is not None:
+                    culprit.failed(index, bump = True)
+                del self.failed_pieces[index] # found the culprit already
+            
+            return False
+
+        self.have[index] = True
+        self.inactive_requests[index] = None
+        self.waschecked[index] = True
+        self.amount_left -= length
+        self.stat_numdownloaded += 1
+
+        for d in self.download_history[index].values():
+            if d is not None:
+                d.good(index)
+        del self.download_history[index]
+        if self.failed_pieces.has_key(index):
+            for d in self.failed_pieces[index].keys():
+                if d is not None:
+                    d.failed(index)
+            del self.failed_pieces[index]
+
+        if self.amount_left == 0:
+            self.finished()
+        return True
+
+
+    def request_lost(self, index, begin, length):
+        assert not (begin, length) in self.inactive_requests[index]
+        insort(self.inactive_requests[index], (begin, length))
+        self.amount_inactive += length
+        self.numactive[index] -= 1
+        if not self.numactive[index]:
+            del self.stat_active[index]
+            if self.stat_new.has_key(index):
+                del self.stat_new[index]
+
+
+    def get_piece(self, index, begin, length):
+        if not self.have[index]:
+            return None
+        data = None
+        if not self.waschecked[index]:
+            data = self.read_raw(self.places[index], 0, self._piecelen(index))
+            if data is None:
+                return None
+            if sha(data[:]).digest() != self.hashes[index]:
+                self.failed('told file complete on start-up, but piece failed hash check')
+                return None
+            self.waschecked[index] = True
+            if length == -1 and begin == 0:
+                return data     # optimization
+        if length == -1:
+            if begin > self._piecelen(index):
+                return None
+            length = self._piecelen(index)-begin
+            if begin == 0:
+                return self.read_raw(self.places[index], 0, length)
+        elif begin + length > self._piecelen(index):
+            return None
+        if data is not None:
+            s = data[begin:begin+length]
+            data.release()
+            return s
+        data = self.read_raw(self.places[index], begin, length)
+        if data is None:
+            return None
+        s = data.getarray()
+        data.release()
+        return s
+
+    def read_raw(self, piece, begin, length, flush_first = False):
+        try:
+            return self.storage.read(self.piece_size * piece + begin,
+                                                     length, flush_first)
+        except IOError, e:
+            self.failed('IO Error: ' + str(e))
+            return None
+
+
+    def set_file_readonly(self, n):
+        try:
+            self.storage.set_readonly(n)
+        except IOError, e:
+            self.failed('IO Error: ' + str(e))
+        except OSError, e:
+            self.failed('OS Error: ' + str(e))
+
+
+    def has_data(self, index):
+        return index not in self.holes and index not in self.blocked_holes
+
+    def doublecheck_data(self, pieces_to_check):
+        if not self.double_check:
+            return
+        sources = []
+        for p,v in self.places.items():
+            if pieces_to_check.has_key(v):
+                sources.append(p)
+        assert len(sources) == len(pieces_to_check)
+        sources.sort()
+        for index in sources:
+            if self.have[index]:
+                piece = self.read_raw(self.places[index],0,self._piecelen(index),
+                                       flush_first = True )
+                if piece is None:
+                    return False
+                if sha(piece[:]).digest() != self.hashes[index]:
+                    self.failed('download corrupted; please restart and resume')
+                    return False
+                piece.release()
+        return True
+
+
+    def reblock(self, new_blocked):
+        # assume downloads have already been canceled and chunks made inactive
+        for i in xrange(len(new_blocked)):
+            if new_blocked[i] and not self.blocked[i]:
+                length = self._piecelen(i)
+                self.amount_desired -= length
+                if self.have[i]:
+                    self.amount_obtained -= length
+                    continue
+                if self.inactive_requests[i] == 1:
+                    self.amount_inactive -= length
+                    continue
+                inactive = 0
+                for nb, nl in self.inactive_requests[i]:
+                    inactive += nl
+                self.amount_inactive -= inactive
+                self.amount_obtained -= length - inactive
+                
+            if self.blocked[i] and not new_blocked[i]:
+                length = self._piecelen(i)
+                self.amount_desired += length
+                if self.have[i]:
+                    self.amount_obtained += length
+                    continue
+                if self.inactive_requests[i] == 1:
+                    self.amount_inactive += length
+                    continue
+                inactive = 0
+                for nb, nl in self.inactive_requests[i]:
+                    inactive += nl
+                self.amount_inactive += inactive
+                self.amount_obtained += length - inactive
+
+        self.blocked = new_blocked
+
+        self.blocked_movein = Olist()
+        self.blocked_moveout = Olist()
+        for p,v in self.places.items():
+            if p != v:
+                if self.blocked[p] and not self.blocked[v]:
+                    self.blocked_movein.add(p)
+                elif self.blocked[v] and not self.blocked[p]:
+                    self.blocked_moveout.add(p)
+
+        self.holes.extend(self.blocked_holes)    # reset holes list
+        self.holes.sort()
+        self.blocked_holes = []
+
+
+    '''
+    Pickled data format:
+
+    d['pieces'] = either a string containing a bitfield of complete pieces,
+                    or the numeric value "1" signifying a seed.  If it is
+                    a seed, d['places'] and d['partials'] should be empty
+                    and needn't even exist.
+    d['partials'] = [ piece, [ offset, length... ]... ]
+                    a list of partial piece data that was previously
+                    downloaded, together with its offsets within each piece.
+                    Adjacent partials are merged to save space and so that,
+                    if the request size changes, new requests can still be
+                    calculated efficiently.
+    d['places'] = [ piece, place {, piece, place ...} ]
+                    the piece index, and the place where it is stored.
+                    If d['pieces'] specifies a complete piece or d['partials']
+                    specifies a set of partials for a piece which has no
+                    entry in d['places'], it can be assumed that
+                    place[index] = index.  A place specified with no
+                    corresponding data in d['pieces'] or d['partials']
+                    indicates allocated space with no valid data, and is
+                    reserved so it doesn't need to be hash-checked.
+    '''
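+    # For illustration only (hypothetical values, not output of this code):
+    # a partially-downloaded torrent could pickle as
+    #   { 'pieces': <bitfield string of completed pieces>,
+    #     'places': [7, 2],                # piece 7 is stored at position 2
+    #     'partials': [5, [0, 32768]] }    # first 32768 bytes of piece 5
+    # while a seed pickles simply as { 'pieces': 1 }.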
+    def pickle(self):
+        if self.have.complete():
+            return {'pieces': 1}
+        pieces = Bitfield(len(self.hashes))
+        places = []
+        partials = []
+        for p in xrange(len(self.hashes)):
+            if self.blocked[p] or not self.places.has_key(p):
+                continue
+            h = self.have[p]
+            pieces[p] = h
+            pp = self.dirty.get(p)
+            if not h and not pp:  # no data
+                places.extend([self.places[p],self.places[p]])
+            elif self.places[p] != p:
+                places.extend([p, self.places[p]])
+            if h or not pp:
+                continue
+            pp.sort()
+            r = []
+            while len(pp) > 1:
+                if pp[0][0]+pp[0][1] == pp[1][0]:
+                    pp[0] = list(pp[0])
+                    pp[0][1] += pp[1][1]
+                    del pp[1]
+                else:
+                    r.extend(pp[0])
+                    del pp[0]
+            r.extend(pp[0])
+            partials.extend([p,r])
+        return {'pieces': pieces.tostring(), 'places': places, 'partials': partials}
+
+
+    def unpickle(self, data, valid_places):
+        got = {}
+        places = {}
+        dirty = {}
+        download_history = {}
+        stat_active = {}
+        stat_numfound = self.stat_numfound
+        amount_obtained = self.amount_obtained
+        amount_inactive = self.amount_inactive
+        amount_left = self.amount_left
+        inactive_requests = [x for x in self.inactive_requests]
+        restored_partials = []
+
+        try:
+            if data['pieces'] == 1:     # a seed
+                assert not data.get('places',None)
+                assert not data.get('partials',None)
+                have = Bitfield(len(self.hashes))
+                for i in xrange(len(self.hashes)):
+                    have[i] = True
+                assert have.complete()
+                _places = []
+                _partials = []
+            else:
+                have = Bitfield(len(self.hashes), data['pieces'])
+                _places = data['places']
+                assert len(_places) % 2 == 0
+                _places = [_places[x:x+2] for x in xrange(0,len(_places),2)]
+                _partials = data['partials']
+                assert len(_partials) % 2 == 0
+                _partials = [_partials[x:x+2] for x in xrange(0,len(_partials),2)]
+                
+            for index, place in _places:
+                if place not in valid_places:
+                    continue
+                assert not got.has_key(index)
+                assert not got.has_key(place)
+                places[index] = place
+                got[index] = 1
+                got[place] = 1
+
+            for index in xrange(len(self.hashes)):
+                if have[index]:
+                    if not places.has_key(index):
+                        if index not in valid_places:
+                            have[index] = False
+                            continue
+                        assert not got.has_key(index)
+                        places[index] = index
+                        got[index] = 1
+                    length = self._piecelen(index)
+                    amount_obtained += length
+                    stat_numfound += 1
+                    amount_inactive -= length
+                    amount_left -= length
+                    inactive_requests[index] = None
+
+            for index, plist in _partials:
+                assert not dirty.has_key(index)
+                assert not have[index]
+                if not places.has_key(index):
+                    if index not in valid_places:
+                        continue
+                    assert not got.has_key(index)
+                    places[index] = index
+                    got[index] = 1
+                assert len(plist) % 2 == 0
+                plist = [plist[x:x+2] for x in xrange(0,len(plist),2)]
+                dirty[index] = plist
+                stat_active[index] = 1
+                download_history[index] = {}
+                # invert given partials
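+                # e.g. (hypothetical): plist [(0, 16384), (32768, 16384)] in a
+                # 65536-byte piece inverts to the missing ranges
+                # [(16384, 16384), (49152, 16384)]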
+                length = self._piecelen(index)
+                l = []
+                if plist[0][0] > 0:
+                    l.append((0,plist[0][0]))
+                for i in xrange(len(plist)-1):
+                    end = plist[i][0]+plist[i][1]
+                    assert not end > plist[i+1][0]
+                    l.append((end,plist[i+1][0]-end))
+                end = plist[-1][0]+plist[-1][1]
+                assert not end > length
+                if end < length:
+                    l.append((end,length-end))
+                # split them to request_size
+                ll = []
+                amount_obtained += length
+                amount_inactive -= length
+                for nb, nl in l:
+                    while nl > 0:
+                        r = min(nl,self.request_size)
+                        ll.append((nb,r))
+                        amount_inactive += r
+                        amount_obtained -= r
+                        nb += self.request_size
+                        nl -= self.request_size
+                inactive_requests[index] = ll
+                restored_partials.append(index)
+
+            assert amount_obtained + amount_inactive == self.amount_desired
+        except:
+#            print_exc()
+            return []   # invalid data, discard everything
+
+        self.have = have
+        self.places = places
+        self.dirty = dirty
+        self.download_history = download_history
+        self.stat_active = stat_active
+        self.stat_numfound = stat_numfound
+        self.amount_obtained = amount_obtained
+        self.amount_inactive = amount_inactive
+        self.amount_left = amount_left
+        self.inactive_requests = inactive_requests
+                
+        return restored_partials
+    

Added: debtorrent/branches/upstream/current/BitTornado/BT1/StreamCheck.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/StreamCheck.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/StreamCheck.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/StreamCheck.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,135 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from binascii import b2a_hex
+from socket import error as socketerror
+from urllib import quote
+from traceback import print_exc
+import Connecter
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+
+protocol_name = 'BitTorrent protocol'
+option_pattern = chr(0)*8
+
+def toint(s):
+    return long(b2a_hex(s), 16)
+
+def tobinary(i):
+    return (chr(i >> 24) + chr((i >> 16) & 0xFF) + 
+        chr((i >> 8) & 0xFF) + chr(i & 0xFF))
+
+hexchars = '0123456789ABCDEF'
+hexmap = []
+for i in xrange(256):
+    hexmap.append(hexchars[(i&0xF0)/16]+hexchars[i&0x0F])
+
+def tohex(s):
+    r = []
+    for c in s:
+        r.append(hexmap[ord(c)])
+    return ''.join(r)
+
+def make_readable(s):
+    if not s:
+        return ''
+    if quote(s).find('%') >= 0:
+        return tohex(s)
+    return '"'+s+'"'
+   
+def toint(s):
+    return long(b2a_hex(s), 16)
+
+# header, reserved, download id, my id, [length, message]
+
+streamno = 0
+
+
+class StreamCheck:
+    def __init__(self):
+        global streamno
+        self.no = streamno
+        streamno += 1
+        self.buffer = StringIO()
+        self.next_len, self.next_func = 1, self.read_header_len
+
+    def read_header_len(self, s):
+        if ord(s) != len(protocol_name):
+            print self.no, 'BAD HEADER LENGTH'
+        return len(protocol_name), self.read_header
+
+    def read_header(self, s):
+        if s != protocol_name:
+            print self.no, 'BAD HEADER'
+        return 8, self.read_reserved
+
+    def read_reserved(self, s):
+        return 20, self.read_download_id
+
+    def read_download_id(self, s):
+        if DEBUG:
+            print self.no, 'download ID ' + tohex(s)
+        return 20, self.read_peer_id
+
+    def read_peer_id(self, s):
+        if DEBUG:
+            print self.no, 'peer ID' + make_readable(s)
+        return 4, self.read_len
+
+    def read_len(self, s):
+        l = toint(s)
+        if l > 2 ** 23:
+            print self.no, 'BAD LENGTH: '+str(l)+' ('+s+')'
+        return l, self.read_message
+
+    def read_message(self, s):
+        if not s:
+            return 4, self.read_len
+        m = s[0]
+        if ord(m) > 8:
+            print self.no, 'BAD MESSAGE: '+str(ord(m))
+        if m == Connecter.REQUEST:
+            if len(s) != 13:
+                print self.no, 'BAD REQUEST SIZE: '+str(len(s))
+                return 4, self.read_len
+            index = toint(s[1:5])
+            begin = toint(s[5:9])
+            length = toint(s[9:])
+            print self.no, 'Request: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length)
+        elif m == Connecter.CANCEL:
+            if len(s) != 13:
+                print self.no, 'BAD CANCEL SIZE: '+str(len(s))
+                return 4, self.read_len
+            index = toint(s[1:5])
+            begin = toint(s[5:9])
+            length = toint(s[9:])
+            print self.no, 'Cancel: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length)
+        elif m == Connecter.PIECE:
+            index = toint(s[1:5])
+            begin = toint(s[5:9])
+            length = len(s)-9
+            print self.no, 'Piece: '+str(index)+': '+str(begin)+'-'+str(begin)+'+'+str(length)
+        else:
+            print self.no, 'Message '+str(ord(m))+' (length '+str(len(s))+')'
+        return 4, self.read_len
+
+    def write(self, s):
+        while True:
+            i = self.next_len - self.buffer.tell()
+            if i > len(s):
+                self.buffer.write(s)
+                return
+            self.buffer.write(s[:i])
+            s = s[i:]
+            m = self.buffer.getvalue()
+            self.buffer.reset()
+            self.buffer.truncate()
+            x = self.next_func(m)
+            self.next_len, self.next_func = x

Added: debtorrent/branches/upstream/current/BitTornado/BT1/T2T.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/T2T.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/T2T.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/T2T.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,198 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from Rerequester import Rerequester
+from urllib import quote
+from threading import Event
+from random import randrange
+from string import lower
+import sys
+import __init__
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = True
+
+
+def excfunc(x):
+    print x
+
+R_0 = lambda: 0
+R_1 = lambda: 1
+
+class T2TConnection:
+    def __init__(self, myid, tracker, hash, interval, peers, timeout,
+                     rawserver, disallow, isdisallowed):
+        self.tracker = tracker
+        self.interval = interval
+        self.hash = hash
+        self.operatinginterval = interval
+        self.peers = peers
+        self.rawserver = rawserver
+        self.disallow = disallow
+        self.isdisallowed = isdisallowed
+        self.active = True
+        self.busy = False
+        self.errors = 0
+        self.rejected = 0
+        self.trackererror = False
+        self.peerlists = []
+        cfg = { 'min_peers': peers,
+                'max_initiate': peers,
+                'rerequest_interval': interval,
+                'http_timeout': timeout }
+        self.rerequester = Rerequester( 0, myid, hash, [[tracker]], cfg,
+            rawserver.add_task, rawserver.add_task, self.errorfunc, excfunc,
+            self.addtolist, R_0, R_1, R_0, R_0, R_0, R_0,
+            Event() )
+
+        if self.isactive():
+            rawserver.add_task(self.refresh, randrange(int(self.interval/10), self.interval))
+                                        # stagger announces
+
+    def isactive(self):
+        if self.isdisallowed(self.tracker):    # whoops!
+            self.deactivate()
+        return self.active
+            
+    def deactivate(self):
+        self.active = False
+
+    def refresh(self):
+        if not self.isactive():
+            return
+        self.lastsuccessful = True
+        self.newpeerdata = []
+        if DEBUG:
+            print 'contacting %s for info_hash=%s' % (self.tracker, quote(self.hash))
+        self.rerequester.snoop(self.peers, self.callback)
+
+    def callback(self):
+        self.busy = False
+        if self.lastsuccessful:
+            self.errors = 0
+            self.rejected = 0
+            if self.rerequester.announce_interval > (3*self.interval):
+                # I think I'm stripping from a regular tracker; boost the number of peers requested
+                self.peers = int(self.peers * (self.rerequester.announce_interval / self.interval))
+            self.operatinginterval = self.rerequester.announce_interval
+            if DEBUG:
+                print ("%s with info_hash=%s returned %d peers" %
+                        (self.tracker, quote(self.hash), len(self.newpeerdata)))
+            self.peerlists.append(self.newpeerdata)
+            self.peerlists = self.peerlists[-10:]  # keep up to the last 10 announces
+        if self.isactive():
+            self.rawserver.add_task(self.refresh, self.operatinginterval)
+
+    def addtolist(self, peers):
+        for peer in peers:
+            self.newpeerdata.append((peer[1],peer[0][0],peer[0][1]))
+        
+    def errorfunc(self, r):
+        self.lastsuccessful = False
+        if DEBUG:
+            print "%s with info_hash=%s gives error: '%s'" % (self.tracker, quote(self.hash), r)
+        if r == self.rerequester.rejectedmessage + 'disallowed':   # whoops!
+            if DEBUG:
+                print ' -- disallowed - deactivating'
+            self.deactivate()
+            self.disallow(self.tracker)   # signal other torrents on this tracker
+            return
+        if lower(r[:8]) == 'rejected': # tracker rejected this particular torrent
+            self.rejected += 1
+            if self.rejected == 3:     # rejected 3 times
+                if DEBUG:
+                    print ' -- rejected 3 times - deactivating'
+                self.deactivate()
+            return
+        self.errors += 1
+        if self.errors >= 3:                         # three or more errors in a row
+            self.operatinginterval += self.interval  # lengthen the interval
+            if DEBUG:
+                print ' -- lengthening interval to '+str(self.operatinginterval)+' seconds'
+
+    def harvest(self):
+        x = []
+        for list in self.peerlists:
+            x += list
+        self.peerlists = []
+        return x
+
+
+class T2TList:
+    def __init__(self, enabled, trackerid, interval, maxpeers, timeout, rawserver):
+        self.enabled = enabled
+        self.trackerid = trackerid
+        self.interval = interval
+        self.maxpeers = maxpeers
+        self.timeout = timeout
+        self.rawserver = rawserver
+        self.list = {}
+        self.torrents = {}
+        self.disallowed = {}
+        self.oldtorrents = []
+
+    def parse(self, allowed_list):
+        if not self.enabled:
+            return
+
+        # step 1:  Create a new list with all tracker/torrent combinations in allowed_dir        
+        newlist = {}
+        for hash, data in allowed_list.items():
+            if data.has_key('announce-list'):
+                for tier in data['announce-list']:
+                    for tracker in tier:
+                        self.disallowed.setdefault(tracker, False)
+                        newlist.setdefault(tracker, {})
+                        newlist[tracker][hash] = None # placeholder
+                            
+        # step 2:  Go through and copy old data to the new list.
+        # if the new list has no place for it, then it's old, so deactivate it
+        for tracker, hashdata in self.list.items():
+            for hash, t2t in hashdata.items():
+                if not newlist.has_key(tracker) or not newlist[tracker].has_key(hash):
+                    t2t.deactivate()                # this connection is no longer current
+                    self.oldtorrents += [t2t]
+                        # keep it referenced in case a thread comes along and tries to access.
+                else:
+                    newlist[tracker][hash] = t2t
+            if not newlist.has_key(tracker):
+                self.disallowed[tracker] = False    # reset when no torrents on it left
+
+        self.list = newlist
+        newtorrents = {}
+
+        # step 3:  If there are any entries that haven't been initialized yet, do so.
+        # At the same time, copy all entries onto the by-torrent list.
+        for tracker, hashdata in newlist.items():
+            for hash, t2t in hashdata.items():
+                if t2t is None:
+                    hashdata[hash] = T2TConnection(self.trackerid, tracker, hash,
+                                        self.interval, self.maxpeers, self.timeout,
+                                        self.rawserver, self._disallow, self._isdisallowed)
+                newtorrents.setdefault(hash,[])
+                newtorrents[hash] += [hashdata[hash]]
+                
+        self.torrents = newtorrents
+
+        # structures:
+        # list = {tracker: {hash: T2TConnection, ...}, ...}
+        # torrents = {hash: [T2TConnection, ...]}
+        # disallowed = {tracker: flag, ...}
+        # oldtorrents = [T2TConnection, ...]
+
+    def _disallow(self,tracker):
+        self.disallowed[tracker] = True
+
+    def _isdisallowed(self,tracker):
+        return self.disallowed[tracker]
+
+    def harvest(self,hash):
+        harvest = []
+        if self.enabled:
+            for t2t in self.torrents[hash]:
+                harvest += t2t.harvest()
+        return harvest

Added: debtorrent/branches/upstream/current/BitTornado/BT1/Uploader.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/Uploader.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/Uploader.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/Uploader.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,145 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.CurrentRateMeasure import Measure
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+class Upload:
+    def __init__(self, connection, ratelimiter, totalup, choker, storage,
+                 picker, config):
+        self.connection = connection
+        self.ratelimiter = ratelimiter
+        self.totalup = totalup
+        self.choker = choker
+        self.storage = storage
+        self.picker = picker
+        self.config = config
+        self.max_slice_length = config['max_slice_length']
+        self.choked = True
+        self.cleared = True
+        self.interested = False
+        self.super_seeding = False
+        self.buffer = []
+        self.measure = Measure(config['max_rate_period'], config['upload_rate_fudge'])
+        self.was_ever_interested = False
+        if storage.get_amount_left() == 0:
+            if choker.super_seed:
+                self.super_seeding = True   # flag, and don't send bitfield
+                self.seed_have_list = []    # set from piecepicker
+                self.skipped_count = 0
+            else:
+                if config['breakup_seed_bitfield']:
+                    bitfield, msgs = storage.get_have_list_cloaked()
+                    connection.send_bitfield(bitfield)
+                    for have in msgs:
+                        connection.send_have(have)
+                else:
+                    connection.send_bitfield(storage.get_have_list())
+        else:
+            if storage.do_I_have_anything():
+                connection.send_bitfield(storage.get_have_list())
+        self.piecedl = None
+        self.piecebuf = None
+
+    def got_not_interested(self):
+        if self.interested:
+            self.interested = False
+            del self.buffer[:]
+            self.piecedl = None
+            if self.piecebuf:
+                self.piecebuf.release()
+            self.piecebuf = None
+            self.choker.not_interested(self.connection)
+
+    def got_interested(self):
+        if not self.interested:
+            self.interested = True
+            self.was_ever_interested = True
+            self.choker.interested(self.connection)
+
+    def get_upload_chunk(self):
+        if self.choked or not self.buffer:
+            return None
+        index, begin, length = self.buffer.pop(0)
+        if self.config['buffer_reads']:
+            if index != self.piecedl:
+                if self.piecebuf:
+                    self.piecebuf.release()
+                self.piecedl = index
+                self.piecebuf = self.storage.get_piece(index, 0, -1)
+            try:
+                piece = self.piecebuf[begin:begin+length]
+                assert len(piece) == length
+            except:     # fails if storage.get_piece returns None or if out of range
+                self.connection.close()
+                return None
+        else:
+            if self.piecebuf:
+                self.piecebuf.release()
+                self.piecedl = None
+            piece = self.storage.get_piece(index, begin, length)
+            if piece is None:
+                self.connection.close()
+                return None
+        self.measure.update_rate(len(piece))
+        self.totalup.update_rate(len(piece))
+        return (index, begin, piece)
+
+    def got_request(self, index, begin, length):
+        if ( (self.super_seeding and not index in self.seed_have_list)
+                   or not self.interested or length > self.max_slice_length ):
+            self.connection.close()
+            return
+        if not self.cleared:
+            self.buffer.append((index, begin, length))
+        if not self.choked and self.connection.next_upload is None:
+            self.ratelimiter.queue(self.connection)
+
+
+    def got_cancel(self, index, begin, length):
+        try:
+            self.buffer.remove((index, begin, length))
+        except ValueError:
+            pass
+
+    def choke(self):
+        if not self.choked:
+            self.choked = True
+            self.connection.send_choke()
+        self.piecedl = None
+        if self.piecebuf:
+            self.piecebuf.release()
+            self.piecebuf = None
+
+    def choke_sent(self):
+        del self.buffer[:]
+        self.cleared = True
+
+    def unchoke(self):
+        if self.choked:
+            self.choked = False
+            self.cleared = False
+            self.connection.send_unchoke()
+        
+    def disconnected(self):
+        if self.piecebuf:
+            self.piecebuf.release()
+            self.piecebuf = None
+
+    def is_choked(self):
+        return self.choked
+        
+    def is_interested(self):
+        return self.interested
+
+    def has_queries(self):
+        return not self.choked and len(self.buffer) > 0
+
+    def get_rate(self):
+        return self.measure.get_rate()
+    
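
In the Upload class above, got_request() only queues a block while the connection has not been cleared by a sent choke, and got_cancel() drops a pending block by exact (index, begin, length) match. A tiny stand-alone sketch of that buffer discipline (it does not use the class itself; the names below are made up for illustration):

    # Pending-request bookkeeping, mirroring got_request()/got_cancel().
    pending = []
    cleared = False               # True once a choke has been flushed

    def got_request(index, begin, length):
        if not cleared:
            pending.append((index, begin, length))

    def got_cancel(index, begin, length):
        try:
            pending.remove((index, begin, length))
        except ValueError:
            pass                  # already sent or never queued

    got_request(3, 0, 16384)
    got_request(3, 16384, 16384)
    got_cancel(3, 0, 16384)
    print pending                 # [(3, 16384, 16384)]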

Added: debtorrent/branches/upstream/current/BitTornado/BT1/__init__.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/__init__.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/__init__.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/__init__.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+# placeholder

Added: debtorrent/branches/upstream/current/BitTornado/BT1/btformats.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/btformats.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/btformats.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/btformats.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,100 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from types import StringType, LongType, IntType, ListType, DictType
+from re import compile
+
+reg = compile(r'^[^/\\.~][^/\\]*$')
+
+ints = (LongType, IntType)
+
+def check_info(info):
+    if type(info) != DictType:
+        raise ValueError, 'bad metainfo - not a dictionary'
+    pieces = info.get('pieces')
+    if type(pieces) != StringType or len(pieces) % 20 != 0:
+        raise ValueError, 'bad metainfo - bad pieces key'
+    piecelength = info.get('piece length')
+    if type(piecelength) not in ints or piecelength <= 0:
+        raise ValueError, 'bad metainfo - illegal piece length'
+    name = info.get('name')
+    if type(name) != StringType:
+        raise ValueError, 'bad metainfo - bad name'
+    if not reg.match(name):
+        raise ValueError, 'name %s disallowed for security reasons' % name
+    if info.has_key('files') == info.has_key('length'):
+        raise ValueError, 'single/multiple file mix'
+    if info.has_key('length'):
+        length = info.get('length')
+        if type(length) not in ints or length < 0:
+            raise ValueError, 'bad metainfo - bad length'
+    else:
+        files = info.get('files')
+        if type(files) != ListType:
+            raise ValueError
+        for f in files:
+            if type(f) != DictType:
+                raise ValueError, 'bad metainfo - bad file value'
+            length = f.get('length')
+            if type(length) not in ints or length < 0:
+                raise ValueError, 'bad metainfo - bad length'
+            path = f.get('path')
+            if type(path) != ListType or path == []:
+                raise ValueError, 'bad metainfo - bad path'
+            for p in path:
+                if type(p) != StringType:
+                    raise ValueError, 'bad metainfo - bad path dir'
+                if not reg.match(p):
+                    raise ValueError, 'path %s disallowed for security reasons' % p
+        for i in xrange(len(files)):
+            for j in xrange(i):
+                if files[i]['path'] == files[j]['path']:
+                    raise ValueError, 'bad metainfo - duplicate path'
+
+def check_message(message):
+    if type(message) != DictType:
+        raise ValueError
+    check_info(message.get('info'))
+    if type(message.get('announce')) != StringType:
+        raise ValueError
+
+def check_peers(message):
+    if type(message) != DictType:
+        raise ValueError
+    if message.has_key('failure reason'):
+        if type(message['failure reason']) != StringType:
+            raise ValueError
+        return
+    peers = message.get('peers')
+    if type(peers) == ListType:
+        for p in peers:
+            if type(p) != DictType:
+                raise ValueError
+            if type(p.get('ip')) != StringType:
+                raise ValueError
+            port = p.get('port')
+            if type(port) not in ints or port <= 0:
+                raise ValueError
+            if p.has_key('peer id'):
+                id = p['peer id']
+                if type(id) != StringType or len(id) != 20:
+                    raise ValueError
+    elif type(peers) != StringType or len(peers) % 6 != 0:
+        raise ValueError
+    interval = message.get('interval', 1)
+    if type(interval) not in ints or interval <= 0:
+        raise ValueError
+    minint = message.get('min interval', 1)
+    if type(minint) not in ints or minint <= 0:
+        raise ValueError
+    if type(message.get('tracker id', '')) != StringType:
+        raise ValueError
+    npeers = message.get('num peers', 0)
+    if type(npeers) not in ints or npeers < 0:
+        raise ValueError
+    dpeers = message.get('done peers', 0)
+    if type(dpeers) not in ints or dpeers < 0:
+        raise ValueError
+    last = message.get('last', 0)
+    if type(last) not in ints or last < 0:
+        raise ValueError
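
check_message() above expects a metainfo dict with a string 'announce' URL and an 'info' section whose 'pieces' value is a string of concatenated 20-byte SHA1 digests. A minimal single-file metainfo that satisfies those checks (this assumes the BT1 package is importable; the values are placeholders):

    from BitTornado.BT1.btformats import check_message

    metainfo = {
        'announce': 'http://tracker.example.org/announce',
        'info': {
            'name': 'example.bin',
            'length': 4,
            'piece length': 262144,
            'pieces': '\x00' * 20,    # one dummy 20-byte digest
        },
    }
    check_message(metainfo)           # raises ValueError if malformed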

Added: debtorrent/branches/upstream/current/BitTornado/BT1/fakeopen.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/fakeopen.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/fakeopen.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/fakeopen.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,89 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from string import join
+
+class FakeHandle:
+    def __init__(self, name, fakeopen):
+        self.name = name
+        self.fakeopen = fakeopen
+        self.pos = 0
+    
+    def flush(self):
+        pass
+    
+    def close(self):
+        pass
+    
+    def seek(self, pos):
+        self.pos = pos
+    
+    def read(self, amount = None):
+        old = self.pos
+        f = self.fakeopen.files[self.name]
+        if self.pos >= len(f):
+            return ''
+        if amount is None:
+            self.pos = len(f)
+            return join(f[old:], '')
+        else:
+            self.pos = min(len(f), old + amount)
+            return join(f[old:self.pos], '')
+    
+    def write(self, s):
+        f = self.fakeopen.files[self.name]
+        while len(f) < self.pos:
+            f.append(chr(0))
+        self.fakeopen.files[self.name][self.pos : self.pos + len(s)] = list(s)
+        self.pos += len(s)
+
+class FakeOpen:
+    def __init__(self, initial = {}):
+        self.files = {}
+        for key, value in initial.items():
+            self.files[key] = list(value)
+    
+    def open(self, filename, mode):
+        """currently treats everything as rw - doesn't support append"""
+        self.files.setdefault(filename, [])
+        return FakeHandle(filename, self)
+
+    def exists(self, file):
+        return self.files.has_key(file)
+
+    def getsize(self, file):
+        return len(self.files[file])
+
+def test_normal():
+    f = FakeOpen({'f1': 'abcde'})
+    assert f.exists('f1')
+    assert not f.exists('f2')
+    assert f.getsize('f1') == 5
+    h = f.open('f1', 'rw')
+    assert h.read(3) == 'abc'
+    assert h.read(1) == 'd'
+    assert h.read() == 'e'
+    assert h.read(2) == ''
+    h.write('fpq')
+    h.seek(4)
+    assert h.read(2) == 'ef'
+    h.write('ghij')
+    h.seek(0)
+    assert h.read() == 'abcdefghij'
+    h.seek(2)
+    h.write('p')
+    h.write('q')
+    assert h.read(1) == 'e'
+    h.seek(1)
+    assert h.read(5) == 'bpqef'
+
+    h2 = f.open('f2', 'rw')
+    assert h2.read() == ''
+    h2.write('mnop')
+    h2.seek(1)
+    assert h2.read() == 'nop'
+    
+    assert f.exists('f1')
+    assert f.exists('f2')
+    assert f.getsize('f1') == 10
+    assert f.getsize('f2') == 4
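
Besides the test_normal() self-test above, one behaviour worth noting in FakeHandle.write() is that writing past the current end zero-fills the gap first. A short sketch (assumes the BT1 package is importable; the filename is arbitrary):

    from BitTornado.BT1.fakeopen import FakeOpen

    fs = FakeOpen()
    h = fs.open('sparse.bin', 'rw')
    h.seek(4)
    h.write('xy')             # bytes 0-3 are filled with chr(0) first
    h.seek(0)
    print repr(h.read())      # '\x00\x00\x00\x00xy'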

Added: debtorrent/branches/upstream/current/BitTornado/BT1/makemetafile.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/makemetafile.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/makemetafile.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/makemetafile.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,263 @@
+# Written by Bram Cohen
+# multitracker extensions by John Hoffman
+# see LICENSE.txt for license information
+
+from os.path import getsize, split, join, abspath, isdir
+from os import listdir
+from sha import sha
+from copy import copy
+from string import strip
+from BitTornado.bencode import bencode
+from btformats import check_info
+from threading import Event
+from time import time
+from traceback import print_exc
+try:
+    from sys import getfilesystemencoding
+    ENCODING = getfilesystemencoding()
+except:
+    from sys import getdefaultencoding
+    ENCODING = getdefaultencoding()
+
+defaults = [
+    ('announce_list', '',
+        'a list of announce URLs - explained below'),
+    ('httpseeds', '',
+        'a list of http seed URLs - explained below'),
+    ('piece_size_pow2', 0,
+        "which power of 2 to set the piece size to (0 = automatic)"),
+    ('comment', '',
+        "optional human-readable comment to put in .torrent"),
+    ('filesystem_encoding', '',
+        "optional specification for filesystem encoding " +
+        "(set automatically in recent Python versions)"),
+    ('target', '',
+        "optional target file for the torrent")
+    ]
+
+default_piece_len_exp = 18
+
+ignore = ['core', 'CVS']
+
+def print_announcelist_details():
+    print ('    announce_list = optional list of redundant/backup tracker URLs, in the format:')
+    print ('           url[,url...][|url[,url...]...]')
+    print ('                where URLs separated by commas are all tried first')
+    print ('                before the next group of URLs separated by the pipe is checked.')
+    print ("                If none is given, it is assumed you don't want one in the metafile.")
+    print ('                If announce_list is given, clients which support it')
+    print ('                will ignore the <announce> value.')
+    print ('           Examples:')
+    print ('                http://tracker1.com|http://tracker2.com|http://tracker3.com')
+    print ('                     (tries trackers 1-3 in order)')
+    print ('                http://tracker1.com,http://tracker2.com,http://tracker3.com')
+    print ('                     (tries trackers 1-3 in a randomly selected order)')
+    print ('                http://tracker1.com|http://backup1.com,http://backup2.com')
+    print ('                     (tries tracker 1 first, then tries between the 2 backups randomly)')
+    print ('')
+    print ('    httpseeds = optional list of http-seed URLs, in the format:')
+    print ('            url[|url...]')
+    
+def make_meta_file(file, url, params = {}, flag = Event(),
+                   progress = lambda x: None, progress_percent = 1):
+    if params.has_key('piece_size_pow2'):
+        piece_len_exp = params['piece_size_pow2']
+    else:
+        piece_len_exp = default_piece_len_exp
+    if params.has_key('target') and params['target'] != '':
+        f = params['target']
+    else:
+        a, b = split(file)
+        if b == '':
+            f = a + '.torrent'
+        else:
+            f = join(a, b + '.torrent')
+            
+    if piece_len_exp == 0:  # automatic
+        size = calcsize(file)
+        if   size > 8L*1024*1024*1024:   # > 8 gig =
+            piece_len_exp = 21          #   2 meg pieces
+        elif size > 2*1024*1024*1024:   # > 2 gig =
+            piece_len_exp = 20          #   1 meg pieces
+        elif size > 512*1024*1024:      # > 512M =
+            piece_len_exp = 19          #   512K pieces
+        elif size > 64*1024*1024:       # > 64M =
+            piece_len_exp = 18          #   256K pieces
+        elif size > 16*1024*1024:       # > 16M =
+            piece_len_exp = 17          #   128K pieces
+        elif size > 4*1024*1024:        # > 4M =
+            piece_len_exp = 16          #   64K pieces
+        else:                           # < 4M =
+            piece_len_exp = 15          #   32K pieces
+    piece_length = 2 ** piece_len_exp
+
+    encoding = None
+    if params.has_key('filesystem_encoding'):
+        encoding = params['filesystem_encoding']
+    if not encoding:
+        encoding = ENCODING
+    if not encoding:
+        encoding = 'ascii'
+    
+    info = makeinfo(file, piece_length, encoding, flag, progress, progress_percent)
+    if flag.isSet():
+        return
+    check_info(info)
+    h = open(f, 'wb')
+    data = {'info': info, 'announce': strip(url), 'creation date': long(time())}
+    
+    if params.has_key('comment') and params['comment']:
+        data['comment'] = params['comment']
+        
+    if params.has_key('real_announce_list'):    # shortcut for progs calling in from outside
+        data['announce-list'] = params['real_announce_list']
+    elif params.has_key('announce_list') and params['announce_list']:
+        l = []
+        for tier in params['announce_list'].split('|'):
+            l.append(tier.split(','))
+        data['announce-list'] = l
+        
+    if params.has_key('real_httpseeds'):    # shortcut for progs calling in from outside
+        data['httpseeds'] = params['real_httpseeds']
+    elif params.has_key('httpseeds') and params['httpseeds']:
+        data['httpseeds'] = params['httpseeds'].split('|')
+        
+    h.write(bencode(data))
+    h.close()
+
+def calcsize(file):
+    if not isdir(file):
+        return getsize(file)
+    total = 0L
+    for s in subfiles(abspath(file)):
+        total += getsize(s[1])
+    return total
+
+
+def uniconvertl(l, e):
+    r = []
+    try:
+        for s in l:
+            r.append(uniconvert(s, e))
+    except UnicodeError:
+        raise UnicodeError('bad filename: '+join(*l))
+    return r
+
+def uniconvert(s, e):
+    try:
+        s = unicode(s,e)
+    except UnicodeError:
+        raise UnicodeError('bad filename: '+s)
+    return s.encode('utf-8')
+
+def makeinfo(file, piece_length, encoding, flag, progress, progress_percent=1):
+    file = abspath(file)
+    if isdir(file):
+        subs = subfiles(file)
+        subs.sort()
+        pieces = []
+        sh = sha()
+        done = 0L
+        fs = []
+        totalsize = 0.0
+        totalhashed = 0L
+        for p, f in subs:
+            totalsize += getsize(f)
+
+        for p, f in subs:
+            pos = 0L
+            size = getsize(f)
+            fs.append({'length': size, 'path': uniconvertl(p, encoding)})
+            h = open(f, 'rb')
+            while pos < size:
+                a = min(size - pos, piece_length - done)
+                sh.update(h.read(a))
+                if flag.isSet():
+                    return
+                done += a
+                pos += a
+                totalhashed += a
+                
+                if done == piece_length:
+                    pieces.append(sh.digest())
+                    done = 0
+                    sh = sha()
+                if progress_percent:
+                    progress(totalhashed / totalsize)
+                else:
+                    progress(a)
+            h.close()
+        if done > 0:
+            pieces.append(sh.digest())
+        return {'pieces': ''.join(pieces),
+            'piece length': piece_length, 'files': fs, 
+            'name': uniconvert(split(file)[1], encoding) }
+    else:
+        size = getsize(file)
+        pieces = []
+        p = 0L
+        h = open(file, 'rb')
+        while p < size:
+            x = h.read(min(piece_length, size - p))
+            if flag.isSet():
+                return
+            pieces.append(sha(x).digest())
+            p += piece_length
+            if p > size:
+                p = size
+            if progress_percent:
+                progress(float(p) / size)
+            else:
+                progress(min(piece_length, size - p))
+        h.close()
+        return {'pieces': ''.join(pieces), 
+            'piece length': piece_length, 'length': size, 
+            'name': uniconvert(split(file)[1], encoding) }
+
+def subfiles(d):
+    r = []
+    stack = [([], d)]
+    while len(stack) > 0:
+        p, n = stack.pop()
+        if isdir(n):
+            for s in listdir(n):
+                if s not in ignore and s[:1] != '.':
+                    stack.append((copy(p) + [s], join(n, s)))
+        else:
+            r.append((p, n))
+    return r
+
+
+def completedir(dir, url, params = {}, flag = Event(),
+                vc = lambda x: None, fc = lambda x: None):
+    files = listdir(dir)
+    files.sort()
+    ext = '.torrent'
+    if params.has_key('target'):
+        target = params['target']
+    else:
+        target = ''
+
+    togen = []
+    for f in files:
+        if f[-len(ext):] != ext and (f + ext) not in files:
+            togen.append(join(dir, f))
+        
+    total = 0
+    for i in togen:
+        total += calcsize(i)
+
+    subtotal = [0]
+    def callback(x, subtotal = subtotal, total = total, vc = vc):
+        subtotal[0] += x
+        vc(float(subtotal[0]) / total)
+    for i in togen:
+        fc(i)
+        try:
+            t = split(i)[-1]
+            if t not in ignore and t[0] != '.':
+                if target != '':
+                    params['target'] = join(target,t+ext)
+                make_meta_file(i, url, params, flag, progress = callback, progress_percent = 0)
+        except ValueError:
+            print_exc()
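
make_meta_file() above writes <input>.torrent next to the input unless params['target'] is set, and a piece_size_pow2 of 0 selects the piece size automatically from the size table in the function. A minimal call (assumes the package is importable; the path and URL are placeholders):

    from BitTornado.BT1.makemetafile import make_meta_file

    make_meta_file('/tmp/example.bin',
                   'http://tracker.example.org/announce',
                   params = {'comment': 'test torrent',
                             'piece_size_pow2': 0})   # 0 = automatic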

Added: debtorrent/branches/upstream/current/BitTornado/BT1/track.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BT1/track.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BT1/track.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BT1/track.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1137 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado.parseargs import parseargs, formatDefinitions
+from BitTornado.RawServer import RawServer, autodetect_ipv6, autodetect_socket_style
+from BitTornado.HTTPHandler import HTTPHandler, months, weekdays
+from BitTornado.parsedir import parsedir
+from NatCheck import NatCheck, CHECK_PEER_ID_ENCRYPTED
+from BitTornado.BTcrypto import CRYPTO_OK
+from T2T import T2TList
+from BitTornado.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4
+from BitTornado.iprangeparse import IP_List as IP_Range_List
+from BitTornado.torrentlistparse import parsetorrentlist
+from threading import Event, Thread
+from BitTornado.bencode import bencode, bdecode, Bencached
+from BitTornado.zurllib import urlopen, quote, unquote
+from Filter import Filter
+from urlparse import urlparse
+from os import rename, getpid
+from os.path import exists, isfile
+from cStringIO import StringIO
+from traceback import print_exc
+from time import time, gmtime, strftime, localtime
+from BitTornado.clock import clock
+from random import shuffle, seed, randrange
+from sha import sha
+from types import StringType, IntType, LongType, ListType, DictType
+from binascii import b2a_hex, a2b_hex, a2b_base64
+from string import lower
+import sys, os
+import signal
+import re
+import BitTornado.__init__
+from BitTornado.__init__ import version, createPeerID
+try:
+    True
+except:
+    True = 1
+    False = 0
+    bool = lambda x: not not x
+
+defaults = [
+    ('port', 80, "Port to listen on."),
+    ('dfile', None, 'file to store recent downloader info in'),
+    ('bind', '', 'comma-separated list of ips/hostnames to bind to locally'),
+#    ('ipv6_enabled', autodetect_ipv6(),
+    ('ipv6_enabled', 0,
+         'allow the client to connect to peers via IPv6'),
+    ('ipv6_binds_v4', autodetect_socket_style(),
+        'set if an IPv6 server socket will also field IPv4 connections'),
+    ('socket_timeout', 15, 'timeout for closing connections'),
+    ('save_dfile_interval', 5 * 60, 'seconds between saving dfile'),
+    ('timeout_downloaders_interval', 45 * 60, 'seconds between expiring downloaders'),
+    ('reannounce_interval', 30 * 60, 'seconds downloaders should wait between reannouncements'),
+    ('response_size', 50, 'number of peers to send in an info message'),
+    ('timeout_check_interval', 5,
+        'time to wait between checking if any connections have timed out'),
+    ('nat_check', 3,
+        "how many times to check if a downloader is behind a NAT (0 = don't check)"),
+    ('log_nat_checks', 0,
+        "whether to add entries to the log for nat-check results"),
+    ('min_time_between_log_flushes', 3.0,
+        'minimum time it must have been since the last flush to do another one'),
+    ('min_time_between_cache_refreshes', 600.0,
+        'minimum time in seconds before a cache is considered stale and is flushed'),
+    ('allowed_dir', '', 'only allow downloads for .torrents in this dir'),
+    ('allowed_list', '', 'only allow downloads for hashes in this list (hex format, one per line)'),
+    ('allowed_controls', 0, 'allow special keys in torrents in the allowed_dir to affect tracker access'),
+    ('multitracker_enabled', 0, 'whether to enable multitracker operation'),
+    ('multitracker_allowed', 'autodetect', 'whether to allow incoming tracker announces (can be none, autodetect or all)'),
+    ('multitracker_reannounce_interval', 2 * 60, 'seconds between outgoing tracker announces'),
+    ('multitracker_maxpeers', 20, 'number of peers to get in a tracker announce'),
+    ('aggregate_forward', '', 'format: <url>[,<password>] - if set, forwards all non-multitracker to this url with this optional password'),
+    ('aggregator', '0', 'whether to act as a data aggregator rather than a tracker.  If enabled, may be 1, or <password>; ' +
+             'if password is set, then an incoming password is required for access'),
+    ('hupmonitor', 0, 'whether to reopen the log file upon receipt of HUP signal'),
+    ('http_timeout', 60, 
+        'number of seconds to wait before assuming that an http connection has timed out'),
+    ('parse_dir_interval', 60, 'seconds between reloading of allowed_dir or allowed_file ' +
+             'and allowed_ips and banned_ips lists'),
+    ('show_infopage', 1, "whether to display an info page when the tracker's root dir is loaded"),
+    ('infopage_redirect', '', 'a URL to redirect the info page to'),
+    ('show_names', 1, 'whether to display names from allowed dir'),
+    ('favicon', '', 'file containing x-icon data to return when browser requests favicon.ico'),
+    ('allowed_ips', '', 'only allow connections from IPs specified in the given file; '+
+             'file contains subnet data in the format: aa.bb.cc.dd/len'),
+    ('banned_ips', '', "don't allow connections from IPs specified in the given file; "+
+             'file contains IP range data in the format: xxx:xxx:ip1-ip2'),
+    ('only_local_override_ip', 2, "ignore the ip GET parameter from machines which aren't on local network IPs " +
+             "(0 = never, 1 = always, 2 = ignore if NAT checking is not enabled)"),
+    ('logfile', '', 'file to write the tracker logs, use - for stdout (default)'),
+    ('allow_get', 0, 'use with allowed_dir; adds a /file?hash={hash} url that allows users to download the torrent file'),
+    ('keep_dead', 0, 'keep dead torrents after they expire (so they still show up on your /scrape and web page)'),
+    ('scrape_allowed', 'full', 'scrape access allowed (can be none, specific or full)'),
+    ('dedicated_seed_id', '', 'allows tracker to monitor dedicated seed(s) and flag torrents as seeded'),
+    ('compact_reqd', 1, "only allow peers that accept a compact response"),
+  ]
+
+def statefiletemplate(x):
+    if type(x) != DictType:
+        raise ValueError
+    for cname, cinfo in x.items():
+        if cname == 'peers':
+            for y in cinfo.values():      # The 'peers' key is a dictionary of SHA hashes (torrent ids)
+                if type(y) != DictType:   # ... for the active torrents, and each is a dictionary
+                    raise ValueError
+                for id, info in y.items(): # ... of client ids interested in that torrent
+                    if (len(id) != 20):
+                        raise ValueError
+                    if type(info) != DictType:  # ... each of which is also a dictionary
+                        raise ValueError # ... which has an IP, a Port, and a Bytes Left count for that client for that torrent
+                    if type(info.get('ip', '')) != StringType:
+                        raise ValueError
+                    port = info.get('port')
+                    if type(port) not in (IntType,LongType) or port < 0:
+                        raise ValueError
+                    left = info.get('left')
+                    if type(left) not in (IntType,LongType) or left < 0:
+                        raise ValueError
+                    if type(info.get('supportcrypto')) not in (IntType,LongType):
+                        raise ValueError
+                    if type(info.get('requirecrypto')) not in (IntType,LongType):
+                        raise ValueError
+        elif cname == 'completed':
+            if (type(cinfo) != DictType): # The 'completed' key is a dictionary of SHA hashes (torrent ids)
+                raise ValueError          # ... for keeping track of the total completions per torrent
+            for y in cinfo.values():      # ... each torrent has an integer value
+                if type(y) not in (IntType,LongType):
+                    raise ValueError      # ... for the number of reported completions for that torrent
+        elif cname == 'allowed':
+            if (type(cinfo) != DictType): # a list of info_hashes and included data
+                raise ValueError
+            if x.has_key('allowed_dir_files'):
+                adlist = [z[1] for z in x['allowed_dir_files'].values()]
+                for y in cinfo.keys():        # and each should have a corresponding key here
+                    if not y in adlist:
+                        raise ValueError
+        elif cname == 'allowed_dir_files':
+            if (type(cinfo) != DictType): # a list of files, their attributes and info hashes
+                raise ValueError
+            dirkeys = {}
+            for y in cinfo.values():      # each entry should have a corresponding info_hash
+                if not y[1]:
+                    continue
+                if not x['allowed'].has_key(y[1]):
+                    raise ValueError
+                if dirkeys.has_key(y[1]): # and each should have a unique info_hash
+                    raise ValueError
+                dirkeys[y[1]] = 1
+            
+
+alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n'
+
+local_IPs = IP_List()
+local_IPs.set_intranet_addresses()
+
+
+def isotime(secs = None):
+    if secs == None:
+        secs = time()
+    return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs))
+
+http_via_filter = re.compile(' for ([0-9.]+)\Z')
+
+def _get_forwarded_ip(headers):
+    header = headers.get('x-forwarded-for')
+    if header:
+        try:
+            x,y = header.split(',')
+        except:
+            return header
+        if is_valid_ip(x) and not local_IPs.includes(x):
+            return x
+        return y
+    header = headers.get('client-ip')
+    if header:
+        return header
+    header = headers.get('via')
+    if header:
+        x = http_via_filter.search(header)
+        try:
+            return x.group(1)
+        except:
+            pass
+    header = headers.get('from')
+    #if header:
+    #    return header
+    #return None
+    return header
+
+def get_forwarded_ip(headers):
+    x = _get_forwarded_ip(headers)
+    if not is_valid_ip(x) or local_IPs.includes(x):
+        return None
+    return x
+
+def compact_peer_info(ip, port):
+    try:
+        s = ( ''.join([chr(int(i)) for i in ip.split('.')])
+              + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) )
+        if len(s) != 6:
+            raise ValueError
+    except:
+        s = ''  # not a valid IP, must be a domain name
+    return s
+
+class Tracker:
+    def __init__(self, config, rawserver):
+        self.config = config
+        self.response_size = config['response_size']
+        self.dfile = config['dfile']
+        self.natcheck = config['nat_check']
+        favicon = config['favicon']
+        self.parse_dir_interval = config['parse_dir_interval']
+        self.favicon = None
+        if favicon:
+            try:
+                h = open(favicon,'r')
+                self.favicon = h.read()
+                h.close()
+            except:
+                print "**warning** specified favicon file -- %s -- does not exist." % favicon
+        self.rawserver = rawserver
+        self.cached = {}    # format: infohash: [[time1, l1, s1], [time2, l2, s2], ...]
+        self.cached_t = {}  # format: infohash: [time, cache]
+        self.times = {}
+        self.state = {}
+        self.seedcount = {}
+
+        self.allowed_IPs = None
+        self.banned_IPs = None
+        if config['allowed_ips'] or config['banned_ips']:
+            self.allowed_ip_mtime = 0
+            self.banned_ip_mtime = 0
+            self.read_ip_lists()
+                
+        self.only_local_override_ip = config['only_local_override_ip']
+        if self.only_local_override_ip == 2:
+            self.only_local_override_ip = not config['nat_check']
+
+        if CHECK_PEER_ID_ENCRYPTED and not CRYPTO_OK:
+            print ('**warning** crypto library not installed,' +
+                   ' cannot completely verify encrypted peers')
+
+        if exists(self.dfile):
+            try:
+                h = open(self.dfile, 'rb')
+                ds = h.read()
+                h.close()
+                tempstate = bdecode(ds)
+                if not tempstate.has_key('peers'):
+                    tempstate = {'peers': tempstate}
+                statefiletemplate(tempstate)
+                self.state = tempstate
+            except:
+                print '**warning** statefile '+self.dfile+' corrupt; resetting'
+        self.downloads = self.state.setdefault('peers', {})
+        self.completed = self.state.setdefault('completed', {})
+
+        self.becache = {}
+        ''' format: infohash: [[l0, s0], [l1, s1], ...]
+                l0,s0 = compact, not requirecrypto=1
+                l1,s1 = compact, only supportcrypto=1
+                l2,s2 = [compact, crypto_flag], all peers
+            if --compact_reqd 0:
+                l3,s3 = [ip,port,id]
+                l4,s4 = [ip,port] nopeerid
+        '''
+        if config['compact_reqd']:
+            x = 3
+        else:
+            x = 5
+        self.cache_default = [({},{}) for i in xrange(x)]
+        for infohash, ds in self.downloads.items():
+            self.seedcount[infohash] = 0
+            for x,y in ds.items():
+                ip = y['ip']
+                if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
+                     or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
+                    del ds[x]
+                    continue
+                if not y['left']:
+                    self.seedcount[infohash] += 1
+                if y.get('nat',-1):
+                    continue
+                gip = y.get('given_ip')
+                if is_valid_ip(gip) and (
+                    not self.only_local_override_ip or local_IPs.includes(ip) ):
+                    ip = gip
+                self.natcheckOK(infohash,x,ip,y['port'],y)
+            
+        for x in self.downloads.keys():
+            self.times[x] = {}
+            for y in self.downloads[x].keys():
+                self.times[x][y] = 0
+
+        self.trackerid = createPeerID('-T-')
+        seed(self.trackerid)
+                
+        self.reannounce_interval = config['reannounce_interval']
+        self.save_dfile_interval = config['save_dfile_interval']
+        self.show_names = config['show_names']
+        rawserver.add_task(self.save_state, self.save_dfile_interval)
+        self.prevtime = clock()
+        self.timeout_downloaders_interval = config['timeout_downloaders_interval']
+        rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
+        self.logfile = None
+        self.log = None
+        if (config['logfile']) and (config['logfile'] != '-'):
+            try:
+                self.logfile = config['logfile']
+                self.log = open(self.logfile,'a')
+                sys.stdout = self.log
+                print "# Log Started: ", isotime()
+            except:
+                print "**warning** could not redirect stdout to log file: ", sys.exc_info()[0]
+
+        if config['hupmonitor']:
+            def huphandler(signum, frame, self = self):
+                try:
+                    self.log.close ()
+                    self.log = open(self.logfile,'a')
+                    sys.stdout = self.log
+                    print "# Log reopened: ", isotime()
+                except:
+                    print "**warning** could not reopen logfile"
+             
+            signal.signal(signal.SIGHUP, huphandler)            
+                
+        self.allow_get = config['allow_get']
+        
+        self.t2tlist = T2TList(config['multitracker_enabled'], self.trackerid,
+                               config['multitracker_reannounce_interval'],
+                               config['multitracker_maxpeers'], config['http_timeout'],
+                               self.rawserver)
+
+        if config['allowed_list']:
+            if config['allowed_dir']:
+                print '**warning** allowed_dir and allowed_list options cannot be used together'
+                print '**warning** disregarding allowed_dir'
+                config['allowed_dir'] = ''
+            self.allowed = self.state.setdefault('allowed_list',{})
+            self.allowed_list_mtime = 0
+            self.parse_allowed()
+            self.remove_from_state('allowed','allowed_dir_files')
+            if config['multitracker_allowed'] == 'autodetect':
+                config['multitracker_allowed'] = 'none'
+            config['allowed_controls'] = 0
+
+        elif config['allowed_dir']:
+            self.allowed = self.state.setdefault('allowed',{})
+            self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{})
+            self.allowed_dir_blocked = {}
+            self.parse_allowed()
+            self.remove_from_state('allowed_list')
+
+        else:
+            self.allowed = None
+            self.remove_from_state('allowed','allowed_dir_files', 'allowed_list')
+            if config['multitracker_allowed'] == 'autodetect':
+                config['multitracker_allowed'] = 'none'
+            config['allowed_controls'] = 0
+                
+        self.uq_broken = unquote('+') != ' '
+        self.keep_dead = config['keep_dead']
+        self.Filter = Filter(rawserver.add_task)
+        
+        aggregator = config['aggregator']
+        if aggregator == '0':
+            self.is_aggregator = False
+            self.aggregator_key = None
+        else:
+            self.is_aggregator = True
+            if aggregator == '1':
+                self.aggregator_key = None
+            else:
+                self.aggregator_key = aggregator
+            self.natcheck = False
+                
+        send = config['aggregate_forward']
+        if not send:
+            self.aggregate_forward = None
+        else:
+            try:
+                self.aggregate_forward, self.aggregate_password = send.split(',')
+            except:
+                self.aggregate_forward = send
+                self.aggregate_password = None
+
+        self.dedicated_seed_id = config['dedicated_seed_id']
+        self.is_seeded = {}
+
+        self.cachetime = 0
+        self.cachetimeupdate()
+
+    def cachetimeupdate(self):
+        self.cachetime += 1     # raw clock, but more efficient for cache
+        self.rawserver.add_task(self.cachetimeupdate,1)
+
+    def aggregate_senddata(self, query):
+        url = self.aggregate_forward+'?'+query
+        if self.aggregate_password is not None:
+            url += '&password='+self.aggregate_password
+        rq = Thread(target = self._aggregate_senddata, args = [url])
+        rq.setDaemon(False)
+        rq.start()
+
+    def _aggregate_senddata(self, url):     # just send, don't attempt to error check,
+        try:                                # discard any returned data
+            h = urlopen(url)
+            h.read()
+            h.close()
+        except:
+            return
+
+
+    def get_infopage(self):
+        try:
+            if not self.config['show_infopage']:
+                return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+            red = self.config['infopage_redirect']
+            if red:
+                return (302, 'Found', {'Content-Type': 'text/html', 'Location': red},
+                        '<A HREF="'+red+'">Click Here</A>')
+            
+            s = StringIO()
+            s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
+                '<html><head><title>BitTorrent download info</title>\n')
+            if self.favicon is not None:
+                s.write('<link rel="shortcut icon" href="/favicon.ico">\n')
+            s.write('</head>\n<body>\n' \
+                '<h3>BitTorrent download info</h3>\n'\
+                '<ul>\n'
+                '<li><strong>tracker version:</strong> %s</li>\n' \
+                '<li><strong>server time:</strong> %s</li>\n' \
+                '</ul>\n' % (version, isotime()))
+            if self.config['allowed_dir']:
+                if self.show_names:
+                    names = [ (self.allowed[hash]['name'],hash)
+                              for hash in self.allowed.keys() ]
+                else:
+                    names = [ (None,hash)
+                              for hash in self.allowed.keys() ]
+            else:
+                names = [ (None,hash) for hash in self.downloads.keys() ]
+            if not names:
+                s.write('<p>not tracking any files yet...</p>\n')
+            else:
+                names.sort()
+                tn = 0
+                tc = 0
+                td = 0
+                tt = 0  # Total transferred
+                ts = 0  # Total size
+                nf = 0  # Number of files displayed
+                if self.config['allowed_dir'] and self.show_names:
+                    s.write('<table summary="files" border="1">\n' \
+                        '<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
+                else:
+                    s.write('<table summary="files">\n' \
+                        '<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
+                for name,hash in names:
+                    l = self.downloads[hash]
+                    n = self.completed.get(hash, 0)
+                    tn = tn + n
+                    c = self.seedcount[hash]
+                    tc = tc + c
+                    d = len(l) - c
+                    td = td + d
+                    if self.config['allowed_dir'] and self.show_names:
+                        if self.allowed.has_key(hash):
+                            nf = nf + 1
+                            sz = self.allowed[hash]['length']  # size
+                            ts = ts + sz
+                            szt = sz * n   # Transferred for this torrent
+                            tt = tt + szt
+                            if self.allow_get == 1:
+                                linkname = '<a href="/file?info_hash=' + quote(hash) + '">' + name + '</a>'
+                            else:
+                                linkname = name
+                            s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
+                                % (b2a_hex(hash), linkname, size_format(sz), c, d, n, size_format(szt)))
+                    else:
+                        s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
+                            % (b2a_hex(hash), c, d, n))
+                if self.config['allowed_dir'] and self.show_names:
+                    s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n'
+                            % (nf, size_format(ts), tc, td, tn, size_format(tt)))
+                else:
+                    s.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td></tr>\n'
+                            % (nf, tc, td, tn))
+                s.write('</table>\n' \
+                    '<ul>\n' \
+                    '<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.torrent)</li>\n' \
+                    '<li><em>complete:</em> number of connected clients with the complete file</li>\n' \
+                    '<li><em>downloading:</em> number of connected clients still downloading</li>\n' \
+                    '<li><em>downloaded:</em> reported complete downloads</li>\n' \
+                    '<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
+                    '</ul>\n')
+
+            s.write('</body>\n' \
+                '</html>\n')
+            return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue())
+        except:
+            print_exc()
+            return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error')
+
+
+    def scrapedata(self, hash, return_name = True):
+        l = self.downloads[hash]
+        n = self.completed.get(hash, 0)
+        c = self.seedcount[hash]
+        d = len(l) - c
+        f = {'complete': c, 'incomplete': d, 'downloaded': n}
+        if return_name and self.show_names and self.config['allowed_dir']:
+            f['name'] = self.allowed[hash]['name']
+        return (f)
+
+    def get_scrape(self, paramslist):
+        fs = {}
+        if paramslist.has_key('info_hash'):
+            if self.config['scrape_allowed'] not in ['specific', 'full']:
+                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'specific scrape function is not available with this tracker.'}))
+            for hash in paramslist['info_hash']:
+                if self.allowed is not None:
+                    if self.allowed.has_key(hash):
+                        fs[hash] = self.scrapedata(hash)
+                else:
+                    if self.downloads.has_key(hash):
+                        fs[hash] = self.scrapedata(hash)
+        else:
+            if self.config['scrape_allowed'] != 'full':
+                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'full scrape function is not available with this tracker.'}))
+            if self.allowed is not None:
+                keys = self.allowed.keys()
+            else:
+                keys = self.downloads.keys()
+            for hash in keys:
+                fs[hash] = self.scrapedata(hash)
+
+        return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs}))
+
+
+    def get_file(self, hash):
+        if not self.allow_get:
+            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                'get function is not available with this tracker.')
+        if not self.allowed.has_key(hash):
+            return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+        fname = self.allowed[hash]['file']
+        fpath = self.allowed[hash]['path']
+        return (200, 'OK', {'Content-Type': 'application/x-bittorrent',
+            'Content-Disposition': 'attachment; filename=' + fname},
+            open(fpath, 'rb').read())
+
+
+    def check_allowed(self, infohash, paramslist):
+        if ( self.aggregator_key is not None
+                and not ( paramslist.has_key('password')
+                        and paramslist['password'][0] == self.aggregator_key ) ):
+            return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                bencode({'failure reason':
+                'Requested download is not authorized for use with this tracker.'}))
+
+        if self.allowed is not None:
+            if not self.allowed.has_key(infohash):
+                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'Requested download is not authorized for use with this tracker.'}))
+            if self.config['allowed_controls']:
+                if self.allowed[infohash].has_key('failure reason'):
+                    return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                        bencode({'failure reason': self.allowed[infohash]['failure reason']}))
+
+        if paramslist.has_key('tracker'):
+            if ( self.config['multitracker_allowed'] == 'none' or       # turned off
+                          paramslist['peer_id'][0] == self.trackerid ): # oops! contacted myself
+                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason': 'disallowed'}))
+            
+            if ( self.config['multitracker_allowed'] == 'autodetect'
+                        and not self.allowed[infohash].has_key('announce-list') ):
+                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'Requested download is not authorized for multitracker use.'}))
+
+        return None
+
+
+    def add_data(self, infohash, event, ip, paramslist):
+        peers = self.downloads.setdefault(infohash, {})
+        ts = self.times.setdefault(infohash, {})
+        self.completed.setdefault(infohash, 0)
+        self.seedcount.setdefault(infohash, 0)
+
+        def params(key, default = None, l = paramslist):
+            if l.has_key(key):
+                return l[key][0]
+            return default
+        
+        myid = params('peer_id','')
+        if len(myid) != 20:
+            raise ValueError, 'id not of length 20'
+        if event not in ['started', 'completed', 'stopped', 'snooped', None]:
+            raise ValueError, 'invalid event'
+        port = params('cryptoport')
+        if port is None:
+            port = params('port','')
+        port = long(port)
+        if port < 0 or port > 65535:
+            raise ValueError, 'invalid port'
+        left = long(params('left',''))
+        if left < 0:
+            raise ValueError, 'invalid amount left'
+        uploaded = long(params('uploaded',''))
+        downloaded = long(params('downloaded',''))
+        if params('supportcrypto'):
+            supportcrypto = 1
+            try:
+                s = int(params('requirecrypto'))
+                chr(s)
+            except:
+                s = 0
+            requirecrypto = s
+        else:
+            supportcrypto = 0
+            requirecrypto = 0
+
+        peer = peers.get(myid)
+        islocal = local_IPs.includes(ip)
+        mykey = params('key')
+        if peer:
+            auth = peer.get('key',-1) == mykey or peer.get('ip') == ip
+
+        gip = params('ip')
+        if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
+            ip1 = gip
+        else:
+            ip1 = ip
+
+        if params('numwant') is not None:
+            rsize = min(int(params('numwant')),self.response_size)
+        else:
+            rsize = self.response_size
+
+        if event == 'stopped':
+            if peer:
+                if auth:
+                    self.delete_peer(infohash,myid)
+        
+        elif not peer:
+            ts[myid] = clock()
+            peer = { 'ip': ip, 'port': port, 'left': left,
+                     'supportcrypto': supportcrypto,
+                     'requirecrypto': requirecrypto }
+            if mykey:
+                peer['key'] = mykey
+            if gip:
+                peer['given ip'] = gip
+            if port:
+                if not self.natcheck or islocal:
+                    peer['nat'] = 0
+                    self.natcheckOK(infohash,myid,ip1,port,peer)
+                else:
+                    NatCheck(self.connectback_result,infohash,myid,ip1,port,
+                             self.rawserver,encrypted=requirecrypto)
+            else:
+                peer['nat'] = 2**30
+            if event == 'completed':
+                self.completed[infohash] += 1
+            if not left:
+                self.seedcount[infohash] += 1
+                
+            peers[myid] = peer
+
+        else:
+            if not auth:
+                return rsize    # return w/o changing stats
+
+            ts[myid] = clock()
+            if not left and peer['left']:
+                self.completed[infohash] += 1
+                self.seedcount[infohash] += 1
+                if not peer.get('nat', -1):
+                    for bc in self.becache[infohash]:
+                        bc[1][myid] = bc[0][myid]
+                        del bc[0][myid]
+            elif left and not peer['left']:
+                self.completed[infohash] -= 1
+                self.seedcount[infohash] -= 1
+                if not peer.get('nat', -1):
+                    for bc in self.becache[infohash]:
+                        bc[0][myid] = bc[1][myid]
+                        del bc[1][myid]
+            peer['left'] = left
+
+            if port:
+                recheck = False
+                if ip != peer['ip']:
+                    peer['ip'] = ip
+                    recheck = True
+                if gip != peer.get('given ip'):
+                    if gip:
+                        peer['given ip'] = gip
+                    elif peer.has_key('given ip'):
+                        del peer['given ip']
+                    recheck = True
+
+                natted = peer.get('nat', -1)
+                if recheck:
+                    if natted == 0:
+                        l = self.becache[infohash]
+                        y = not peer['left']
+                        for x in l:
+                            del x[y][myid]
+                    if natted >= 0:
+                        del peer['nat'] # restart NAT testing
+                if natted and natted < self.natcheck:
+                    recheck = True
+
+                if recheck:
+                    if not self.natcheck or islocal:
+                        peer['nat'] = 0
+                        self.natcheckOK(infohash,myid,ip1,port,peer)
+                    else:
+                        NatCheck(self.connectback_result,infohash,myid,ip1,port,
+                                 self.rawserver,encrypted=requirecrypto)
+
+        return rsize
+
+
+    def peerlist(self, infohash, stopped, tracker, is_seed,
+                 return_type, rsize, supportcrypto):
+        data = {}    # return data
+        seeds = self.seedcount[infohash]
+        data['complete'] = seeds
+        data['incomplete'] = len(self.downloads[infohash]) - seeds
+        
+        if ( self.config['allowed_controls']
+                and self.allowed[infohash].has_key('warning message') ):
+            data['warning message'] = self.allowed[infohash]['warning message']
+
+        if tracker:
+            data['interval'] = self.config['multitracker_reannounce_interval']
+            if not rsize:
+                return data
+            cache = self.cached_t.setdefault(infohash, None)
+            if ( not cache or len(cache[1]) < rsize
+                 or cache[0] + self.config['min_time_between_cache_refreshes'] < clock() ):
+                bc = self.becache.setdefault(infohash,self.cache_default)
+                cache = [ clock(), bc[0][0].values() + bc[0][1].values() ]
+                self.cached_t[infohash] = cache
+                shuffle(cache[1])
+                cache = cache[1]
+
+            data['peers'] = cache[-rsize:]
+            del cache[-rsize:]
+            return data
+
+        data['interval'] = self.reannounce_interval
+        if stopped or not rsize:     # save some bandwidth
+            data['peers'] = []
+            return data
+
+        bc = self.becache.setdefault(infohash,self.cache_default)
+        len_l = len(bc[2][0])
+        len_s = len(bc[2][1])
+        if not (len_l+len_s):   # caches are empty!
+            data['peers'] = []
+            return data
+        l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
+        cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
+        if cache and ( not cache[1]
+                       or (is_seed and len(cache[1]) < rsize)
+                       or len(cache[1]) < l_get_size
+                       or cache[0]+self.config['min_time_between_cache_refreshes'] < self.cachetime ):
+            cache = None
+        if not cache:
+            peers = self.downloads[infohash]
+            if self.config['compact_reqd']:
+                vv = ([],[],[])
+            else:
+                vv = ([],[],[],[],[])
+            for key, ip, port in self.t2tlist.harvest(infohash):   # empty if disabled
+                if not peers.has_key(key):
+                    cp = compact_peer_info(ip, port)
+                    vv[0].append(cp)
+                    vv[2].append((cp,'\x00'))
+                    if not self.config['compact_reqd']:
+                        vv[3].append({'ip': ip, 'port': port, 'peer id': key})
+                        vv[4].append({'ip': ip, 'port': port})
+            cache = [ self.cachetime,
+                      bc[return_type][0].values()+vv[return_type],
+                      bc[return_type][1].values() ]
+            shuffle(cache[1])
+            shuffle(cache[2])
+            self.cached[infohash][return_type] = cache
+            for rr in xrange(len(self.cached[infohash])):
+                if rr != return_type:
+                    try:
+                        self.cached[infohash][rr][1].extend(vv[rr])
+                    except:
+                        pass
+        if len(cache[1]) < l_get_size:
+            peerdata = cache[1]
+            if not is_seed:
+                peerdata.extend(cache[2])
+            cache[1] = []
+            cache[2] = []
+        else:
+            if not is_seed:
+                peerdata = cache[2][l_get_size-rsize:]
+                del cache[2][l_get_size-rsize:]
+                rsize -= len(peerdata)
+            else:
+                peerdata = []
+            if rsize:
+                peerdata.extend(cache[1][-rsize:])
+                del cache[1][-rsize:]
+        if return_type == 0:
+            data['peers'] = ''.join(peerdata)
+        elif return_type == 1:
+            data['crypto_flags'] = '\x01'*len(peerdata)   # one flag byte per peer
+            data['peers'] = ''.join(peerdata)
+        elif return_type == 2:
+            data['crypto_flags'] = ''.join([p[1] for p in peerdata])
+            data['peers'] = ''.join([p[0] for p in peerdata])
+        else:
+            data['peers'] = peerdata
+        return data
+
+
+    def get(self, connection, path, headers):
+        real_ip = connection.get_ip()
+        ip = real_ip
+        if is_ipv4(ip):
+            ipv4 = True
+        else:
+            try:
+                ip = ipv6_to_ipv4(ip)
+                ipv4 = True
+            except ValueError:
+                ipv4 = False
+
+        if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
+             or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
+            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                bencode({'failure reason':
+                'your IP is not allowed on this tracker'}))
+
+        nip = get_forwarded_ip(headers)
+        if nip and not self.only_local_override_ip:
+            ip = nip
+            try:
+                ip = to_ipv4(ip)
+                ipv4 = True
+            except ValueError:
+                ipv4 = False
+
+        paramslist = {}
+        def params(key, default = None, l = paramslist):
+            if l.has_key(key):
+                return l[key][0]
+            return default
+
+        try:
+            (scheme, netloc, path, pars, query, fragment) = urlparse(path)
+            if self.uq_broken == 1:
+                path = path.replace('+',' ')
+                query = query.replace('+',' ')
+            path = unquote(path)[1:]
+            for s in query.split('&'):
+                if s:
+                    i = s.index('=')
+                    kw = unquote(s[:i])
+                    paramslist.setdefault(kw, [])
+                    paramslist[kw] += [unquote(s[i+1:])]
+                    
+            if path == '' or path == 'index.html':
+                return self.get_infopage()
+            if (path == 'file'):
+                return self.get_file(params('info_hash'))
+            if path == 'favicon.ico' and self.favicon is not None:
+                return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon)
+
+            # automated access from here on
+
+            if path in ('scrape', 'scrape.php', 'tracker.php/scrape'):
+                return self.get_scrape(paramslist)
+            
+            if not path in ('announce', 'announce.php', 'tracker.php/announce'):
+                return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+
+            # main tracker function
+
+            filtered = self.Filter.check(real_ip, paramslist, headers)
+            if filtered:
+                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason': filtered}))
+            
+            infohash = params('info_hash')
+            if not infohash:
+                raise ValueError, 'no info hash'
+
+            notallowed = self.check_allowed(infohash, paramslist)
+            if notallowed:
+                return notallowed
+
+            event = params('event')
+
+            rsize = self.add_data(infohash, event, ip, paramslist)
+
+        except ValueError, e:
+            return (400, 'Bad Request', {'Content-Type': 'text/plain'}, 
+                'you sent me garbage - ' + str(e))
+
+        if self.aggregate_forward and not paramslist.has_key('tracker'):
+            self.aggregate_senddata(query)
+
+        if self.is_aggregator:      # don't return peer data here
+            return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'response': 'OK'}))
+
+        if params('compact') and ipv4:
+            if params('requirecrypto'):
+                return_type = 1
+            elif params('supportcrypto'):
+                return_type = 2
+            else:
+                return_type = 0
+        elif self.config['compact_reqd'] and ipv4:
+            return (400, 'Bad Request', {'Content-Type': 'text/plain'}, 
+                'your client is outdated, please upgrade')
+        elif params('no_peer_id'):
+            return_type = 4
+        else:
+            return_type = 3
+            
+        data = self.peerlist(infohash, event=='stopped',
+                             params('tracker'), not params('left'),
+                             return_type, rsize, params('supportcrypto'))
+
+        if paramslist.has_key('scrape'):    # deprecated
+            data['scrape'] = self.scrapedata(infohash, False)
+
+        if self.dedicated_seed_id:
+            if params('seed_id') == self.dedicated_seed_id and params('left') == 0:
+                self.is_seeded[infohash] = True
+            if params('check_seeded') and self.is_seeded.get(infohash):
+                data['seeded'] = 1
+            
+        return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data))
+
+
+    def natcheckOK(self, infohash, peerid, ip, port, peer):
+        seed = not peer['left']
+        bc = self.becache.setdefault(infohash,self.cache_default)
+        cp = compact_peer_info(ip, port)
+        reqc = peer['requirecrypto']
+        bc[2][seed][peerid] = (cp,chr(reqc))
+        if peer['supportcrypto']:
+            bc[1][seed][peerid] = cp
+        if not reqc:
+            bc[0][seed][peerid] = cp
+            if not self.config['compact_reqd']:
+                bc[3][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
+                                                         'peer id': peerid}))
+                bc[4][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
+
+
+    def natchecklog(self, peerid, ip, port, result):
+        year, month, day, hour, minute, second, a, b, c = localtime(time())
+        print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % (
+            ip, quote(peerid), day, months[month], year, hour, minute, second,
+            ip, port, result)
+
+    def connectback_result(self, result, downloadid, peerid, ip, port):
+        record = self.downloads.get(downloadid,{}).get(peerid)
+        if ( record is None 
+                 or (record['ip'] != ip and record.get('given ip') != ip)
+                 or record['port'] != port ):
+            if self.config['log_nat_checks']:
+                self.natchecklog(peerid, ip, port, 404)
+            return
+        if self.config['log_nat_checks']:
+            if result:
+                x = 200
+            else:
+                x = 503
+            self.natchecklog(peerid, ip, port, x)
+        if not record.has_key('nat'):
+            record['nat'] = int(not result)
+            if result:
+                self.natcheckOK(downloadid,peerid,ip,port,record)
+        elif result and record['nat']:
+            record['nat'] = 0
+            self.natcheckOK(downloadid,peerid,ip,port,record)
+        elif not result:
+            record['nat'] += 1
+
+
+    def remove_from_state(self, *l):
+        for s in l:
+            try:
+                del self.state[s]
+            except:
+                pass
+
+    def save_state(self):
+        self.rawserver.add_task(self.save_state, self.save_dfile_interval)
+        h = open(self.dfile, 'wb')
+        h.write(bencode(self.state))
+        h.close()
+
+
+    def parse_allowed(self):
+        self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval)
+
+        if self.config['allowed_dir']:
+            r = parsedir( self.config['allowed_dir'], self.allowed,
+                          self.allowed_dir_files, self.allowed_dir_blocked,
+                          [".torrent"] )
+            ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked,
+                added, garbage2 ) = r
+            
+            self.state['allowed'] = self.allowed
+            self.state['allowed_dir_files'] = self.allowed_dir_files
+
+            self.t2tlist.parse(self.allowed)
+            
+        else:
+            f = self.config['allowed_list']
+            if self.allowed_list_mtime == os.path.getmtime(f):
+                return
+            try:
+                r = parsetorrentlist(f, self.allowed)
+                (self.allowed, added, garbage2) = r
+                self.state['allowed_list'] = self.allowed
+            except (IOError, OSError):
+                print '**warning** unable to read allowed torrent list'
+                return
+            self.allowed_list_mtime = os.path.getmtime(f)
+
+        for infohash in added.keys():
+            self.downloads.setdefault(infohash, {})
+            self.completed.setdefault(infohash, 0)
+            self.seedcount.setdefault(infohash, 0)
+
+
+    def read_ip_lists(self):
+        self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval)
+            
+        f = self.config['allowed_ips']
+        if f and self.allowed_ip_mtime != os.path.getmtime(f):
+            self.allowed_IPs = IP_List()
+            try:
+                self.allowed_IPs.read_fieldlist(f)
+                self.allowed_ip_mtime = os.path.getmtime(f)
+            except (IOError, OSError):
+                print '**warning** unable to read allowed_IP list'
+                
+        f = self.config['banned_ips']
+        if f and self.banned_ip_mtime != os.path.getmtime(f):
+            self.banned_IPs = IP_Range_List()
+            try:
+                self.banned_IPs.read_rangelist(f)
+                self.banned_ip_mtime = os.path.getmtime(f)
+            except (IOError, OSError):
+                print '**warning** unable to read banned_IP list'
+                
+
+    def delete_peer(self, infohash, peerid):
+        dls = self.downloads[infohash]
+        peer = dls[peerid]
+        if not peer['left']:
+            self.seedcount[infohash] -= 1
+        if not peer.get('nat',-1):
+            l = self.becache[infohash]
+            y = not peer['left']
+            for x in l:
+                if x[y].has_key(peerid):
+                    del x[y][peerid]
+        del self.times[infohash][peerid]
+        del dls[peerid]
+
+    def expire_downloaders(self):
+        for x in self.times.keys():
+            for myid, t in self.times[x].items():
+                if t < self.prevtime:
+                    self.delete_peer(x,myid)
+        self.prevtime = clock()
+        if (self.keep_dead != 1):
+            for key, value in self.downloads.items():
+                if len(value) == 0 and (
+                        self.allowed is None or not self.allowed.has_key(key) ):
+                    del self.times[key]
+                    del self.downloads[key]
+                    del self.seedcount[key]
+        self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
+
+
+def track(args):
+    if len(args) == 0:
+        print formatDefinitions(defaults, 80)
+        return
+    try:
+        config, files = parseargs(args, defaults, 0, 0)
+    except ValueError, e:
+        print 'error: ' + str(e)
+        print 'run with no arguments for parameter explanations'
+        return
+    r = RawServer(Event(), config['timeout_check_interval'],
+                  config['socket_timeout'], ipv6_enable = config['ipv6_enabled'])
+    t = Tracker(config, r)
+    r.bind(config['port'], config['bind'],
+           reuse = True, ipv6_socket_style = config['ipv6_binds_v4'])
+    r.listen_forever(HTTPHandler(t.get, config['min_time_between_log_flushes']))
+    t.save_state()
+    print '# Shutting down: ' + isotime()
+
+def size_format(s):
+    if (s < 1024):
+        r = str(s) + 'B'
+    elif (s < 1048576):
+        r = str(int(s/1024)) + 'KiB'
+    elif (s < 1073741824L):
+        r = str(int(s/1048576)) + 'MiB'
+    elif (s < 1099511627776L):
+        r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB'
+    else:
+        r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB'
+    return(r)
+

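For orientation, here is a small sanity-check sketch of the size_format() helper defined at the end of the tracker code above. The expected strings are inferred from the 1024/1048576/... thresholds in the function itself; the import path is an assumption based on the usual BitTornado layout (track.py under BitTornado/BT1/), since this excerpt does not show the file name.

    # Python 2 sketch: expected size_format() output for a few byte counts,
    # derived from the thresholds in the function above.
    from BitTornado.BT1.track import size_format   # module path assumed

    for nbytes, expected in [(512, '512B'),
                             (2048, '2KiB'),
                             (5 * 1048576, '5MiB'),
                             (3 * 1073741824L, '3.0GiB')]:
        assert size_format(nbytes) == expected
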
Added: debtorrent/branches/upstream/current/BitTornado/BTcrypto.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/BTcrypto.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/BTcrypto.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/BTcrypto.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,103 @@
+# Written by John Hoffman
+# based on code by Uoti Urpala
+# see LICENSE.txt for license information
+
+from __future__ import generators   # for python 2.2
+from random import randrange,randint,seed
+try:
+    from os import urandom
+except:
+    seed()
+    urandom = lambda x: ''.join([chr(randint(0,255)) for i in xrange(x)])
+from sha import sha
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+    
+try:
+    from Crypto.Cipher import ARC4
+    CRYPTO_OK = True
+except:
+    CRYPTO_OK = False
+
+KEY_LENGTH = 160
+DH_PRIME = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A63A36210000000000090563
+PAD_MAX = 200 # less than protocol maximum, and later assumed to be < 256
+DH_BYTES = 96
+
+def bytetonum(x):
+    return long(x.encode('hex'), 16)
+
+def numtobyte(x):
+    x = hex(x).lstrip('0x').rstrip('Ll')
+    x = '0'*(192 - len(x)) + x
+    return x.decode('hex')
+
+class Crypto:
+    def __init__(self, initiator, disable_crypto = False):
+        self.initiator = initiator
+        self.disable_crypto = disable_crypto
+        if not disable_crypto and not CRYPTO_OK:
+            raise NotImplementedError, "attempt to run encryption w/ none installed"
+        self.privkey = bytetonum(urandom(KEY_LENGTH/8))
+        self.pubkey = numtobyte(pow(2, self.privkey, DH_PRIME))
+        self.keylength = DH_BYTES
+        self._VC_pattern = None
+
+    def received_key(self, k):
+        self.S = numtobyte(pow(bytetonum(k), self.privkey, DH_PRIME))
+        self.block3a = sha('req1'+self.S).digest()
+        self.block3bkey = sha('req3'+self.S).digest()
+        self.block3b = None
+
+    def _gen_block3b(self, SKEY):
+        a = sha('req2'+SKEY).digest()
+        return ''.join([ chr(ord(a[i])^ord(self.block3bkey[i]))
+                         for i in xrange(20) ])
+
+    def test_skey(self, s, SKEY):
+        block3b = self._gen_block3b(SKEY)
+        if block3b != s:
+            return False
+        self.block3b = block3b
+        if not self.disable_crypto:
+            self.set_skey(SKEY)
+        return True
+
+    def set_skey(self, SKEY):
+        if not self.block3b:
+            self.block3b = self._gen_block3b(SKEY)
+        crypta = ARC4.new(sha('keyA'+self.S+SKEY).digest())
+        cryptb = ARC4.new(sha('keyB'+self.S+SKEY).digest())
+        if self.initiator:
+            self.encrypt = crypta.encrypt
+            self.decrypt = cryptb.decrypt
+        else:
+            self.encrypt = cryptb.encrypt
+            self.decrypt = crypta.decrypt
+        self.encrypt('x'*1024)  # discard first 1024 bytes
+        self.decrypt('x'*1024)
+
+    def VC_pattern(self):
+        if not self._VC_pattern:
+            self._VC_pattern = self.decrypt('\x00'*8)
+        return self._VC_pattern
+
+
+    def read(self, s):
+        self._read(self.decrypt(s))
+
+    def write(self, s):
+        self._write(self.encrypt(s))
+
+    def setrawaccess(self, _read, _write):
+        self._read = _read
+        self._write = _write
+
+    def padding(self):
+        return urandom(randrange(PAD_MAX-16)+16)
+     
+        

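As a minimal usage sketch of the Crypto class above: each endpoint generates a Diffie-Hellman keypair in __init__ and feeds the other side's public key into received_key(), after which both hold the same shared secret S. Passing disable_crypto=True keeps the sketch independent of pycrypto's ARC4; the import path is an assumption based on the file location in this commit.

    # Python 2 sketch: both ends of the handshake derive the same 96-byte secret.
    from BitTornado.BTcrypto import Crypto, DH_BYTES   # path assumed

    a = Crypto(initiator=True, disable_crypto=True)
    b = Crypto(initiator=False, disable_crypto=True)
    a.received_key(b.pubkey)   # each side consumes the other's public key
    b.received_key(a.pubkey)
    assert a.S == b.S
    assert len(a.S) == DH_BYTES
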
Added: debtorrent/branches/upstream/current/BitTornado/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/BitTornado/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,31 @@
+/.cvsignore/1.1/Tue Feb 24 17:53:47 2004//
+/BTcrypto.py/1.8/Fri Dec 22 02:10:49 2006//
+/ConfigDir.py/1.23/Sat Jan 22 17:39:26 2005//
+/ConfigReader.py/1.30/Sat Dec 23 04:53:58 2006//
+/ConnChoice.py/1.2/Sun Jul 11 02:15:37 2004//
+/CreateIcons.py/1.3/Tue Nov 30 22:03:22 2004//
+/CurrentRateMeasure.py/1.4/Thu May 13 15:14:58 2004//
+/HTTPHandler.py/1.6/Fri Dec 17 00:28:48 2004//
+/PSYCO.py/1.1/Tue Feb 24 17:53:47 2004//
+/RateLimiter.py/1.12/Mon Jul 12 14:37:18 2004//
+/RateMeasure.py/1.8/Tue Oct 10 23:16:35 2006//
+/RawServer.py/1.30/Mon Dec 11 18:28:37 2006//
+/ServerPortHandler.py/1.20/Thu Dec 21 18:55:42 2006//
+/SocketHandler.py/1.29/Mon Dec 11 18:28:37 2006//
+/__init__.py/1.37/Sat Dec 23 04:35:22 2006//
+/bencode.py/1.13/Fri Dec 31 19:35:35 2004//
+/bitfield.py/1.11/Thu Apr 14 16:35:12 2005//
+/clock.py/1.2/Thu May 13 16:18:05 2004//
+/download_bt1.py/1.75/Sat Dec 23 19:48:23 2006//
+/inifile.py/1.3/Wed Jan  5 20:21:53 2005//
+/iprangeparse.py/1.4/Mon Dec 11 16:48:49 2006//
+/launchmanycore.py/1.36/Mon Dec 11 23:05:17 2006//
+/natpunch.py/1.11/Wed Dec 15 03:56:16 2004//
+/parseargs.py/1.5/Tue May 25 19:00:58 2004//
+/parsedir.py/1.16/Fri Jun 25 17:36:57 2004//
+/piecebuffer.py/1.5/Sat Apr  9 00:39:41 2005//
+/selectpoll.py/1.4/Sat Jul 10 21:54:53 2004//
+/subnetparse.py/1.9/Mon Dec 11 16:48:49 2006//
+/torrentlistparse.py/1.2/Tue Dec 21 22:14:09 2004//
+/zurllib.py/1.11/Sat May 21 23:35:09 2005//
+D

Added: debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,30 @@
+/.cvsignore////*///
+/BTcrypto.py////*///
+/ConfigDir.py////*///
+/ConfigReader.py////*///
+/ConnChoice.py////*///
+/CreateIcons.py////*///
+/CurrentRateMeasure.py////*///
+/HTTPHandler.py////*///
+/PSYCO.py////*///
+/RateLimiter.py////*///
+/RateMeasure.py////*///
+/RawServer.py////*///
+/ServerPortHandler.py////*///
+/SocketHandler.py////*///
+/__init__.py////*///
+/bencode.py////*///
+/bitfield.py////*///
+/clock.py////*///
+/download_bt1.py////*///
+/inifile.py////*///
+/iprangeparse.py////*///
+/launchmanycore.py////*///
+/natpunch.py////*///
+/parseargs.py////*///
+/parsedir.py////*///
+/piecebuffer.py////*///
+/selectpoll.py////*///
+/subnetparse.py////*///
+/torrentlistparse.py////*///
+/zurllib.py////*///

Added: debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Log
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Log?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Log (added)
+++ debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Log Sat Apr 14 18:47:18 2007
@@ -1,0 +1,2 @@
+A D/BT1////
+A D/GUI////

Added: debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/BitTornado/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/BitTornado/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/BitTornado

Added: debtorrent/branches/upstream/current/BitTornado/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/CVS/Root (added)
+++ debtorrent/branches/upstream/current/BitTornado/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/BitTornado/ConfigDir.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/ConfigDir.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/ConfigDir.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/ConfigDir.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,401 @@
+#written by John Hoffman
+
+from inifile import ini_write, ini_read
+from bencode import bencode, bdecode
+from types import IntType, LongType, StringType, FloatType
+from CreateIcons import GetIcons, CreateIcon
+from parseargs import defaultargs
+from __init__ import product_name, version_short
+import sys,os
+from time import time, strftime
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+try:
+    realpath = os.path.realpath
+except:
+    realpath = lambda x:x
+OLDICONPATH = os.path.abspath(os.path.dirname(realpath(sys.argv[0])))
+
+DIRNAME = '.'+product_name
+
+hexchars = '0123456789abcdef'
+hexmap = []
+revmap = {}
+for i in xrange(256):
+    x = hexchars[(i&0xF0)/16]+hexchars[i&0x0F]
+    hexmap.append(x)
+    revmap[x] = chr(i)
+
+def tohex(s):
+    r = []
+    for c in s:
+        r.append(hexmap[ord(c)])
+    return ''.join(r)
+
+def unhex(s):
+    r = [ revmap[s[x:x+2]] for x in xrange(0, len(s), 2) ]
+    return ''.join(r)
+
+def copyfile(oldpath, newpath): # simple file copy, all in RAM
+    try:
+        f = open(oldpath,'rb')
+        r = f.read()
+        success = True
+    except:
+        success = False
+    try:
+        f.close()
+    except:
+        pass
+    if not success:
+        return False
+    try:
+        f = open(newpath,'wb')
+        f.write(r)
+    except:
+        success = False
+    try:
+        f.close()
+    except:
+        pass
+    return success
+
+
+class ConfigDir:
+
+    ###### INITIALIZATION TASKS ######
+
+    def __init__(self, config_type = None):
+        self.config_type = config_type
+        if config_type:
+            config_ext = '.'+config_type
+        else:
+            config_ext = ''
+
+        def check_sysvars(x):
+            y = os.path.expandvars(x)
+            if y != x and os.path.isdir(y):
+                return y
+            return None
+
+        for d in ['${APPDATA}', '${HOME}', '${HOMEPATH}', '${USERPROFILE}']:
+            dir_root = check_sysvars(d)
+            if dir_root:
+                break
+        else:
+            dir_root = os.path.expanduser('~')
+            if not os.path.isdir(dir_root):
+                dir_root = os.path.abspath(os.path.dirname(sys.argv[0]))
+
+        dir_root = os.path.join(dir_root,DIRNAME)
+        self.dir_root = dir_root
+
+        if not os.path.isdir(self.dir_root):
+            os.mkdir(self.dir_root,0700)    # exception if failed
+
+        self.dir_icons = os.path.join(dir_root,'icons')
+        if not os.path.isdir(self.dir_icons):
+            os.mkdir(self.dir_icons)
+        for icon in GetIcons():
+            i = os.path.join(self.dir_icons,icon)
+            if not os.path.exists(i):
+                if not copyfile(os.path.join(OLDICONPATH,icon),i):
+                    CreateIcon(icon,self.dir_icons)
+
+        self.dir_torrentcache = os.path.join(dir_root,'torrentcache')
+        if not os.path.isdir(self.dir_torrentcache):
+            os.mkdir(self.dir_torrentcache)
+
+        self.dir_datacache = os.path.join(dir_root,'datacache')
+        if not os.path.isdir(self.dir_datacache):
+            os.mkdir(self.dir_datacache)
+
+        self.dir_piececache = os.path.join(dir_root,'piececache')
+        if not os.path.isdir(self.dir_piececache):
+            os.mkdir(self.dir_piececache)
+
+        self.configfile = os.path.join(dir_root,'config'+config_ext+'.ini')
+        self.statefile = os.path.join(dir_root,'state'+config_ext)
+
+        self.TorrentDataBuffer = {}
+
+
+    ###### CONFIG HANDLING ######
+
+    def setDefaults(self, defaults, ignore=[]):
+        self.config = defaultargs(defaults)
+        for k in ignore:
+            if self.config.has_key(k):
+                del self.config[k]
+
+    def checkConfig(self):
+        return os.path.exists(self.configfile)
+
+    def loadConfig(self):
+        try:
+            r = ini_read(self.configfile)['']
+        except:
+            return self.config
+        l = self.config.keys()
+        for k,v in r.items():
+            if self.config.has_key(k):
+                t = type(self.config[k])
+                try:
+                    if t == StringType:
+                        self.config[k] = v
+                    elif t == IntType or t == LongType:
+                        self.config[k] = long(v)
+                    elif t == FloatType:
+                        self.config[k] = float(v)
+                    l.remove(k)
+                except:
+                    pass
+        if l: # new default values since last save
+            self.saveConfig()
+        return self.config
+
+    def saveConfig(self, new_config = None):
+        if new_config:
+            for k,v in new_config.items():
+                if self.config.has_key(k):
+                    self.config[k] = v
+        try:
+            ini_write( self.configfile, self.config,
+                       'Generated by '+product_name+'/'+version_short+'\n'
+                       + strftime('%x %X') )
+            return True
+        except:
+            return False
+
+    def getConfig(self):
+        return self.config
+
+
+    ###### STATE HANDLING ######
+
+    def getState(self):
+        try:
+            f = open(self.statefile,'rb')
+            r = f.read()
+        except:
+            r = None
+        try:
+            f.close()
+        except:
+            pass
+        try:
+            r = bdecode(r)
+        except:
+            r = None
+        return r        
+
+    def saveState(self, state):
+        try:
+            f = open(self.statefile,'wb')
+            f.write(bencode(state))
+            success = True
+        except:
+            success = False
+        try:
+            f.close()
+        except:
+            pass
+        return success
+
+
+    ###### TORRENT HANDLING ######
+
+    def getTorrents(self):
+        d = {}
+        for f in os.listdir(self.dir_torrentcache):
+            f = os.path.basename(f)
+            try:
+                f, garbage = f.split('.')
+            except:
+                pass
+            d[unhex(f)] = 1
+        return d.keys()
+
+    def getTorrentVariations(self, t):
+        t = tohex(t)
+        d = []
+        for f in os.listdir(self.dir_torrentcache):
+            f = os.path.basename(f)
+            if f[:len(t)] == t:
+                try:
+                    garbage, ver = f.split('.')
+                except:
+                    ver = '0'
+                d.append(int(ver))
+        d.sort()
+        return d
+
+    def getTorrent(self, t, v = -1):
+        t = tohex(t)
+        if v == -1:
+            v = max(self.getTorrentVariations(t))   # potential exception
+        if v:
+            t += '.'+str(v)
+        try:
+            f = open(os.path.join(self.dir_torrentcache,t),'rb')
+            r = bdecode(f.read())
+        except:
+            r = None
+        try:
+            f.close()
+        except:
+            pass
+        return r
+
+    def writeTorrent(self, data, t, v = -1):
+        t = tohex(t)
+        if v == -1:
+            try:
+                v = max(self.getTorrentVariations(t))+1
+            except:
+                v = 0
+        if v:
+            t += '.'+str(v)
+        try:
+            f = open(os.path.join(self.dir_torrentcache,t),'wb')
+            f.write(bencode(data))
+        except:
+            v = None
+        try:
+            f.close()
+        except:
+            pass
+        return v
+
+
+    ###### TORRENT DATA HANDLING ######
+
+    def getTorrentData(self, t):
+        if self.TorrentDataBuffer.has_key(t):
+            return self.TorrentDataBuffer[t]
+        t = os.path.join(self.dir_datacache,tohex(t))
+        if not os.path.exists(t):
+            return None
+        try:
+            f = open(t,'rb')
+            r = bdecode(f.read())
+        except:
+            r = None
+        try:
+            f.close()
+        except:
+            pass
+        self.TorrentDataBuffer[t] = r
+        return r
+
+    def writeTorrentData(self, t, data):
+        self.TorrentDataBuffer[t] = data
+        try:
+            f = open(os.path.join(self.dir_datacache,tohex(t)),'wb')
+            f.write(bencode(data))
+            success = True
+        except:
+            success = False
+        try:
+            f.close()
+        except:
+            pass
+        if not success:
+            self.deleteTorrentData(t)
+        return success
+
+    def deleteTorrentData(self, t):
+        try:
+            os.remove(os.path.join(self.dir_datacache,tohex(t)))
+        except:
+            pass
+
+    def getPieceDir(self, t):
+        return os.path.join(self.dir_piececache,tohex(t))
+
+
+    ###### EXPIRATION HANDLING ######
+
+    def deleteOldCacheData(self, days, still_active = [], delete_torrents = False):
+        if not days:
+            return
+        exptime = time() - (days*24*3600)
+        names = {}
+        times = {}
+
+        for f in os.listdir(self.dir_torrentcache):
+            p = os.path.join(self.dir_torrentcache,f)
+            f = os.path.basename(f)
+            try:
+                f, garbage = f.split('.')
+            except:
+                pass
+            try:
+                f = unhex(f)
+                assert len(f) == 20
+            except:
+                continue
+            if delete_torrents:
+                names.setdefault(f,[]).append(p)
+            try:
+                t = os.path.getmtime(p)
+            except:
+                t = time()
+            times.setdefault(f,[]).append(t)
+        
+        for f in os.listdir(self.dir_datacache):
+            p = os.path.join(self.dir_datacache,f)
+            try:
+                f = unhex(os.path.basename(f))
+                assert len(f) == 20
+            except:
+                continue
+            names.setdefault(f,[]).append(p)
+            try:
+                t = os.path.getmtime(p)
+            except:
+                t = time()
+            times.setdefault(f,[]).append(t)
+
+        for f in os.listdir(self.dir_piececache):
+            p = os.path.join(self.dir_piececache,f)
+            try:
+                f = unhex(os.path.basename(f))
+                assert len(f) == 20
+            except:
+                continue
+            for f2 in os.listdir(p):
+                p2 = os.path.join(p,f2)
+                names.setdefault(f,[]).append(p2)
+                try:
+                    t = os.path.getmtime(p2)
+                except:
+                    t = time()
+                times.setdefault(f,[]).append(t)
+            names.setdefault(f,[]).append(p)
+
+        for k,v in times.items():
+            if max(v) < exptime and not k in still_active:
+                for f in names[k]:
+                    try:
+                        os.remove(f)
+                    except:
+                        try:
+                            os.removedirs(f)
+                        except:
+                            pass
+
+
+    def deleteOldTorrents(self, days, still_active = []):
+        self.deleteOldCacheData(days, still_active, True)
+
+
+    ###### OTHER ######
+
+    def getIconDir(self):
+        return self.dir_icons

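A quick round-trip sketch of the tohex()/unhex() helpers above, which ConfigDir uses to turn 20-byte info hashes into cache file names and back. The import path is assumed, and importing ConfigDir presumes its sibling modules (inifile, bencode, CreateIcons, parseargs) are on the path; the helpers themselves touch neither wx nor the filesystem.

    # Python 2 sketch: tohex() and unhex() are exact inverses on byte strings.
    from BitTornado.ConfigDir import tohex, unhex   # path assumed

    infohash = ''.join([chr(i) for i in range(20)])   # stand-in 20-byte hash
    assert tohex(infohash) == '000102030405060708090a0b0c0d0e0f10111213'
    assert unhex(tohex(infohash)) == infohash
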
Added: debtorrent/branches/upstream/current/BitTornado/ConfigReader.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/ConfigReader.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/ConfigReader.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/ConfigReader.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1195 @@
+#written by John Hoffman
+
+from ConnChoice import *
+from wxPython.wx import *
+from types import IntType, FloatType, StringType
+from download_bt1 import defaults
+from ConfigDir import ConfigDir
+import sys,os
+import socket
+from parseargs import defaultargs
+from BTcrypto import CRYPTO_OK
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+    
+try:
+    wxFULL_REPAINT_ON_RESIZE
+except:
+    wxFULL_REPAINT_ON_RESIZE = 0        # fix for wx pre-2.5
+
+if (sys.platform == 'win32'):
+    _FONT = 9
+else:
+    _FONT = 10
+
+def HexToColor(s):
+    r,g,b = s.split(' ')
+    return wxColour(red=int(r,16), green=int(g,16), blue=int(b,16))
+    
+def hex2(c):
+    h = hex(c)[2:]
+    if len(h) == 1:
+        h = '0'+h
+    return h
+def ColorToHex(c):
+    return hex2(c.Red()) + ' ' + hex2(c.Green()) + ' ' + hex2(c.Blue())
+
+ratesettingslist = []
+for x in connChoices:
+    if not x.has_key('super-seed'):
+        ratesettingslist.append(x['name'])
+
+
+configFileDefaults = [
+    #args only available for the gui client
+    ('win32_taskbar_icon', 1,
+         "whether to iconize do system try or not on win32"),
+    ('gui_stretchwindow', 0,
+         "whether to stretch the download status window to fit the torrent name"),
+    ('gui_displaystats', 1,
+         "whether to display statistics on peers and seeds"),
+    ('gui_displaymiscstats', 1,
+         "whether to display miscellaneous other statistics"),
+    ('gui_ratesettingsdefault', ratesettingslist[0],
+         "the default setting for maximum upload rate and users"),
+    ('gui_ratesettingsmode', 'full',
+         "what rate setting controls to display; options are 'none', 'basic', and 'full'"),
+    ('gui_forcegreenonfirewall', 0,
+         "forces the status icon to be green even if the client seems to be firewalled"),
+    ('gui_default_savedir', '',
+         "default save directory"),
+    ('last_saved', '',       # hidden; not set in config
+         "where the last torrent was saved"),
+    ('gui_font', _FONT,
+         "the font size to use"),
+    ('gui_saveas_ask', -1,
+         "whether to ask where to download to (0 = never, 1 = always, -1 = automatic resume"),
+]
+
+def setwxconfigfiledefaults():
+    CHECKINGCOLOR = ColorToHex(wxSystemSettings_GetColour(wxSYS_COLOUR_3DSHADOW)) 	 
+    DOWNLOADCOLOR = ColorToHex(wxSystemSettings_GetColour(wxSYS_COLOUR_ACTIVECAPTION))
+    
+    configFileDefaults.extend([
+        ('gui_checkingcolor', CHECKINGCOLOR,
+            "progress bar checking color"),
+        ('gui_downloadcolor', DOWNLOADCOLOR,
+            "progress bar downloading color"),
+        ('gui_seedingcolor', '00 FF 00',
+            "progress bar seeding color"),
+    ])
+
+defaultsToIgnore = ['responsefile', 'url', 'priority']
+
+
+class configReader:
+
+    def __init__(self):
+        self.configfile = wxConfig("BitTorrent",style=wxCONFIG_USE_LOCAL_FILE)
+        self.configMenuBox = None
+        self.advancedMenuBox = None
+        self.cryptoMenuBox = None
+        self._configReset = True         # run reset for the first time
+
+        setwxconfigfiledefaults()
+
+        defaults.extend(configFileDefaults)
+        self.defaults = defaultargs(defaults)
+
+        self.configDir = ConfigDir('gui')
+        self.configDir.setDefaults(defaults,defaultsToIgnore)
+        if self.configDir.checkConfig():
+            self.config = self.configDir.loadConfig()
+        else:
+            self.config = self.configDir.getConfig()
+            self.importOldGUIConfig()
+            self.configDir.saveConfig()
+
+        updated = False     # make all config default changes here
+
+        if self.config['gui_ratesettingsdefault'] not in ratesettingslist:
+            self.config['gui_ratesettingsdefault'] = (
+                                self.defaults['gui_ratesettingsdefault'] )
+            updated = True
+        if self.config['ipv6_enabled'] and (
+                        sys.version_info < (2,3) or not socket.has_ipv6 ):
+            self.config['ipv6_enabled'] = 0
+            updated = True
+        for c in ['gui_checkingcolor','gui_downloadcolor','gui_seedingcolor']:
+            try:
+                HexToColor(self.config[c])
+            except:
+                self.config[c] = self.defaults[c]
+                updated = True
+
+        if updated:
+            self.configDir.saveConfig()
+
+        self.configDir.deleteOldCacheData(self.config['expire_cache_data'])
+
+
+    def importOldGUIConfig(self):
+        oldconfig = wxConfig("BitTorrent",style=wxCONFIG_USE_LOCAL_FILE)
+        cont, s, i = oldconfig.GetFirstEntry()
+        if not cont:
+            oldconfig.DeleteAll()
+            return False
+        while cont:     # import old config data
+            if self.config.has_key(s):
+                t = oldconfig.GetEntryType(s)
+                try:
+                    if t == 1:
+                        assert type(self.config[s]) == type('')
+                        self.config[s] = oldconfig.Read(s)
+                    elif t == 2 or t == 3:
+                        assert type(self.config[s]) == type(1)
+                        self.config[s] = int(oldconfig.ReadInt(s))
+                    elif t == 4:
+                        assert type(self.config[s]) == type(1.0)
+                        self.config[s] = oldconfig.ReadFloat(s)
+                except:
+                    pass
+            cont, s, i = oldconfig.GetNextEntry(i)
+
+#        oldconfig.DeleteAll()
+        return True
+
+
+    def resetConfigDefaults(self):
+        for p,v in self.defaults.items():
+            if not p in defaultsToIgnore:
+                self.config[p] = v
+        self.configDir.saveConfig()
+
+    def writeConfigFile(self):
+        self.configDir.saveConfig()
+
+    def WriteLastSaved(self, l):
+        self.config['last_saved'] = l
+        self.configDir.saveConfig()
+
+
+    def getcheckingcolor(self):
+        return HexToColor(self.config['gui_checkingcolor'])
+    def getdownloadcolor(self):
+        return HexToColor(self.config['gui_downloadcolor'])
+    def getseedingcolor(self):
+        return HexToColor(self.config['gui_seedingcolor'])
+
+    def configReset(self):
+        r = self._configReset
+        self._configReset = False
+        return r
+
+    def getConfigDir(self):
+        return self.configDir
+
+    def getIconDir(self):
+        return self.configDir.getIconDir()
+
+    def getTorrentData(self,t):
+        return self.configDir.getTorrentData(t)
+
+    def setColorIcon(self, xxicon, xxiconptr, xxcolor):
+        idata = wxMemoryDC()
+        idata.SelectObject(xxicon)
+        idata.SetBrush(wxBrush(xxcolor,wxSOLID))
+        idata.DrawRectangle(0,0,16,16)
+        idata.SelectObject(wxNullBitmap)
+        xxiconptr.Refresh()
+
+
+    def getColorFromUser(self, parent, colInit):
+        data = wxColourData()
+        if colInit.Ok():
+            data.SetColour(colInit)
+        data.SetCustomColour(0, self.checkingcolor)
+        data.SetCustomColour(1, self.downloadcolor)
+        data.SetCustomColour(2, self.seedingcolor)
+        dlg = wxColourDialog(parent,data)
+        if not dlg.ShowModal():
+            return colInit
+        return dlg.GetColourData().GetColour()
+
+
+    def configMenu(self, parent):
+      self.parent = parent
+      try:
+        self.FONT = self.config['gui_font']
+        self.default_font = wxFont(self.FONT, wxDEFAULT, wxNORMAL, wxNORMAL, False)
+        self.checkingcolor = HexToColor(self.config['gui_checkingcolor'])
+        self.downloadcolor = HexToColor(self.config['gui_downloadcolor'])
+        self.seedingcolor = HexToColor(self.config['gui_seedingcolor'])
+        
+        if (self.configMenuBox is not None):
+            try:
+                self.configMenuBox.Close()
+            except wxPyDeadObjectError, e:
+                self.configMenuBox = None
+
+        self.configMenuBox = wxFrame(None, -1, 'BitTornado Preferences', size = (1,1),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+        if (sys.platform == 'win32'):
+            self.icon = self.parent.icon
+            self.configMenuBox.SetIcon(self.icon)
+
+        panel = wxPanel(self.configMenuBox, -1)
+        self.panel = panel
+
+        def StaticText(text, font = self.FONT, underline = False, color = None, panel = panel):
+            x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+            x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+            if color is not None:
+                x.SetForegroundColour(color)
+            return x
+
+        colsizer = wxFlexGridSizer(cols = 1, vgap = 8)
+
+        self.gui_stretchwindow_checkbox = wxCheckBox(panel, -1, "Stretch window to fit torrent name *")
+        self.gui_stretchwindow_checkbox.SetFont(self.default_font)
+        self.gui_stretchwindow_checkbox.SetValue(self.config['gui_stretchwindow'])
+
+        self.gui_displaystats_checkbox = wxCheckBox(panel, -1, "Display peer and seed statistics")
+        self.gui_displaystats_checkbox.SetFont(self.default_font)
+        self.gui_displaystats_checkbox.SetValue(self.config['gui_displaystats'])
+
+        self.gui_displaymiscstats_checkbox = wxCheckBox(panel, -1, "Display miscellaneous other statistics")
+        self.gui_displaymiscstats_checkbox.SetFont(self.default_font)
+        self.gui_displaymiscstats_checkbox.SetValue(self.config['gui_displaymiscstats'])
+
+        self.buffering_checkbox = wxCheckBox(panel, -1, "Enable read/write buffering *")
+        self.buffering_checkbox.SetFont(self.default_font)
+        self.buffering_checkbox.SetValue(self.config['buffer_reads'])
+
+        self.breakup_checkbox = wxCheckBox(panel, -1, "Break-up seed bitfield to foil ISP manipulation")
+        self.breakup_checkbox.SetFont(self.default_font)
+        self.breakup_checkbox.SetValue(self.config['breakup_seed_bitfield'])
+
+        self.autoflush_checkbox = wxCheckBox(panel, -1, "Flush data to disk every 5 minutes")
+        self.autoflush_checkbox.SetFont(self.default_font)
+        self.autoflush_checkbox.SetValue(self.config['auto_flush'])
+
+        if sys.version_info >= (2,3) and socket.has_ipv6:
+            self.ipv6enabled_checkbox = wxCheckBox(panel, -1, "Initiate and receive connections via IPv6 *")
+            self.ipv6enabled_checkbox.SetFont(self.default_font)
+            self.ipv6enabled_checkbox.SetValue(self.config['ipv6_enabled'])
+
+        self.gui_forcegreenonfirewall_checkbox = wxCheckBox(panel, -1,
+                            "Force icon to display green when firewalled")
+        self.gui_forcegreenonfirewall_checkbox.SetFont(self.default_font)
+        self.gui_forcegreenonfirewall_checkbox.SetValue(self.config['gui_forcegreenonfirewall'])
+
+        cryptoButton = wxButton(panel, -1, 'Encryption/Security Settings...')
+
+        self.minport_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*8, -1))
+        self.minport_data.SetFont(self.default_font)
+        self.minport_data.SetRange(1,65535)
+        self.minport_data.SetValue(self.config['minport'])
+
+        self.maxport_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*8, -1))
+        self.maxport_data.SetFont(self.default_font)
+        self.maxport_data.SetRange(1,65535)
+        self.maxport_data.SetValue(self.config['maxport'])
+        
+        self.randomport_checkbox = wxCheckBox(panel, -1, "randomize")
+        self.randomport_checkbox.SetFont(self.default_font)
+        self.randomport_checkbox.SetValue(self.config['random_port'])
+        
+        self.gui_font_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*5, -1))
+        self.gui_font_data.SetFont(self.default_font)
+        self.gui_font_data.SetRange(8,16)
+        self.gui_font_data.SetValue(self.config['gui_font'])
+
+        self.gui_ratesettingsdefault_data=wxChoice(panel, -1, choices = ratesettingslist)
+        self.gui_ratesettingsdefault_data.SetFont(self.default_font)
+        self.gui_ratesettingsdefault_data.SetStringSelection(self.config['gui_ratesettingsdefault'])
+
+        self.maxdownload_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*7, -1))
+        self.maxdownload_data.SetFont(self.default_font)
+        self.maxdownload_data.SetRange(0,5000)
+        self.maxdownload_data.SetValue(self.config['max_download_rate'])
+
+        self.gui_ratesettingsmode_data=wxRadioBox(panel, -1, 'Rate Settings Mode',
+                 choices = [ 'none', 'basic', 'full' ] )
+        self.gui_ratesettingsmode_data.SetFont(self.default_font)
+        self.gui_ratesettingsmode_data.SetStringSelection(self.config['gui_ratesettingsmode'])
+
+        if (sys.platform == 'win32'):
+            self.win32_taskbar_icon_checkbox = wxCheckBox(panel, -1, "Minimize to system tray")
+            self.win32_taskbar_icon_checkbox.SetFont(self.default_font)
+            self.win32_taskbar_icon_checkbox.SetValue(self.config['win32_taskbar_icon'])
+            
+            self.upnp_data=wxChoice(panel, -1,
+                        choices = ['disabled', 'type 1 (fast)', 'type 2 (slow)'])
+            self.upnp_data.SetFont(self.default_font)
+            self.upnp_data.SetSelection(self.config['upnp_nat_access'])
+
+        self.gui_default_savedir_ctrl = wxTextCtrl(parent = panel, id = -1, 
+                            value = self.config['gui_default_savedir'],        
+                            size = (26*self.FONT, -1), style = wxTE_PROCESS_TAB)
+        self.gui_default_savedir_ctrl.SetFont(self.default_font)
+
+        self.gui_savemode_data=wxRadioBox(panel, -1, 'Ask where to save: *',
+                 choices = [ 'always', 'never', 'auto-resume' ] )
+        self.gui_savemode_data.SetFont(self.default_font)
+        self.gui_savemode_data.SetSelection(1-self.config['gui_saveas_ask'])
+
+        self.checkingcolor_icon = wxEmptyBitmap(16,16)
+        self.checkingcolor_iconptr = wxStaticBitmap(panel, -1, self.checkingcolor_icon)
+        self.setColorIcon(self.checkingcolor_icon, self.checkingcolor_iconptr, self.checkingcolor)
+
+        self.downloadcolor_icon = wxEmptyBitmap(16,16)
+        self.downloadcolor_iconptr = wxStaticBitmap(panel, -1, self.downloadcolor_icon)
+        self.setColorIcon(self.downloadcolor_icon, self.downloadcolor_iconptr, self.downloadcolor)
+
+        self.seedingcolor_icon = wxEmptyBitmap(16,16)
+        self.seedingcolor_iconptr = wxStaticBitmap(panel, -1, self.seedingcolor_icon)
+        self.setColorIcon(self.seedingcolor_icon, self.seedingcolor_iconptr, self.seedingcolor)
+        
+        rowsizer = wxFlexGridSizer(cols = 2, hgap = 20)
+
+        block12sizer = wxFlexGridSizer(cols = 1, vgap = 12)
+
+        block1sizer = wxFlexGridSizer(cols = 1, vgap = 2)
+        if (sys.platform == 'win32'):
+            block1sizer.Add(self.win32_taskbar_icon_checkbox)
+        block1sizer.Add(self.gui_stretchwindow_checkbox)
+        block1sizer.Add(self.gui_displaystats_checkbox)
+        block1sizer.Add(self.gui_displaymiscstats_checkbox)
+        block1sizer.Add(self.buffering_checkbox)
+        block1sizer.Add(self.breakup_checkbox)
+        block1sizer.Add(self.autoflush_checkbox)
+        if sys.version_info >= (2,3) and socket.has_ipv6:
+            block1sizer.Add(self.ipv6enabled_checkbox)
+        block1sizer.Add(self.gui_forcegreenonfirewall_checkbox)
+        block12sizer.Add(block1sizer)
+        block12sizer.Add(cryptoButton, 0, wxALIGN_CENTER)
+
+        colorsizer = wxStaticBoxSizer(wxStaticBox(panel, -1, "Gauge Colors:"), wxVERTICAL)
+        colorsizer1 = wxFlexGridSizer(cols = 7)
+        colorsizer1.Add(StaticText('           Checking: '), 1, wxALIGN_BOTTOM)
+        colorsizer1.Add(self.checkingcolor_iconptr, 1, wxALIGN_BOTTOM)
+        colorsizer1.Add(StaticText('   Downloading: '), 1, wxALIGN_BOTTOM)
+        colorsizer1.Add(self.downloadcolor_iconptr, 1, wxALIGN_BOTTOM)
+        colorsizer1.Add(StaticText('   Seeding: '), 1, wxALIGN_BOTTOM)
+        colorsizer1.Add(self.seedingcolor_iconptr, 1, wxALIGN_BOTTOM)
+        colorsizer1.Add(StaticText('  '))
+        minsize = self.checkingcolor_iconptr.GetBestSize()
+        minsize.SetHeight(minsize.GetHeight()+5)
+        colorsizer1.SetMinSize(minsize)
+        colorsizer.Add(colorsizer1)
+       
+        block12sizer.Add(colorsizer, 1, wxALIGN_LEFT)
+
+        rowsizer.Add(block12sizer)
+
+        block3sizer = wxFlexGridSizer(cols = 1)
+
+        portsettingsSizer = wxStaticBoxSizer(wxStaticBox(panel, -1, "Port Range:*"), wxVERTICAL)
+        portsettingsSizer1 = wxGridSizer(cols = 2, vgap = 1)
+        portsettingsSizer1.Add(StaticText('From: '), 1, wxALIGN_CENTER_VERTICAL|wxALIGN_RIGHT)
+        portsettingsSizer1.Add(self.minport_data, 1, wxALIGN_BOTTOM)
+        portsettingsSizer1.Add(StaticText('To: '), 1, wxALIGN_CENTER_VERTICAL|wxALIGN_RIGHT)
+        portsettingsSizer1.Add(self.maxport_data, 1, wxALIGN_BOTTOM)
+        portsettingsSizer.Add(portsettingsSizer1)
+        portsettingsSizer.Add(self.randomport_checkbox, 1, wxALIGN_CENTER)
+        block3sizer.Add(portsettingsSizer, 1, wxALIGN_CENTER)
+        block3sizer.Add(StaticText(' '))
+        block3sizer.Add(self.gui_ratesettingsmode_data, 1, wxALIGN_CENTER)
+        block3sizer.Add(StaticText(' '))
+        ratesettingsSizer = wxFlexGridSizer(cols = 1, vgap = 2)
+        ratesettingsSizer.Add(StaticText('Default Rate Setting: *'), 1, wxALIGN_CENTER)
+        ratesettingsSizer.Add(self.gui_ratesettingsdefault_data, 1, wxALIGN_CENTER)
+        block3sizer.Add(ratesettingsSizer, 1, wxALIGN_CENTER)
+        if (sys.platform == 'win32'):
+            block3sizer.Add(StaticText(' '))
+            upnpSizer = wxFlexGridSizer(cols = 1, vgap = 2)
+            upnpSizer.Add(StaticText('UPnP Port Forwarding: *'), 1, wxALIGN_CENTER)
+            upnpSizer.Add(self.upnp_data, 1, wxALIGN_CENTER)
+            block3sizer.Add(upnpSizer, 1, wxALIGN_CENTER)
+        
+        rowsizer.Add(block3sizer)
+        colsizer.Add(rowsizer)
+
+        block4sizer = wxFlexGridSizer(cols = 3, hgap = 15)        
+        savepathsizer = wxFlexGridSizer(cols = 2, vgap = 1)
+        savepathsizer.Add(StaticText('Default Save Path: *'))
+        savepathsizer.Add(StaticText(' '))
+        savepathsizer.Add(self.gui_default_savedir_ctrl, 1, wxEXPAND)
+        savepathButton = wxButton(panel, -1, '...', size = (18,18))
+#        savepathButton.SetFont(self.default_font)
+        savepathsizer.Add(savepathButton, 0, wxALIGN_CENTER)
+        savepathsizer.Add(self.gui_savemode_data, 0, wxALIGN_CENTER)
+        block4sizer.Add(savepathsizer, -1, wxALIGN_BOTTOM)
+
+        fontsizer = wxFlexGridSizer(cols = 1, vgap = 2)
+        fontsizer.Add(StaticText(''))
+        fontsizer.Add(StaticText('Font: *'), 1, wxALIGN_CENTER)
+        fontsizer.Add(self.gui_font_data, 1, wxALIGN_CENTER)
+        block4sizer.Add(fontsizer, 1, wxALIGN_CENTER_VERTICAL)
+
+        dratesettingsSizer = wxFlexGridSizer(cols = 1, vgap = 2)
+        dratesettingsSizer.Add(StaticText('Default Max'), 1, wxALIGN_CENTER)
+        dratesettingsSizer.Add(StaticText('Download Rate'), 1, wxALIGN_CENTER)
+        dratesettingsSizer.Add(StaticText('(kB/s): *'), 1, wxALIGN_CENTER)
+        dratesettingsSizer.Add(self.maxdownload_data, 1, wxALIGN_CENTER)
+        dratesettingsSizer.Add(StaticText('(0 = disabled)'), 1, wxALIGN_CENTER)
+        
+        block4sizer.Add(dratesettingsSizer, 1, wxALIGN_CENTER_VERTICAL)
+
+        colsizer.Add(block4sizer, 0, wxALIGN_CENTER)
+
+        savesizer = wxGridSizer(cols = 4, hgap = 10)
+        saveButton = wxButton(panel, -1, 'Save')
+#        saveButton.SetFont(self.default_font)
+        savesizer.Add(saveButton, 0, wxALIGN_CENTER)
+
+        cancelButton = wxButton(panel, -1, 'Cancel')
+#        cancelButton.SetFont(self.default_font)
+        savesizer.Add(cancelButton, 0, wxALIGN_CENTER)
+
+        defaultsButton = wxButton(panel, -1, 'Revert to Defaults')
+#        defaultsButton.SetFont(self.default_font)
+        savesizer.Add(defaultsButton, 0, wxALIGN_CENTER)
+
+        advancedButton = wxButton(panel, -1, 'Advanced...')
+#        advancedButton.SetFont(self.default_font)
+        savesizer.Add(advancedButton, 0, wxALIGN_CENTER)
+        colsizer.Add(savesizer, 1, wxALIGN_CENTER)
+
+        resizewarningtext=StaticText('* These settings will not take effect until the next time you start BitTorrent', self.FONT-2)
+        colsizer.Add(resizewarningtext, 1, wxALIGN_CENTER)
+
+        border = wxBoxSizer(wxHORIZONTAL)
+        border.Add(colsizer, 1, wxEXPAND | wxALL, 4)
+        
+        panel.SetSizer(border)
+        panel.SetAutoLayout(True)
+
+        self.advancedConfig = {}
+        self.cryptoConfig = {}
+
+        def setDefaults(evt, self = self):
+          try:
+            self.minport_data.SetValue(self.defaults['minport'])
+            self.maxport_data.SetValue(self.defaults['maxport'])
+            self.randomport_checkbox.SetValue(self.defaults['random_port'])
+            self.gui_stretchwindow_checkbox.SetValue(self.defaults['gui_stretchwindow'])
+            self.gui_displaystats_checkbox.SetValue(self.defaults['gui_displaystats'])
+            self.gui_displaymiscstats_checkbox.SetValue(self.defaults['gui_displaymiscstats'])
+            self.buffering_checkbox.SetValue(self.defaults['buffer_reads'])
+            self.breakup_checkbox.SetValue(self.defaults['breakup_seed_bitfield'])
+            self.autoflush_checkbox.SetValue(self.defaults['auto_flush'])
+            if sys.version_info >= (2,3) and socket.has_ipv6:
+                self.ipv6enabled_checkbox.SetValue(self.defaults['ipv6_enabled'])
+            self.gui_forcegreenonfirewall_checkbox.SetValue(self.defaults['gui_forcegreenonfirewall'])
+            self.gui_font_data.SetValue(self.defaults['gui_font'])
+            self.gui_ratesettingsdefault_data.SetStringSelection(self.defaults['gui_ratesettingsdefault'])
+            self.maxdownload_data.SetValue(self.defaults['max_download_rate'])
+            self.gui_ratesettingsmode_data.SetStringSelection(self.defaults['gui_ratesettingsmode'])
+            self.gui_default_savedir_ctrl.SetValue(self.defaults['gui_default_savedir'])
+            self.gui_savemode_data.SetSelection(1-self.defaults['gui_saveas_ask'])
+
+            self.checkingcolor = HexToColor(self.defaults['gui_checkingcolor'])
+            self.setColorIcon(self.checkingcolor_icon, self.checkingcolor_iconptr, self.checkingcolor)
+            self.downloadcolor = HexToColor(self.defaults['gui_downloadcolor'])
+            self.setColorIcon(self.downloadcolor_icon, self.downloadcolor_iconptr, self.downloadcolor)
+            self.seedingcolor = HexToColor(self.defaults['gui_seedingcolor'])
+            self.setColorIcon(self.seedingcolor_icon, self.seedingcolor_iconptr, self.seedingcolor)
+
+            if (sys.platform == 'win32'):
+                self.win32_taskbar_icon_checkbox.SetValue(self.defaults['win32_taskbar_icon'])
+                self.upnp_data.SetSelection(self.defaults['upnp_nat_access'])
+
+            # reset advanced and crypto windows too
+            self.advancedConfig = {}
+            for key in ['ip', 'bind', 'min_peers', 'max_initiate', 'display_interval',
+        'alloc_type', 'alloc_rate', 'max_files_open', 'max_connections', 'super_seeder',
+        'ipv6_binds_v4', 'double_check', 'triple_check', 'lock_files', 'lock_while_reading',
+        'expire_cache_data']:
+                self.advancedConfig[key] = self.defaults[key]
+            self.cryptoConfig = {}
+            for key in ['security', 'auto_kick',
+        'crypto_allowed', 'crypto_only', 'crypto_stealth']:
+                self.cryptoConfig[key] = self.config[key]
+            self.CloseAdvanced()
+          except:
+            self.parent.exception()
+
+
+        def saveConfigs(evt, self = self):
+          try:
+            self.config['gui_stretchwindow']=int(self.gui_stretchwindow_checkbox.GetValue())
+            self.config['gui_displaystats']=int(self.gui_displaystats_checkbox.GetValue())
+            self.config['gui_displaymiscstats']=int(self.gui_displaymiscstats_checkbox.GetValue())
+            buffering=int(self.buffering_checkbox.GetValue())
+            self.config['buffer_reads']=buffering
+            if buffering:
+                self.config['write_buffer_size']=self.defaults['write_buffer_size']
+            else:
+                self.config['write_buffer_size']=0
+            self.config['breakup_seed_bitfield']=int(self.breakup_checkbox.GetValue())
+            if self.autoflush_checkbox.GetValue():
+                self.config['auto_flush']=5
+            else:
+                self.config['auto_flush']=0
+            if sys.version_info >= (2,3) and socket.has_ipv6:
+                self.config['ipv6_enabled']=int(self.ipv6enabled_checkbox.GetValue())
+            self.config['gui_forcegreenonfirewall']=int(self.gui_forcegreenonfirewall_checkbox.GetValue())
+            self.config['minport']=self.minport_data.GetValue()
+            self.config['maxport']=self.maxport_data.GetValue()
+            self.config['random_port']=int(self.randomport_checkbox.GetValue())
+            self.config['gui_font']=self.gui_font_data.GetValue()
+            self.config['gui_ratesettingsdefault']=self.gui_ratesettingsdefault_data.GetStringSelection()
+            self.config['max_download_rate']=self.maxdownload_data.GetValue()
+            self.config['gui_ratesettingsmode']=self.gui_ratesettingsmode_data.GetStringSelection()
+            self.config['gui_default_savedir']=self.gui_default_savedir_ctrl.GetValue()
+            self.config['gui_saveas_ask']=1-self.gui_savemode_data.GetSelection()
+            self.config['gui_checkingcolor']=ColorToHex(self.checkingcolor)
+            self.config['gui_downloadcolor']=ColorToHex(self.downloadcolor)
+            self.config['gui_seedingcolor']=ColorToHex(self.seedingcolor)
+            
+            if (sys.platform == 'win32'):
+                self.config['win32_taskbar_icon']=int(self.win32_taskbar_icon_checkbox.GetValue())
+                self.config['upnp_nat_access']=self.upnp_data.GetSelection()
+
+            if self.advancedConfig:
+                for key,val in self.advancedConfig.items():
+                    self.config[key] = val
+            if self.cryptoConfig:
+                for key,val in self.cryptoConfig.items():
+                    self.config[key] = val
+
+            self.writeConfigFile()
+            self._configReset = True
+            self.Close()
+          except:
+            self.parent.exception()
+
+        def cancelConfigs(evt, self = self):
+            self.Close()
+
+        def savepath_set(evt, self = self):
+          try:
+            d = self.gui_default_savedir_ctrl.GetValue()
+            if d == '':
+                d = self.config['last_saved']
+            dl = wxDirDialog(self.panel, 'Choose a default directory to save to', 
+                d, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
+            if dl.ShowModal() == wxID_OK:
+                self.gui_default_savedir_ctrl.SetValue(dl.GetPath())
+          except:
+            self.parent.exception()
+
+        def checkingcoloricon_set(evt, self = self):
+          try:
+            newcolor = self.getColorFromUser(self.panel,self.checkingcolor)
+            self.setColorIcon(self.checkingcolor_icon, self.checkingcolor_iconptr, newcolor)
+            self.checkingcolor = newcolor
+          except:
+            self.parent.exception()
+
+        def downloadcoloricon_set(evt, self = self):
+          try:
+            newcolor = self.getColorFromUser(self.panel,self.downloadcolor)
+            self.setColorIcon(self.downloadcolor_icon, self.downloadcolor_iconptr, newcolor)
+            self.downloadcolor = newcolor
+          except:
+            self.parent.exception()
+
+        def seedingcoloricon_set(evt, self = self):
+          try:
+            newcolor = self.getColorFromUser(self.panel,self.seedingcolor)
+            self.setColorIcon(self.seedingcolor_icon, self.seedingcolor_iconptr, newcolor)
+            self.seedingcolor = newcolor
+          except:
+            self.parent.exception()
+            
+        EVT_BUTTON(self.configMenuBox, saveButton.GetId(), saveConfigs)
+        EVT_BUTTON(self.configMenuBox, cancelButton.GetId(), cancelConfigs)
+        EVT_BUTTON(self.configMenuBox, defaultsButton.GetId(), setDefaults)
+        EVT_BUTTON(self.configMenuBox, advancedButton.GetId(), self.advancedMenu)
+        EVT_BUTTON(self.configMenuBox, cryptoButton.GetId(), self.cryptoMenu)
+        EVT_BUTTON(self.configMenuBox, savepathButton.GetId(), savepath_set)
+        EVT_LEFT_DOWN(self.checkingcolor_iconptr, checkingcoloricon_set)
+        EVT_LEFT_DOWN(self.downloadcolor_iconptr, downloadcoloricon_set)
+        EVT_LEFT_DOWN(self.seedingcolor_iconptr, seedingcoloricon_set)
+
+        self.configMenuBox.Show ()
+        border.Fit(panel)
+        self.configMenuBox.Fit()
+      except:
+        self.parent.exception()
+
+
+    def Close(self):
+        self.CloseAdvanced()
+        if self.configMenuBox is not None:
+            try:
+                self.configMenuBox.Close ()
+            except wxPyDeadObjectError, e:
+                pass
+            self.configMenuBox = None
+
+    def advancedMenu(self, event = None):
+      try:
+        if not self.advancedConfig:
+            for key in ['ip', 'bind', 'min_peers', 'max_initiate', 'display_interval',
+        'alloc_type', 'alloc_rate', 'max_files_open', 'max_connections', 'super_seeder',
+        'ipv6_binds_v4', 'double_check', 'triple_check', 'lock_files', 'lock_while_reading',
+        'expire_cache_data']:
+                self.advancedConfig[key] = self.config[key]
+
+        if (self.advancedMenuBox is not None):
+            try:
+                self.advancedMenuBox.Close ()
+            except wxPyDeadObjectError, e:
+                self.advancedMenuBox = None
+
+        self.advancedMenuBox = wxFrame(None, -1, 'BitTornado Advanced Preferences', size = (1,1),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+        if (sys.platform == 'win32'):
+            self.advancedMenuBox.SetIcon(self.icon)
+
+        panel = wxPanel(self.advancedMenuBox, -1)
+
+        def StaticText(text, font = self.FONT, underline = False, color = None, panel = panel):
+            x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+            x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+            if color is not None:
+                x.SetForegroundColour(color)
+            return x
+
+        colsizer = wxFlexGridSizer(cols = 1, hgap = 13, vgap = 13)
+        warningtext = StaticText('CHANGE THESE SETTINGS AT YOUR OWN RISK', self.FONT+4, True, 'Red')
+        colsizer.Add(warningtext, 1, wxALIGN_CENTER)
+
+        self.ip_data = wxTextCtrl(parent = panel, id = -1, 
+                    value = self.advancedConfig['ip'],
+                    size = (self.FONT*13, int(self.FONT*2.2)), style = wxTE_PROCESS_TAB)
+        self.ip_data.SetFont(self.default_font)
+        
+        self.bind_data = wxTextCtrl(parent = panel, id = -1, 
+                    value = self.advancedConfig['bind'],
+                    size = (self.FONT*13, int(self.FONT*2.2)), style = wxTE_PROCESS_TAB)
+        self.bind_data.SetFont(self.default_font)
+        
+        if sys.version_info >= (2,3) and socket.has_ipv6:
+            self.ipv6bindsv4_data=wxChoice(panel, -1,
+                             choices = ['separate sockets', 'single socket'])
+            self.ipv6bindsv4_data.SetFont(self.default_font)
+            self.ipv6bindsv4_data.SetSelection(self.advancedConfig['ipv6_binds_v4'])
+
+        self.minpeers_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*7, -1))
+        self.minpeers_data.SetFont(self.default_font)
+        self.minpeers_data.SetRange(10,100)
+        self.minpeers_data.SetValue(self.advancedConfig['min_peers'])
+        # max_initiate = 2*minpeers
+
+        self.displayinterval_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*7, -1))
+        self.displayinterval_data.SetFont(self.default_font)
+        self.displayinterval_data.SetRange(100,2000)
+        self.displayinterval_data.SetValue(int(self.advancedConfig['display_interval']*1000))
+
+        self.alloctype_data=wxChoice(panel, -1,
+                         choices = ['normal', 'background', 'pre-allocate', 'sparse'])
+        self.alloctype_data.SetFont(self.default_font)
+        self.alloctype_data.SetStringSelection(self.advancedConfig['alloc_type'])
+
+        self.allocrate_data = wxSpinCtrl(panel, -1, '', (-1,-1), (self.FONT*7,-1))
+        self.allocrate_data.SetFont(self.default_font)
+        self.allocrate_data.SetRange(1,100)
+        self.allocrate_data.SetValue(int(self.advancedConfig['alloc_rate']))
+
+        self.locking_data=wxChoice(panel, -1,
+                           choices = ['no locking', 'lock while writing', 'lock always'])
+        self.locking_data.SetFont(self.default_font)
+        if self.advancedConfig['lock_files']:
+            if self.advancedConfig['lock_while_reading']:
+                self.locking_data.SetSelection(2)
+            else:
+                self.locking_data.SetSelection(1)
+        else:
+            self.locking_data.SetSelection(0)
+
+        self.doublecheck_data=wxChoice(panel, -1,
+                           choices = ['no extra checking', 'double-check', 'triple-check'])
+        self.doublecheck_data.SetFont(self.default_font)
+        if self.advancedConfig['double_check']:
+            if self.advancedConfig['triple_check']:
+                self.doublecheck_data.SetSelection(2)
+            else:
+                self.doublecheck_data.SetSelection(1)
+        else:
+            self.doublecheck_data.SetSelection(0)
+
+        self.maxfilesopen_choices = ['50', '100', '200', 'no limit ']
+        self.maxfilesopen_data=wxChoice(panel, -1, choices = self.maxfilesopen_choices)
+        self.maxfilesopen_data.SetFont(self.default_font)
+        setval = self.advancedConfig['max_files_open']
+        if setval == 0:
+            setval = 'no limit '
+        else:
+            setval = str(setval)
+        if not setval in self.maxfilesopen_choices:
+            setval = self.maxfilesopen_choices[0]
+        self.maxfilesopen_data.SetStringSelection(setval)
+
+        self.maxconnections_choices = ['no limit ', '20', '30', '40', '50', '60', '100', '200']
+        self.maxconnections_data=wxChoice(panel, -1, choices = self.maxconnections_choices)
+        self.maxconnections_data.SetFont(self.default_font)
+        setval = self.advancedConfig['max_connections']
+        if setval == 0:
+            setval = 'no limit '
+        else:
+            setval = str(setval)
+        if not setval in self.maxconnections_choices:
+            setval = self.maxconnections_choices[0]
+        self.maxconnections_data.SetStringSelection(setval)
+
+        self.superseeder_data=wxChoice(panel, -1,
+                         choices = ['normal', 'super-seed'])
+        self.superseeder_data.SetFont(self.default_font)
+        self.superseeder_data.SetSelection(self.advancedConfig['super_seeder'])
+
+        self.expirecache_choices = ['never ', '3', '5', '7', '10', '15', '30', '60', '90']
+        self.expirecache_data=wxChoice(panel, -1, choices = self.expirecache_choices)
+        setval = self.advancedConfig['expire_cache_data']
+        if setval == 0:
+            setval = 'never '
+        else:
+            setval = str(setval)
+        if not setval in self.expirecache_choices:
+            setval = self.expirecache_choices[0]
+        self.expirecache_data.SetFont(self.default_font)
+        self.expirecache_data.SetStringSelection(setval)
+       
+
+        twocolsizer = wxFlexGridSizer(cols = 2, hgap = 20)
+        datasizer = wxFlexGridSizer(cols = 2, vgap = 2)
+        datasizer.Add(StaticText('Local IP: '), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.ip_data)
+        datasizer.Add(StaticText('IP to bind to: '), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.bind_data)
+        if sys.version_info >= (2,3) and socket.has_ipv6:
+            datasizer.Add(StaticText('IPv6 socket handling: '), 1, wxALIGN_CENTER_VERTICAL)
+            datasizer.Add(self.ipv6bindsv4_data)
+        datasizer.Add(StaticText('Minimum number of peers: '), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.minpeers_data)
+        datasizer.Add(StaticText('Display interval (ms): '), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.displayinterval_data)
+        datasizer.Add(StaticText('Disk allocation type:'), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.alloctype_data)
+        datasizer.Add(StaticText('Allocation rate (MiB/s):'), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.allocrate_data)
+        datasizer.Add(StaticText('File locking:'), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.locking_data)
+        datasizer.Add(StaticText('Extra data checking:'), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.doublecheck_data)
+        datasizer.Add(StaticText('Max files open:'), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.maxfilesopen_data)
+        datasizer.Add(StaticText('Max peer connections:'), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.maxconnections_data)
+        datasizer.Add(StaticText('Default seeding mode:'), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.superseeder_data)
+        datasizer.Add(StaticText('Expire resume data (days):'), 1, wxALIGN_CENTER_VERTICAL)
+        datasizer.Add(self.expirecache_data)
+        
+        twocolsizer.Add(datasizer)
+
+        infosizer = wxFlexGridSizer(cols = 1)
+        self.hinttext = StaticText('', self.FONT, False, 'Blue')
+        infosizer.Add(self.hinttext, 1, wxALIGN_LEFT|wxALIGN_CENTER_VERTICAL)
+        infosizer.SetMinSize((180,100))
+        twocolsizer.Add(infosizer, 1, wxEXPAND)
+
+        colsizer.Add(twocolsizer)
+
+        savesizer = wxGridSizer(cols = 3, hgap = 20)
+        okButton = wxButton(panel, -1, 'OK')
+#        okButton.SetFont(self.default_font)
+        savesizer.Add(okButton, 0, wxALIGN_CENTER)
+
+        cancelButton = wxButton(panel, -1, 'Cancel')
+#        cancelButton.SetFont(self.default_font)
+        savesizer.Add(cancelButton, 0, wxALIGN_CENTER)
+
+        defaultsButton = wxButton(panel, -1, 'Revert to Defaults')
+#        defaultsButton.SetFont(self.default_font)
+        savesizer.Add(defaultsButton, 0, wxALIGN_CENTER)
+        colsizer.Add(savesizer, 1, wxALIGN_CENTER)
+
+        resizewarningtext=StaticText('None of these settings will take effect until the next time you start BitTorrent', self.FONT-2)
+        colsizer.Add(resizewarningtext, 1, wxALIGN_CENTER)
+
+        border = wxBoxSizer(wxHORIZONTAL)
+        border.Add(colsizer, 1, wxEXPAND | wxALL, 4)
+        
+        panel.SetSizer(border)
+        panel.SetAutoLayout(True)
+
+        def setDefaults(evt, self = self):
+          try:
+            self.ip_data.SetValue(self.defaults['ip'])
+            self.bind_data.SetValue(self.defaults['bind'])
+            if sys.version_info >= (2,3) and socket.has_ipv6:
+                self.ipv6bindsv4_data.SetSelection(self.defaults['ipv6_binds_v4'])
+            self.minpeers_data.SetValue(self.defaults['min_peers'])
+            self.displayinterval_data.SetValue(int(self.defaults['display_interval']*1000))
+            self.alloctype_data.SetStringSelection(self.defaults['alloc_type'])
+            self.allocrate_data.SetValue(int(self.defaults['alloc_rate']))
+            if self.defaults['lock_files']:
+                if self.defaults['lock_while_reading']:
+                    self.locking_data.SetSelection(2)
+                else:
+                    self.locking_data.SetSelection(1)
+            else:
+                self.locking_data.SetSelection(0)
+            if self.defaults['double_check']:
+                if self.defaults['triple_check']:
+                    self.doublecheck_data.SetSelection(2)
+                else:
+                    self.doublecheck_data.SetSelection(1)
+            else:
+                self.doublecheck_data.SetSelection(0)
+            setval = self.defaults['max_files_open']
+            if setval == 0:
+                setval = 'no limit '
+            else:
+                setval = str(setval)
+            if not setval in self.maxfilesopen_choices:
+                setval = self.maxfilesopen_choices[0]
+            self.maxfilesopen_data.SetStringSelection(setval)
+            setval = self.defaults['max_connections']
+            if setval == 0:
+                setval = 'no limit '
+            else:
+                setval = str(setval)
+            if not setval in self.maxconnections_choices:
+                setval = self.maxconnections_choices[0]
+            self.maxconnections_data.SetStringSelection(setval)
+            self.superseeder_data.SetSelection(int(self.defaults['super_seeder']))
+            setval = self.defaults['expire_cache_data']
+            if setval == 0:
+                setval = 'never '
+            else:
+                setval = str(setval)
+            if not setval in self.expirecache_choices:
+                setval = self.expirecache_choices[0]
+            self.expirecache_data.SetStringSelection(setval)
+          except:
+            self.parent.exception()
+
+        def saveConfigs(evt, self = self):
+          try:
+            self.advancedConfig['ip'] = self.ip_data.GetValue()
+            self.advancedConfig['bind'] = self.bind_data.GetValue()
+            if sys.version_info >= (2,3) and socket.has_ipv6:
+                self.advancedConfig['ipv6_binds_v4'] = self.ipv6bindsv4_data.GetSelection()
+            self.advancedConfig['min_peers'] = self.minpeers_data.GetValue()
+            self.advancedConfig['display_interval'] = float(self.displayinterval_data.GetValue())/1000
+            self.advancedConfig['alloc_type'] = self.alloctype_data.GetStringSelection()
+            self.advancedConfig['alloc_rate'] = float(self.allocrate_data.GetValue())
+            self.advancedConfig['lock_files'] = int(self.locking_data.GetSelection() >= 1)
+            self.advancedConfig['lock_while_reading'] = int(self.locking_data.GetSelection() > 1)
+            self.advancedConfig['double_check'] = int(self.doublecheck_data.GetSelection() >= 1)
+            self.advancedConfig['triple_check'] = int(self.doublecheck_data.GetSelection() > 1)
+            try:
+                self.advancedConfig['max_files_open'] = int(self.maxfilesopen_data.GetStringSelection())
+            except:       # if it ain't a number, it must be "no limit"
+                self.advancedConfig['max_files_open'] = 0
+            try:
+                self.advancedConfig['max_connections'] = int(self.maxconnections_data.GetStringSelection())
+                self.advancedConfig['max_initiate'] = min(
+                    2*self.advancedConfig['min_peers'], self.advancedConfig['max_connections'])
+            except:       # if it ain't a number, it must be "no limit"
+                self.advancedConfig['max_connections'] = 0
+                self.advancedConfig['max_initiate'] = 2*self.advancedConfig['min_peers']
+            self.advancedConfig['super_seeder']=int(self.superseeder_data.GetSelection())
+            try:
+                self.advancedConfig['expire_cache_data'] = int(self.expirecache_data.GetStringSelection())
+            except:
+                self.advancedConfig['expire_cache_data'] = 0
+            self.advancedMenuBox.Close()
+          except:
+            self.parent.exception()
+
+        def cancelConfigs(evt, self = self):            
+            self.advancedMenuBox.Close()
+
+        def ip_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\n\nThe IP reported to the tracker.\n' +
+                                  'Unless the tracker is on the\n' +
+                                  'same intranet as this client,\n' +
+                                  'the tracker will autodetect the\n' +
+                                  "client's IP and ignore this\n" +
+                                  "value.")
+
+        def bind_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\n\nThe IP the client will bind to.\n' +
+                                  'Only useful if your machine is\n' +
+                                  'directly handling multiple IPs.\n' +
+                                  "If you don't know what this is,\n" +
+                                  "leave it blank.")
+
+        def ipv6bindsv4_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\n\nCertain operating systems will\n' +
+                                  'open IPv4 protocol connections on\n' +
+                                  'an IPv6 socket; others require you\n' +
+                                  "to open two sockets on the same\n" +
+                                  "port, one IPv4 and one IPv6.")
+
+        def minpeers_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\n\nThe minimum number of peers the\n' +
+                                  'client tries to stay connected\n' +
+                                  'with.  Do not set this higher\n' +
+                                  'unless you have a very fast\n' +
+                                  "connection and a lot of system\n" +
+                                  "resources.")
+
+        def displayinterval_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\n\nHow often to update the\n' +
+                                  'graphical display, in 1/1000s\n' +
+                                  'of a second. Setting this too low\n' +
+                                  "will strain your computer's\n" +
+                                  "processor and video access.")
+
+        def alloctype_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\nHow to allocate disk space.\n' +
+                                  'normal allocates space as data is\n' +
+                                  'received, background also adds\n' +
+                                  "space in the background, pre-\n" +
+                                  "allocate reserves space up front,\n" +
+                                  'and sparse is only for filesystems\n' +
+                                  'that support it by default.')
+
+        def allocrate_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\n\nAt what rate to allocate disk\n' +
+                                  'space when allocating in the\n' +
+                                  'background.  Set this too high on a\n' +
+                                  "slow filesystem and your download\n" +
+                                  "will slow to a crawl.")
+
+        def locking_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\n\n\nFile locking prevents other\n' +
+                                  'programs (including other instances\n' +
+                                  'of BitTorrent) from accessing files\n' +
+                                  "you are downloading.")
+
+        def doublecheck_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\n\nHow much extra checking to do\n' +
+                                  'to make sure no data is corrupted.\n' +
+                                  'Double-check mode uses more CPU,\n' +
+                                  "while triple-check mode increases\n" +
+                                  "disk accesses.")
+
+        def maxfilesopen_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\n\nThe maximum number of files to\n' +
+                                  'keep open at the same time.  Zero\n' +
+                                  'means no limit.  Please note that\n' +
+                                  "if this option is in effect,\n" +
+                                  "files are not guaranteed to be\n" +
+                                  "locked.")
+
+        def maxconnections_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\nSome operating systems, most\n' +
+                                  'notably Windows 9x/ME combined\n' +
+                                  'with certain network drivers,\n' +
+                                  "cannot handle more than a certain\n" +
+                                  "number of open ports.  If the\n" +
+                                  "client freezes, try setting this\n" +
+                                  "to 60 or below.")
+
+        def superseeder_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\nThe "super-seed" method allows\n' +
+                                  'a single source to more efficiently\n' +
+                                  'seed a large torrent, but is not\n' +
+                                  "necessary in a well-seeded torrent,\n" +
+                                  "and causes problems with statistics.\n" +
+                                  "If you routinely seed torrents,\n" +
+                                  "you can enable this by selecting\n" +
+                                  '"SUPER-SEED" for connection type.\n' +
+                                  '(Once enabled, it does not turn off.)')
+
+        def expirecache_hint(evt, self = self):
+            self.hinttext.SetLabel('\n\nThe client stores temporary data\n' +
+                                  'in order to handle downloading only\n' +
+                                  'specific files from the torrent and\n' +
+                                  "so it can resume downloads more\n" +
+                                  "quickly.  This sets how long the\n" +
+                                  "client will keep this data before\n" +
+                                  "deleting it to free disk space.")
+
+        EVT_BUTTON(self.advancedMenuBox, okButton.GetId(), saveConfigs)
+        EVT_BUTTON(self.advancedMenuBox, cancelButton.GetId(), cancelConfigs)
+        EVT_BUTTON(self.advancedMenuBox, defaultsButton.GetId(), setDefaults)
+        EVT_ENTER_WINDOW(self.ip_data, ip_hint)
+        EVT_ENTER_WINDOW(self.bind_data, bind_hint)
+        if sys.version_info >= (2,3) and socket.has_ipv6:
+            EVT_ENTER_WINDOW(self.ipv6bindsv4_data, ipv6bindsv4_hint)
+        EVT_ENTER_WINDOW(self.minpeers_data, minpeers_hint)
+        EVT_ENTER_WINDOW(self.displayinterval_data, displayinterval_hint)
+        EVT_ENTER_WINDOW(self.alloctype_data, alloctype_hint)
+        EVT_ENTER_WINDOW(self.allocrate_data, allocrate_hint)
+        EVT_ENTER_WINDOW(self.locking_data, locking_hint)
+        EVT_ENTER_WINDOW(self.doublecheck_data, doublecheck_hint)
+        EVT_ENTER_WINDOW(self.maxfilesopen_data, maxfilesopen_hint)
+        EVT_ENTER_WINDOW(self.maxconnections_data, maxconnections_hint)
+        EVT_ENTER_WINDOW(self.superseeder_data, superseeder_hint)
+        EVT_ENTER_WINDOW(self.expirecache_data, expirecache_hint)
+
+        self.advancedMenuBox.Show ()
+        border.Fit(panel)
+        self.advancedMenuBox.Fit()
+      except:
+        self.parent.exception()
+
+
+    def CloseAdvanced(self):
+        if self.advancedMenuBox is not None:
+            try:
+                self.advancedMenuBox.Close()
+            except wxPyDeadObjectError, e:
+                self.advancedMenuBox = None
+
+
+    def cryptoMenu(self, event = None):
+      try:
+        if not self.cryptoConfig:
+            for key in ['security', 'auto_kick',
+        'crypto_allowed', 'crypto_only', 'crypto_stealth']:
+                self.cryptoConfig[key] = self.config[key]
+
+        if (self.cryptoMenuBox is not None):
+            try:
+                self.cryptoMenuBox.Close ()
+            except wxPyDeadObjectError, e:
+                self.cryptoMenuBox = None
+
+        self.cryptoMenuBox = wxFrame(None, -1, 'BitTornado Encryption/Security Preferences', size = (1,1),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+        if (sys.platform == 'win32'):
+            self.cryptoMenuBox.SetIcon(self.icon)
+
+        panel = wxPanel(self.cryptoMenuBox, -1)
+#        self.panel = panel
+
+        def StaticText(text, font = self.FONT, underline = False, color = None, panel = panel):
+            x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+            x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+            if color is not None:
+                x.SetForegroundColour(color)
+            return x
+
+        colsizer = wxFlexGridSizer(cols = 1, hgap = 13, vgap = 13)
+
+        self.cryptomode_data=wxRadioBox(panel, -1, 'Encryption',
+                style = wxRA_SPECIFY_COLS, majorDimension = 1,
+                choices = [
+                    'no encryption permitted',
+                    'encryption enabled (default)',
+                    'encrypted connections only',
+                    'full stealth encryption'+
+                    ' (may cause effective firewalling)' ] )
+        self.cryptomode_data.SetFont(self.default_font)
+        if self.cryptoConfig['crypto_stealth']:
+            m = 3
+        elif self.cryptoConfig['crypto_only']:
+            m = 2
+        elif self.cryptoConfig['crypto_allowed']:
+            m = 1
+        else:
+            m = 0
+        self.cryptomode_data.SetSelection(m)
+        if not CRYPTO_OK:   # no crypto library in place
+            self.cryptomode_data.Enable(False)
+
+        self.security_checkbox = wxCheckBox(panel, -1, "Don't allow multiple connections from the same IP")
+        self.security_checkbox.SetFont(self.default_font)
+        self.security_checkbox.SetValue(self.cryptoConfig['security'])
+
+        self.autokick_checkbox = wxCheckBox(panel, -1, "Kick/ban clients that send you bad data")
+        self.autokick_checkbox.SetFont(self.default_font)
+        self.autokick_checkbox.SetValue(self.cryptoConfig['auto_kick'])
+
+        colsizer.Add(self.cryptomode_data)
+
+        block2sizer = wxFlexGridSizer(cols = 1, vgap = 2)
+        block2sizer.Add(self.security_checkbox)
+        block2sizer.Add(self.autokick_checkbox)
+        colsizer.Add(block2sizer)
+
+        savesizer = wxGridSizer(cols = 3, hgap = 20)
+        okButton = wxButton(panel, -1, 'OK')
+        savesizer.Add(okButton, 0, wxALIGN_CENTER)
+
+        cancelButton = wxButton(panel, -1, 'Cancel')
+        savesizer.Add(cancelButton, 0, wxALIGN_CENTER)
+
+        defaultsButton = wxButton(panel, -1, 'Revert to Defaults')
+        savesizer.Add(defaultsButton, 0, wxALIGN_CENTER)
+        colsizer.Add(savesizer, 1, wxALIGN_CENTER)
+
+        resizewarningtext=StaticText('None of these settings will take effect until the next time you start BitTorrent', self.FONT-2)
+        colsizer.Add(resizewarningtext, 1, wxALIGN_CENTER)
+
+        border = wxBoxSizer(wxHORIZONTAL)
+        border.Add(colsizer, 1, wxEXPAND | wxALL, 4)
+        
+        panel.SetSizer(border)
+        panel.SetAutoLayout(True)
+
+        def setDefaults(evt, self = self):
+          try:
+            if self.defaults['crypto_stealth']:
+                m = 3
+            elif self.defaults['crypto_only']:
+                m = 2
+            elif self.defaults['crypto_allowed']:
+                m = 1
+            else:
+                m = 0
+            self.cryptomode_data.SetSelection(m)
+            self.security_checkbox.SetValue(self.defaults['security'])
+            self.autokick_checkbox.SetValue(self.defaults['auto_kick'])
+          except:
+            self.parent.exception()
+
+        def saveConfigs(evt, self = self):
+          try:
+            m = self.cryptomode_data.GetSelection()
+            self.cryptoConfig['crypto_stealth'] = int(m==3)
+            self.cryptoConfig['crypto_only'] = int(m>=2)
+            self.cryptoConfig['crypto_allowed'] = int(m>=1)
+            self.cryptoConfig['security']=int(self.security_checkbox.GetValue())
+            self.cryptoConfig['auto_kick']=int(self.autokick_checkbox.GetValue())
+            self.cryptoMenuBox.Close()
+          except:
+            self.parent.exception()
+
+        def cancelConfigs(evt, self = self):            
+            self.cryptoMenuBox.Close()
+
+        EVT_BUTTON(self.cryptoMenuBox, okButton.GetId(), saveConfigs)
+        EVT_BUTTON(self.cryptoMenuBox, cancelButton.GetId(), cancelConfigs)
+        EVT_BUTTON(self.cryptoMenuBox, defaultsButton.GetId(), setDefaults)
+
+        self.cryptoMenuBox.Show ()
+        border.Fit(panel)
+        self.cryptoMenuBox.Fit()
+      except:
+        self.parent.exception()
+
+
+    def CloseCrypt(self):
+        if self.cryptoMenuBox is not None:
+            try:
+                self.cryptoMenuBox.Close()
+            except wxPyDeadObjectError, e:
+                self.cryptoMenuBox = None
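
The advanced and crypto dialogs above fold multi-way choices into sets of boolean
config flags (lock_files/lock_while_reading from the three-way locking choice,
double_check/triple_check from the checking choice, and the crypto_* flags from the
four-way encryption radio). A standalone sketch of those encodings, using the same
thresholds as the saveConfigs handlers; the helper names are illustrative only:

    # Sketch of the selection-index encodings used by saveConfigs above.
    def locking_flags(selection):
        # 0 = no locking, 1 = lock while writing, 2 = lock always
        return {'lock_files': int(selection >= 1),
                'lock_while_reading': int(selection > 1)}

    def crypto_flags(selection):
        # 0 = none permitted, 1 = enabled, 2 = required, 3 = stealth
        return {'crypto_allowed': int(selection >= 1),
                'crypto_only': int(selection >= 2),
                'crypto_stealth': int(selection == 3)}

    print locking_flags(2)   # both locking flags set
    print crypto_flags(1)    # only crypto_allowed set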
+
+

Added: debtorrent/branches/upstream/current/BitTornado/ConnChoice.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/ConnChoice.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/ConnChoice.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/ConnChoice.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,31 @@
+connChoices=(
+    {'name':'automatic',
+     'rate':{'min':0, 'max':5000, 'def': 0},
+     'conn':{'min':0, 'max':100,  'def': 0},
+     'automatic':1},
+    {'name':'unlimited',
+     'rate':{'min':0, 'max':5000, 'def': 0, 'div': 50},
+     'conn':{'min':4, 'max':100,  'def': 4}},
+    {'name':'dialup/isdn',
+     'rate':{'min':3,   'max':   8, 'def':  5},
+     'conn':{'min':2, 'max':  3, 'def': 2},
+     'initiate': 12},
+    {'name':'dsl/cable slow',
+     'rate':{'min':10,  'max':  48, 'def': 13},
+     'conn':{'min':4, 'max': 20, 'def': 4}},
+    {'name':'dsl/cable fast',
+     'rate':{'min':20,  'max': 100, 'def': 40},
+     'conn':{'min':4, 'max': 30, 'def': 6}},
+    {'name':'T1',
+     'rate':{'min':100, 'max': 300, 'def':150},
+     'conn':{'min':4, 'max': 40, 'def':10}},
+    {'name':'T3+',
+     'rate':{'min':400, 'max':2000, 'def':500},
+     'conn':{'min':4, 'max':100, 'def':20}},
+    {'name':'seeder',
+     'rate':{'min':0, 'max':5000, 'def':0, 'div': 50},
+     'conn':{'min':1, 'max':100, 'def':1}},
+    {'name':'SUPER-SEED', 'super-seed':1}
+     )
+
+connChoiceList = map(lambda x:x['name'], connChoices)
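
For reference, a minimal sketch of how these presets might be consumed; it assumes
the BitTornado package directory is importable, and the helper name is illustrative:

    from BitTornado.ConnChoice import connChoices, connChoiceList

    def preset_defaults(name):
        # Return (default rate in kB/s, default connection count) for a named
        # preset; entries like 'SUPER-SEED' carry no numbers and yield (0, 0).
        for choice in connChoices:
            if choice['name'] == name:
                return (choice.get('rate', {}).get('def', 0),
                        choice.get('conn', {}).get('def', 0))
        raise KeyError(name)

    print connChoiceList
    print preset_defaults('dsl/cable fast')   # -> (40, 6)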

Added: debtorrent/branches/upstream/current/BitTornado/CreateIcons.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/CreateIcons.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/CreateIcons.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/CreateIcons.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,105 @@
+# Generated from bt_MakeCreateIcons - 05/10/04 22:15:33
+# T-0.3.0 (BitTornado)
+
+from binascii import a2b_base64
+from zlib import decompress
+from os.path import join
+
+icons = {
+    "icon_bt.ico":
+        "eJyt1K+OFEEQx/FaQTh5GDRZhSQpiUHwCrxCBYXFrjyJLXeXEARPsZqUPMm+" +
+        "AlmP+PGtngoLDji69zMz2zt/qqtr1mxHv7621d4+MnvK/jl66Bl2drV+e7Wz" +
+        "S/v12A7rY4fDtuvOwfF4tOPXo52/fLLz+WwpWd6nqRXHKXux39sTrtnjNd7g" +
+        "PW7wGSd860f880kffjvJ2QYS1Zcw4AjcoaA5yRFIFDQXOgKJguZmjkCioB4T" +
+        "Y2CqxpTXA7sHEgVNEC8RSBQ0gfk7xtknCupgk3EEEgXlNgFHIFHQTMoRSBQ0" +
+        "E+1ouicKmsk7AomCJiGOQKKgSZIjkChoEucIJAqaZDoCiYImwb4iydULmqQ7" +
+        "AomC1kLcEQ/jSBQ0i+MIJAqaBXMEElVdi9siOgKJgmZhfWWlVjTddXW/FtsR" +
+        "SBQ0BeAIJAqaonAEEgVNoTgCiYKmeByBREHaqiVWRtSRrAJzBBIFTdE5AomC" +
+        "phBPpxPP57dVkDfrTl063nUVnWe383fZx9tb3uN+o7U+BLDtuvcQm8d/27Y/" +
+        "jO3o5/ay+YPv/+f6y30e1OyB7QcsGWFj",
+    "icon_done.ico":
+        "eJyt1K2OVEEQhuEaQbJyMWgyCklSEoPgFvYWKigsduRKbLndhCC4itGk5Erm" +
+        "Fsh4xMdbfSoMOGDpnuf89Jyf6uqaMdvRr69ttbdPzJ6xf4Eeeo6dXa3vXu/s" +
+        "0n49tsP62OGw7bpzcDwe7fj1aOcvn+x8PltKlg9pasVxyl7u9/aUe/Z4gxu8" +
+        "xy0+44Rv/Yp/vujDbxc520Ci+hYGHIF7FDQXOQKJguZGRyBR0DzMEUgU1GNi" +
+        "DEzVmPJ6YfdAoqAJ4hUCiYImMH/HOPtEQR1sMo5AoqDcJuAIJAqaSTkCiYJm" +
+        "oh1N90RBM3lHIFHQJMQRSBQ0SXIEEgVN4hyBREGTTEcgUdAk2FckuXpBk3RH" +
+        "IFHQWoh74mEciYJmcRyBREGzYI5AoqprcVtERyBR0Cysr6zUiqa7rh7WYjsC" +
+        "iYKmAByBREFTFI5AoqApFEcgUdAUjyOQKEhbtcTKiDqSVWCOQKKgKTpHIFHQ" +
+        "FOLpdOL9fLcK8nY9qUvHu66i8+x2/i77eHfH77h/0VofAth23Xuoz/+2bX8Y" +
+        "29HP7WXzB+f/5/7Lcx7V7JHtB9dPG3I=",
+    "black.ico":
+        "eJzt1zsOgkAYReFLLCztjJ2UlpLY485kOS7DpbgESwqTcQZDghjxZwAfyfl0" +
+        "LIieGzUWSom/pan840rHnbSUtPHHX9Je9+tAh2ybNe8TZZ/vk8ajJ4zl6JVJ" +
+        "+xFx+0R03Djx1/2B8bcT9L/bt0+4Wq+4se8e/VTfMvGqb4n3nYiIGz+lvt9s" +
+        "9EpE2T4xJN4xNFYWU6t+JWXuXDFzTom7SodSyi/S+iwtwjlJ80KaNY/C34rW" +
+        "aT8nvK5uhF7ohn7Yqfb87kffLAAAAAAAAAAAAAAAAAAAGMUNy7dADg==",
+    "blue.ico":
+        "eJzt10EOwUAYhuGv6cLSTux06QD2dTM9jmM4iiNYdiEZ81cIFTWddtDkfbQW" +
+        "De8XogtS5h9FIf+81H4jLSSt/ekvaavrdaCDez4SZV+PpPHoicBy9ErSfkQ8" +
+        "fCI6Hjgx6f7A+McJ+r/t95i46xMP7bf8Uz9o4k0/XMT338voP5shK0MkjXcM" +
+        "YSqam6Qunatyf7Nk7iztaqk8SaujNLfzIM0qKX88ZX8rWmf7Nfa+W8N61rW+" +
+        "7TR7fverHxYAAAAAAAAAAAAAAAAAAIziApVZ444=",
+    "green.ico":
+        "eJzt1zEOgjAAheFHGBzdjJuMHsAdbybxNB7Do3gERwaT2mJIBCOWlqok/yc4" +
+        "EP1fNDIoZfZRFLLPa5120krS1p72kvZ6XAeGHLtHouzrkTQePOFZDl5J2g+I" +
+        "+08Exz0nZt2PjH+coP/bvveEaY2L+/VN13/1PSbe9v0FfP+jTP6ziVmJkTQ+" +
+        "MISZaO6SujSmyu3dkpmbdKil8iptLtLSnWdpUUn58yn3t6J39l/j3tc2XM91" +
+        "Xd/tNHt296sfFgAAAAAAAAAAAAAAAAAATOIOVLEoDg==",
+    "red.ico":
+        "eJzt10EOwUAYhuGv6cLSTux06QD2dTOO4xiO4giWXUjG/BVCRTuddtDkfbQW" +
+        "De8XogtS5h9FIf+81GEjLSSt/ekvaavbdaCVez0SZd+PpPHoicBy9ErSfkQ8" +
+        "fCI6Hjgx6f7AeOcE/d/2QyceesaD+g1/1u+e+NwPF/H99zL6z2bIyhBJ4y1D" +
+        "mIb6LqlK5/a5v1syd5F2lVSepdVJmtt5lGZ7KX8+ZX8rGmfzNfa+e8N61rW+" +
+        "7dR7fverHxYAAAAAAAAAAAAAAAAAAIziCpgs444=",
+    "white.ico":
+        "eJzt1zsOgkAYReFLKCztjJ2ULsAed6bLcRnuwYTaJVhSmIwzGBLEiD8D+EjO" +
+        "p2NB9NyosVBK/C3L5B+XOmykhaS1P/6StrpfBzoUp6J5nyj7fJ80Hj1hLEev" +
+        "TNqPiNsnouPGib/uD4y/naD/3b59wtV6xY199+in+paJV31LvO9ERNz4KfX9" +
+        "ZqNXIsr2iSHxjqGxspha9Sspc+f2qXNK3FXalVJ+kVZnaR7OUZrtpbR5FP5W" +
+        "tE77OeF1dSP0Qjf0w06153c/+mYBAAAAAAAAAAAAAAAAAMAobj//I7s=",
+    "yellow.ico":
+        "eJzt1zsOgkAYReFLKCztjJ2ULsAedybLcRkuxSVYUpiM82M0ihGHgVFJzidY" +
+        "ED03vgqlzN+KQv5+qf1GWkha+9Nf0lbX60AX556ORNnXI2k8eiKwHL2StB8R" +
+        "D5+IjgdOTLo/MP5xgv5v+8ETd/3iYf2W/+oHTLzth4t4/3sZ/WszZGWIpPGO" +
+        "IUxE8yupS+eq3H9smTtLu1oqT9LqKM3tPEizSsofT9nfitbZfow979awnnWt" +
+        "bzvNnt/96osFAAAAAAAAAAAAAAAAAACjuABhjmIs",
+    "black1.ico":
+        "eJzt0zEOgkAUANEhFpZSGTstTWzkVt5Cj8ZROAIHMNGPWBCFDYgxMZkHn2Iz" +
+        "G5YCyOLKc+K54XSANbCPiSV2tOt/qjgW3XtSnN41FH/Qv29Jx/P7qefp7W8P" +
+        "4z85HQ+9JRG/7BpTft31DPUKyiVcFjEZzQ/TTtdzrWnKmCr6evv780qSJEmS" +
+        "JEmSJEmSJEmSpPnunVFDcA==",
+    "green1.ico":
+        "eJzt0zEKwkAQRuEXLCyTSuy0DHgxb6F4shzFI+QAgpkkFoombowIwvt2Z4vh" +
+        "X5gtFrJYRUGca/Y7WAFlVLTY0vf/1elxTwqP3xoKf5B/vjIenp+fOs+r/LWT" +
+        "/uQ34aGpUqQnv+1ygDqHagnHRVRG+2H6unfrtZkq6hz5evP7eSVJkiRJkiRJ" +
+        "kiRJkiRJ0nwNoWQ+AA==",
+    "yellow1.ico":
+        "eJzt0zEKwkAQRuEXLCxNJXZaCl7MW8Sj5SgeIQcQ4oS1UDTJxkhAeN/ubDH8" +
+        "C7PFQhGrLIlzx/kEW+AYFS0OpP6/atuXPSk8fKsv/EX+/cpweH5+6jyf8kn+" +
+        "k0fCfVPlyE/+2q2CZgP1Gi6rqILuw6R69uh1mTrqGvlmv/y8kiRJkiRJkiRJ" +
+        "kiRJkiRpvjsp9L8k",
+    "alloc.gif":
+        "eJxz93SzsEw0YRBh+M4ABi0MS3ue///P8H8UjIIRBhR/sjAyMDAx6IAyAihP" +
+        "MHAcYWDlkPHYsOBgM4ewVsyJDQsPNzEoebF8CHjo0smjH3dmRsDjI33C7Dw3" +
+        "MiYuOtjNyDShRSNwyemJguJJKhaGS32nGka61Vg2NJyYKRd+bY+nwtMzjbqV" +
+        "Qh84gxMCJgnlL4vJuqJyaa5NfFLNLsNVV2a7syacfVWkHd4bv7RN1ltM7ejm" +
+        "tMtNZ19Oyb02p8C3aqr3dr2GbXl/7fZyOej5rW653WZ7MzzHZV+v7O2/EZM+" +
+        "Pt45kbX6ScWHNWfOilo3n5thucXv8org1XF3DRQYrAEWiVY3"
+}
+
+def GetIcons():
+    return icons.keys()
+
+def CreateIcon(icon, savedir):
+    try:
+        f = open(join(savedir,icon),"wb")
+        f.write(decompress(a2b_base64(icons[icon])))
+        success = 1
+    except:
+        success = 0
+    try:
+        f.close()
+    except:
+        pass
+    return success
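
A rough usage sketch for the icon helpers above, writing every bundled icon into a
scratch directory (the import path assumes the package layout shown in this commit):

    import tempfile
    from BitTornado.CreateIcons import GetIcons, CreateIcon

    savedir = tempfile.mkdtemp()
    for name in GetIcons():
        if not CreateIcon(name, savedir):   # CreateIcon returns 1 on success, 0 on failure
            print 'failed to write', name
    print 'icons written to', savedir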

Added: debtorrent/branches/upstream/current/BitTornado/CurrentRateMeasure.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/CurrentRateMeasure.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/CurrentRateMeasure.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/CurrentRateMeasure.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,37 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from clock import clock
+
+class Measure:
+    def __init__(self, max_rate_period, fudge = 1):
+        self.max_rate_period = max_rate_period
+        self.ratesince = clock() - fudge
+        self.last = self.ratesince
+        self.rate = 0.0
+        self.total = 0L
+
+    def update_rate(self, amount):
+        self.total += amount
+        t = clock()
+        self.rate = (self.rate * (self.last - self.ratesince) + 
+            amount) / (t - self.ratesince + 0.0001)
+        self.last = t
+        if self.ratesince < t - self.max_rate_period:
+            self.ratesince = t - self.max_rate_period
+
+    def get_rate(self):
+        self.update_rate(0)
+        return self.rate
+
+    def get_rate_noupdate(self):
+        return self.rate
+
+    def time_until_rate(self, newrate):
+        if self.rate <= newrate:
+            return 0
+        t = clock() - self.ratesince
+        return ((self.rate * t) / newrate) - t
+
+    def get_total(self):
+        return self.total
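
As an illustration of the Measure class above, a small feed-and-read sketch (the
byte counts and sleep interval are arbitrary):

    import time
    from BitTornado.CurrentRateMeasure import Measure

    m = Measure(max_rate_period = 20.0)
    for _ in range(5):
        m.update_rate(16384)          # pretend 16 KiB just arrived
        time.sleep(0.1)
    print 'current rate: %.0f bytes/s' % m.get_rate()
    print 'total transferred:', m.get_total()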

Added: debtorrent/branches/upstream/current/BitTornado/HTTPHandler.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/HTTPHandler.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/HTTPHandler.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/HTTPHandler.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,167 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+from sys import stdout
+import time
+from clock import clock
+from gzip import GzipFile
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+months = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+    'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+class HTTPConnection:
+    def __init__(self, handler, connection):
+        self.handler = handler
+        self.connection = connection
+        self.buf = ''
+        self.closed = False
+        self.done = False
+        self.donereading = False
+        self.next_func = self.read_type
+
+    def get_ip(self):
+        return self.connection.get_ip()
+
+    def data_came_in(self, data):
+        if self.donereading or self.next_func is None:
+            return True
+        self.buf += data
+        while True:
+            try:
+                i = self.buf.index('\n')
+            except ValueError:
+                return True
+            val = self.buf[:i]
+            self.buf = self.buf[i+1:]
+            self.next_func = self.next_func(val)
+            if self.donereading:
+                return True
+            if self.next_func is None or self.closed:
+                return False
+
+    def read_type(self, data):
+        self.header = data.strip()
+        words = data.split()
+        if len(words) == 3:
+            self.command, self.path, garbage = words
+            self.pre1 = False
+        elif len(words) == 2:
+            self.command, self.path = words
+            self.pre1 = True
+            if self.command != 'GET':
+                return None
+        else:
+            return None
+        if self.command not in ('HEAD', 'GET'):
+            return None
+        self.headers = {}
+        return self.read_header
+
+    def read_header(self, data):
+        data = data.strip()
+        if data == '':
+            self.donereading = True
+            if self.headers.get('accept-encoding','').find('gzip') > -1:
+                self.encoding = 'gzip'
+            else:
+                self.encoding = 'identity'
+            r = self.handler.getfunc(self, self.path, self.headers)
+            if r is not None:
+                self.answer(r)
+            return None
+        try:
+            i = data.index(':')
+        except ValueError:
+            return None
+        self.headers[data[:i].strip().lower()] = data[i+1:].strip()
+        if DEBUG:
+            print data[:i].strip() + ": " + data[i+1:].strip()
+        return self.read_header
+
+    def answer(self, (responsecode, responsestring, headers, data)):
+        if self.closed:
+            return
+        if self.encoding == 'gzip':
+            compressed = StringIO()
+            gz = GzipFile(fileobj = compressed, mode = 'wb', compresslevel = 9)
+            gz.write(data)
+            gz.close()
+            cdata = compressed.getvalue()
+            if len(cdata) >= len(data):
+                self.encoding = 'identity'
+            else:
+                if DEBUG:
+                   print "Compressed: %i  Uncompressed: %i\n" % (len(cdata),len(data))
+                data = cdata
+                headers['Content-Encoding'] = 'gzip'
+
+        # i'm abusing the identd field here, but this should be ok
+        if self.encoding == 'identity':
+            ident = '-'
+        else:
+            ident = self.encoding
+        self.handler.log( self.connection.get_ip(), ident, '-',
+                          self.header, responsecode, len(data),
+                          self.headers.get('referer','-'),
+                          self.headers.get('user-agent','-') )
+        self.done = True
+        r = StringIO()
+        r.write('HTTP/1.0 ' + str(responsecode) + ' ' + 
+            responsestring + '\r\n')
+        if not self.pre1:
+            headers['Content-Length'] = len(data)
+            for key, value in headers.items():
+                r.write(key + ': ' + str(value) + '\r\n')
+            r.write('\r\n')
+        if self.command != 'HEAD':
+            r.write(data)
+        self.connection.write(r.getvalue())
+        if self.connection.is_flushed():
+            self.connection.shutdown(1)
+
+class HTTPHandler:
+    def __init__(self, getfunc, minflush):
+        self.connections = {}
+        self.getfunc = getfunc
+        self.minflush = minflush
+        self.lastflush = clock()
+
+    def external_connection_made(self, connection):
+        self.connections[connection] = HTTPConnection(self, connection)
+
+    def connection_flushed(self, connection):
+        if self.connections[connection].done:
+            connection.shutdown(1)
+
+    def connection_lost(self, connection):
+        ec = self.connections[connection]
+        ec.closed = True
+        del ec.connection
+        del ec.next_func
+        del self.connections[connection]
+
+    def data_came_in(self, connection, data):
+        c = self.connections[connection]
+        if not c.data_came_in(data) and not c.closed:
+            c.connection.shutdown(1)
+
+    def log(self, ip, ident, username, header,
+            responsecode, length, referrer, useragent):
+        year, month, day, hour, minute, second, a, b, c = time.localtime(time.time())
+        print '%s %s %s [%02d/%3s/%04d:%02d:%02d:%02d] "%s" %i %i "%s" "%s"' % (
+            ip, ident, username, day, months[month], year, hour,
+            minute, second, header, responsecode, length, referrer, useragent)
+        t = clock()
+        if t - self.lastflush > self.minflush:
+            self.lastflush = t
+            stdout.flush()
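
The handler expects a getfunc callback returning a (responsecode, responsestring,
headers, data) tuple, as consumed by answer() above. A minimal sketch of such a
callback; the response body is arbitrary, and in the full client the handler would
be registered with the network event loop, which lives outside this file:

    from BitTornado.HTTPHandler import HTTPHandler

    def getfunc(connection, path, headers):
        body = 'you asked for ' + path + '\n'
        return (200, 'OK', {'Content-Type': 'text/plain'}, body)

    handler = HTTPHandler(getfunc, minflush = 0.5)   # flush the log at most every 0.5s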

Added: debtorrent/branches/upstream/current/BitTornado/PSYCO.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/PSYCO.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/PSYCO.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/PSYCO.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,5 @@
+# edit this file to enable/disable Psyco
+# psyco = 1 -- enabled
+# psyco = 0 -- disabled
+
+psyco = 0
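
Callers can gate the optional Psyco import on this flag; a hedged sketch of that
pattern (Psyco itself may not be installed, so the import is wrapped):

    from BitTornado.PSYCO import psyco as use_psyco
    if use_psyco:
        try:
            import psyco
            psyco.full()      # enable JIT compilation for the whole program
        except ImportError:
            pass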

Added: debtorrent/branches/upstream/current/BitTornado/RateLimiter.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/RateLimiter.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/RateLimiter.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/RateLimiter.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,153 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from traceback import print_exc
+from binascii import b2a_hex
+from clock import clock
+from CurrentRateMeasure import Measure
+from cStringIO import StringIO
+from math import sqrt
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+try:
+    sum([1])
+except:
+    sum = lambda a: reduce(lambda x,y: x+y, a, 0)
+
+DEBUG = False
+
+MAX_RATE_PERIOD = 20.0
+MAX_RATE = 10e10
+PING_BOUNDARY = 1.2
+PING_SAMPLES = 7
+PING_DISCARDS = 1
+PING_THRESHHOLD = 5
+PING_DELAY = 5  # cycles 'til first upward adjustment
+PING_DELAY_NEXT = 2  # 'til next
+ADJUST_UP = 1.05
+ADJUST_DOWN = 0.95
+UP_DELAY_FIRST = 5
+UP_DELAY_NEXT = 2
+SLOTS_STARTING = 6
+SLOTS_FACTOR = 1.66/1000
+
+class RateLimiter:
+    def __init__(self, sched, unitsize, slotsfunc = lambda x: None):
+        self.sched = sched
+        self.last = None
+        self.unitsize = unitsize
+        self.slotsfunc = slotsfunc
+        self.measure = Measure(MAX_RATE_PERIOD)
+        self.autoadjust = False
+        self.upload_rate = MAX_RATE * 1000
+        self.slots = SLOTS_STARTING    # garbage if not automatic
+
+    def set_upload_rate(self, rate):
+        # rate = -1 # test automatic
+        if rate < 0:
+            if self.autoadjust:
+                return
+            self.autoadjust = True
+            self.autoadjustup = 0
+            self.pings = []
+            rate = MAX_RATE
+            self.slots = SLOTS_STARTING
+            self.slotsfunc(self.slots)
+        else:
+            self.autoadjust = False
+        if not rate:
+            rate = MAX_RATE
+        self.upload_rate = rate * 1000
+        self.lasttime = clock()
+        self.bytes_sent = 0
+
+    def queue(self, conn):
+        assert conn.next_upload is None
+        if self.last is None:
+            self.last = conn
+            conn.next_upload = conn
+            self.try_send(True)
+        else:
+            conn.next_upload = self.last.next_upload
+            self.last.next_upload = conn
+            self.last = conn
+
+    def try_send(self, check_time = False):
+        t = clock()
+        self.bytes_sent -= (t - self.lasttime) * self.upload_rate
+        self.lasttime = t
+        if check_time:
+            self.bytes_sent = max(self.bytes_sent, 0)
+        cur = self.last.next_upload
+        while self.bytes_sent <= 0:
+            bytes = cur.send_partial(self.unitsize)
+            self.bytes_sent += bytes
+            self.measure.update_rate(bytes)
+            if bytes == 0 or cur.backlogged():
+                if self.last is cur:
+                    self.last = None
+                    cur.next_upload = None
+                    break
+                else:
+                    self.last.next_upload = cur.next_upload
+                    cur.next_upload = None
+                    cur = self.last.next_upload
+            else:
+                self.last = cur
+                cur = cur.next_upload
+        else:
+            self.sched(self.try_send, self.bytes_sent / self.upload_rate)
+
+    def adjust_sent(self, bytes):
+        self.bytes_sent = min(self.bytes_sent+bytes, self.upload_rate*3)
+        self.measure.update_rate(bytes)
+
+
+    def ping(self, delay):
+        if DEBUG:
+            print delay
+        if not self.autoadjust:
+            return
+        self.pings.append(delay > PING_BOUNDARY)
+        if len(self.pings) < PING_SAMPLES+PING_DISCARDS:
+            return
+        if DEBUG:
+            print 'cycle'
+        pings = sum(self.pings[PING_DISCARDS:])
+        del self.pings[:]
+        if pings >= PING_THRESHHOLD:   # assume flooded
+            if self.upload_rate == MAX_RATE:
+                self.upload_rate = self.measure.get_rate()*ADJUST_DOWN
+            else:
+                self.upload_rate = min(self.upload_rate,
+                                       self.measure.get_rate()*1.1)
+            self.upload_rate = max(int(self.upload_rate*ADJUST_DOWN),2)
+            self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR))
+            self.slotsfunc(self.slots)
+            if DEBUG:
+                print 'adjust down to '+str(self.upload_rate)
+            self.lasttime = clock()
+            self.bytes_sent = 0
+            self.autoadjustup = UP_DELAY_FIRST
+        else:   # not flooded
+            if self.upload_rate == MAX_RATE:
+                return
+            self.autoadjustup -= 1
+            if self.autoadjustup:
+                return
+            self.upload_rate = int(self.upload_rate*ADJUST_UP)
+            self.slots = int(sqrt(self.upload_rate*SLOTS_FACTOR))
+            self.slotsfunc(self.slots)
+            if DEBUG:
+                print 'adjust up to '+str(self.upload_rate)
+            self.lasttime = clock()
+            self.bytes_sent = 0
+            self.autoadjustup = UP_DELAY_NEXT
+
+
+
+
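
For reference, a minimal sketch of driving the RateLimiter added above (Python 2; the directory containing BitTornado/ from this checkout must be on sys.path). DummyConnection and the no-op scheduler lambda are illustrative stand-ins for the real connection objects (anything exposing send_partial() and backlogged()) and for RawServer.add_task:

    # Sketch only: DummyConnection and the lambda scheduler are stand-ins,
    # not BitTornado classes.
    from BitTornado.RateLimiter import RateLimiter

    class DummyConnection:
        next_upload = None
        def __init__(self, payload):
            self.payload = payload
        def send_partial(self, unitsize):
            chunk, self.payload = self.payload[:unitsize], self.payload[unitsize:]
            return len(chunk)
        def backlogged(self):
            return False

    limiter = RateLimiter(sched = lambda func, delay: None, unitsize = 1460)
    limiter.set_upload_rate(100)                # cap uploads at 100 kB/s
    limiter.queue(DummyConnection('x' * 4096))  # sends one unitsize chunk immediately
    print limiter.measure.get_rate()            # bytes/s recorded by the Measure object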

Added: debtorrent/branches/upstream/current/BitTornado/RateMeasure.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/RateMeasure.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/RateMeasure.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/RateMeasure.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,75 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from clock import clock
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+FACTOR = 0.999
+
+class RateMeasure:
+    def __init__(self):
+        self.last = None
+        self.time = 1.0
+        self.got = 0.0
+        self.remaining = None
+        self.broke = False
+        self.got_anything = False
+        self.last_checked = None
+        self.rate = 0
+        self.lastten = False
+
+    def data_came_in(self, amount):
+        if not self.got_anything:
+            self.got_anything = True
+            self.last = clock()
+            return
+        self.update(amount)
+
+    def data_rejected(self, amount):
+        pass
+
+    def get_time_left(self, left):
+        t = clock()
+        if not self.got_anything:
+            return None
+        if t - self.last > 15:
+            self.update(0)
+        try:
+            remaining = left/self.rate
+            if not self.lastten and remaining <= 10:
+                self.lastten = True
+            if self.lastten:
+                return remaining
+            delta = max(remaining/20,2)
+            if self.remaining is None:
+                self.remaining = remaining
+            elif abs(self.remaining-remaining) > delta:
+                self.remaining = remaining
+            else:
+                self.remaining -= t - self.last_checked
+        except ZeroDivisionError:
+            self.remaining = None
+        if self.remaining is not None and self.remaining < 0.1:
+            self.remaining = 0.1
+        self.last_checked = t
+        return self.remaining
+
+    def update(self, amount):
+        t = clock()
+        t1 = int(t)
+        l1 = int(self.last)
+        for i in xrange(l1,t1):
+            self.time *= FACTOR
+            self.got *= FACTOR
+        self.got += amount
+        if t - self.last < 20:
+            self.time += t - self.last
+        self.last = t
+        try:
+            self.rate = self.got / self.time
+        except ZeroDivisionError:
+            pass
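
RateMeasure above keeps a decayed bytes/seconds average (FACTOR applied once per elapsed second) and turns it into a time-remaining estimate. A small sketch of feeding it data (Python 2; the directory containing BitTornado/ must be on sys.path; the byte counts are invented):

    from time import sleep
    from BitTornado.RateMeasure import RateMeasure

    m = RateMeasure()
    m.data_came_in(0)               # first call only records the start time
    for _ in range(3):
        sleep(1)
        m.data_came_in(50000)       # pretend ~50 kB arrived each second
    print m.get_time_left(10 ** 6)  # rough seconds left for 1 MB at the measured rate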

Added: debtorrent/branches/upstream/current/BitTornado/RawServer.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/RawServer.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/RawServer.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/RawServer.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,195 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from bisect import insort
+from SocketHandler import SocketHandler, UPnP_ERROR
+import socket
+from cStringIO import StringIO
+from traceback import print_exc
+from select import error
+from threading import Thread, Event
+from time import sleep
+from clock import clock
+import sys
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+
+def autodetect_ipv6():
+    try:
+        assert sys.version_info >= (2,3)
+        assert socket.has_ipv6
+        socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+    except:
+        return 0
+    return 1
+
+def autodetect_socket_style():
+    if sys.platform.find('linux') < 0:
+        return 1
+    else:
+        try:
+            f = open('/proc/sys/net/ipv6/bindv6only','r')
+            dual_socket_style = int(f.read())
+            f.close()
+            return int(not dual_socket_style)
+        except:
+            return 0
+
+
+READSIZE = 32768
+
+class RawServer:
+    def __init__(self, doneflag, timeout_check_interval, timeout, noisy = True,
+                 ipv6_enable = True, failfunc = lambda x: None, errorfunc = None,
+                 sockethandler = None, excflag = Event()):
+        self.timeout_check_interval = timeout_check_interval
+        self.timeout = timeout
+        self.servers = {}
+        self.single_sockets = {}
+        self.dead_from_write = []
+        self.doneflag = doneflag
+        self.noisy = noisy
+        self.failfunc = failfunc
+        self.errorfunc = errorfunc
+        self.exccount = 0
+        self.funcs = []
+        self.externally_added = []
+        self.finished = Event()
+        self.tasks_to_kill = []
+        self.excflag = excflag
+        
+        if sockethandler is None:
+            sockethandler = SocketHandler(timeout, ipv6_enable, READSIZE)
+        self.sockethandler = sockethandler
+        self.add_task(self.scan_for_timeouts, timeout_check_interval)
+
+    def get_exception_flag(self):
+        return self.excflag
+
+    def _add_task(self, func, delay, id = None):
+        assert float(delay) >= 0
+        insort(self.funcs, (clock() + delay, func, id))
+
+    def add_task(self, func, delay = 0, id = None):
+        assert float(delay) >= 0
+        self.externally_added.append((func, delay, id))
+
+    def scan_for_timeouts(self):
+        self.add_task(self.scan_for_timeouts, self.timeout_check_interval)
+        self.sockethandler.scan_for_timeouts()
+
+    def bind(self, port, bind = '', reuse = False,
+                        ipv6_socket_style = 1, upnp = False):
+        self.sockethandler.bind(port, bind, reuse, ipv6_socket_style, upnp)
+
+    def find_and_bind(self, minport, maxport, bind = '', reuse = False,
+                      ipv6_socket_style = 1, upnp = 0, randomizer = False):
+        return self.sockethandler.find_and_bind(minport, maxport, bind, reuse,
+                                 ipv6_socket_style, upnp, randomizer)
+
+    def start_connection_raw(self, dns, socktype, handler = None):
+        return self.sockethandler.start_connection_raw(dns, socktype, handler)
+
+    def start_connection(self, dns, handler = None, randomize = False):
+        return self.sockethandler.start_connection(dns, handler, randomize)
+
+    def get_stats(self):
+        return self.sockethandler.get_stats()
+
+    def pop_external(self):
+        while self.externally_added:
+            (a, b, c) = self.externally_added.pop(0)
+            self._add_task(a, b, c)
+
+
+    def listen_forever(self, handler):
+        self.sockethandler.set_handler(handler)
+        try:
+            while not self.doneflag.isSet():
+                try:
+                    self.pop_external()
+                    self._kill_tasks()
+                    if self.funcs:
+                        period = self.funcs[0][0] + 0.001 - clock()
+                    else:
+                        period = 2 ** 30
+                    if period < 0:
+                        period = 0
+                    events = self.sockethandler.do_poll(period)
+                    if self.doneflag.isSet():
+                        return
+                    while self.funcs and self.funcs[0][0] <= clock():
+                        garbage1, func, id = self.funcs.pop(0)
+                        if id in self.tasks_to_kill:
+                            pass
+                        try:
+#                            print func.func_name
+                            func()
+                        except (SystemError, MemoryError), e:
+                            self.failfunc(str(e))
+                            return
+                        except KeyboardInterrupt:
+#                            self.exception(True)
+                            return
+                        except:
+                            if self.noisy:
+                                self.exception()
+                    self.sockethandler.close_dead()
+                    self.sockethandler.handle_events(events)
+                    if self.doneflag.isSet():
+                        return
+                    self.sockethandler.close_dead()
+                except (SystemError, MemoryError), e:
+                    self.failfunc(str(e))
+                    return
+                except error:
+                    if self.doneflag.isSet():
+                        return
+                except KeyboardInterrupt:
+#                    self.exception(True)
+                    return
+                except:
+                    self.exception()
+                if self.exccount > 10:
+                    return
+        finally:
+#            self.sockethandler.shutdown()
+            self.finished.set()
+
+    def is_finished(self):
+        return self.finished.isSet()
+
+    def wait_until_finished(self):
+        self.finished.wait()
+
+    def _kill_tasks(self):
+        if self.tasks_to_kill:
+            new_funcs = []
+            for (t, func, id) in self.funcs:
+                if id not in self.tasks_to_kill:
+                    new_funcs.append((t, func, id))
+            self.funcs = new_funcs
+            self.tasks_to_kill = []
+
+    def kill_tasks(self, id):
+        self.tasks_to_kill.append(id)
+
+    def exception(self, kbint = False):
+        if not kbint:
+            self.excflag.set()
+        self.exccount += 1
+        if self.errorfunc is None:
+            print_exc()
+        else:
+            data = StringIO()
+            print_exc(file = data)
+#            print data.getvalue()   # report exception here too
+            if not kbint:           # don't report here if it's a keyboard interrupt
+                self.errorfunc(data.getvalue())
+
+    def shutdown(self):
+        self.sockethandler.shutdown()
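
RawServer above wraps SocketHandler's poll loop and dispatches timed tasks plus socket events to a handler object. A minimal echo-style sketch (Python 2; the directory containing BitTornado/ must be on sys.path; EchoHandler, the port range and the 5-second shutdown task are invented, and IPv6 is disabled to keep the bind simple):

    from threading import Event
    from BitTornado.RawServer import RawServer

    class EchoHandler:
        def external_connection_made(self, ss):
            ss.write('hello\n')        # ss is a SocketHandler.SingleSocket
        def data_came_in(self, ss, data):
            ss.write(data)             # echo everything back
        def connection_flushed(self, ss):
            pass
        def connection_lost(self, ss):
            pass

    doneflag = Event()
    server = RawServer(doneflag, timeout_check_interval = 60.0, timeout = 300.0,
                       ipv6_enable = False)
    port = server.find_and_bind(10000, 10050)   # first free port in the range
    server.add_task(doneflag.set, 5)            # stop the loop after 5 seconds
    server.listen_forever(EchoHandler())
    server.shutdown()
    print 'served echo on port', port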

Added: debtorrent/branches/upstream/current/BitTornado/ServerPortHandler.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/ServerPortHandler.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/ServerPortHandler.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/ServerPortHandler.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,243 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from cStringIO import StringIO
+#from RawServer import RawServer
+from BTcrypto import Crypto
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+from BT1.Encrypter import protocol_name
+
+default_task_id = []
+
+class SingleRawServer:
+    def __init__(self, info_hash, multihandler, doneflag, protocol):
+        self.info_hash = info_hash
+        self.doneflag = doneflag
+        self.protocol = protocol
+        self.multihandler = multihandler
+        self.rawserver = multihandler.rawserver
+        self.finished = False
+        self.running = False
+        self.handler = None
+        self.taskqueue = []
+
+    def shutdown(self):
+        if not self.finished:
+            self.multihandler.shutdown_torrent(self.info_hash)
+
+    def _shutdown(self):
+        if not self.finished:
+            self.finished = True
+            self.running = False
+            self.rawserver.kill_tasks(self.info_hash)
+            if self.handler:
+                self.handler.close_all()
+
+    def _external_connection_made(self, c, options, already_read,
+                                  encrypted = None ):
+        if self.running:
+            c.set_handler(self.handler)
+            self.handler.externally_handshaked_connection_made(
+                c, options, already_read, encrypted = encrypted)
+
+    ### RawServer functions ###
+
+    def add_task(self, func, delay=0, id = default_task_id):
+        if id is default_task_id:
+            id = self.info_hash
+        if not self.finished:
+            self.rawserver.add_task(func, delay, id)
+
+#    def bind(self, port, bind = '', reuse = False):
+#        pass    # not handled here
+        
+    def start_connection(self, dns, handler = None):
+        if not handler:
+            handler = self.handler
+        c = self.rawserver.start_connection(dns, handler)
+        return c
+
+#    def listen_forever(self, handler):
+#        pass    # don't call with this
+    
+    def start_listening(self, handler):
+        self.handler = handler
+        self.running = True
+        return self.shutdown    # obviously, doesn't listen forever
+
+    def is_finished(self):
+        return self.finished
+
+    def get_exception_flag(self):
+        return self.rawserver.get_exception_flag()
+
+
+class NewSocketHandler:     # hand a new socket off where it belongs
+    def __init__(self, multihandler, connection):
+        self.multihandler = multihandler
+        self.connection = connection
+        connection.set_handler(self)
+        self.closed = False
+        self.buffer = ''
+        self.complete = False
+        self.read = self._read
+        self.write = connection.write
+        self.next_len, self.next_func = 1+len(protocol_name), self.read_header
+        self.multihandler.rawserver.add_task(self._auto_close, 30)
+
+    def _auto_close(self):
+        if not self.complete:
+            self.close()
+        
+    def close(self):
+        if not self.closed:
+            self.connection.close()
+            self.closed = True
+
+    # copied from Encrypter and modified
+    
+    def _read_header(self, s):
+        if s == chr(len(protocol_name))+protocol_name:
+            self.protocol = protocol_name
+            return 8, self.read_options
+        return None
+
+    def read_header(self, s):
+        if self._read_header(s):
+            if self.multihandler.config['crypto_only']:
+                return None
+            return 8, self.read_options
+        if not self.multihandler.config['crypto_allowed']:
+            return None
+        self.encrypted = True
+        self.encrypter = Crypto(False)
+        self._write_buffer(s)
+        return self.encrypter.keylength, self.read_crypto_header
+
+    def read_crypto_header(self, s):
+        self.encrypter.received_key(s)
+        self.write(self.encrypter.pubkey+self.encrypter.padding())
+        self._max_search = 520
+        return 0, self.read_crypto_block3a
+
+    def _search_for_pattern(self, s, pat):
+        p = s.find(pat)
+        if p < 0:
+            self._max_search -= len(s)+1-len(pat)
+            if self._max_search < 0:
+                self.close()
+                return False
+            self._write_buffer(s[1-len(pat):])
+            return False
+        self._write_buffer(s[p+len(pat):])
+        return True
+
+    def read_crypto_block3a(self, s):
+        if not self._search_for_pattern(s,self.encrypter.block3a):
+            return -1, self.read_crypto_block3a     # wait for more data
+        return 20, self.read_crypto_block3b
+
+    def read_crypto_block3b(self, s):
+        for k in self.multihandler.singlerawservers.keys():
+            if self.encrypter.test_skey(s,k):
+                self.multihandler.singlerawservers[k]._external_connection_made(
+                        self.connection, None, self.buffer,
+                        encrypted = self.encrypter )
+                return True
+        return None
+
+    def read_options(self, s):
+        self.options = s
+        return 20, self.read_download_id
+
+    def read_download_id(self, s):
+        if self.multihandler.singlerawservers.has_key(s):
+            if self.multihandler.singlerawservers[s].protocol == self.protocol:
+                self.multihandler.singlerawservers[s]._external_connection_made(
+                        self.connection, self.options, self.buffer)
+                return True
+        return None
+
+
+    def read_dead(self, s):
+        return None
+
+    def data_came_in(self, garbage, s):
+        self.read(s)
+
+    def _write_buffer(self, s):
+        self.buffer = s+self.buffer
+
+    def _read(self, s):
+        self.buffer += s
+        while True:
+            if self.closed:
+                return
+            # self.next_len = # of characters function expects
+            # or 0 = all characters in the buffer
+            # or -1 = wait for next read, then all characters in the buffer
+            if self.next_len <= 0:
+                m = self.buffer
+                self.buffer = ''
+            elif len(self.buffer) >= self.next_len:
+                m = self.buffer[:self.next_len]
+                self.buffer = self.buffer[self.next_len:]
+            else:
+                return
+            try:
+                x = self.next_func(m)
+            except:
+                self.next_len, self.next_func = 1, self.read_dead
+                raise
+            if x is None:
+                self.close()
+                return
+            if x == True:
+                self.complete = True
+                return
+            self.next_len, self.next_func = x
+            if self.next_len < 0:  # already checked buffer
+                return             # wait for additional data
+
+
+    def connection_flushed(self, ss):
+        pass
+
+    def connection_lost(self, ss):
+        self.closed = True
+
+class MultiHandler:
+    def __init__(self, rawserver, doneflag, config):
+        self.rawserver = rawserver
+        self.masterdoneflag = doneflag
+        self.config = config
+        self.singlerawservers = {}
+        self.connections = {}
+        self.taskqueues = {}
+
+    def newRawServer(self, info_hash, doneflag, protocol=protocol_name):
+        new = SingleRawServer(info_hash, self, doneflag, protocol)
+        self.singlerawservers[info_hash] = new
+        return new
+
+    def shutdown_torrent(self, info_hash):
+        self.singlerawservers[info_hash]._shutdown()
+        del self.singlerawservers[info_hash]
+
+    def listen_forever(self):
+        self.rawserver.listen_forever(self)
+        for srs in self.singlerawservers.values():
+            srs.finished = True
+            srs.running = False
+            srs.doneflag.set()
+        
+    ### RawServer handler functions ###
+    # be wary of name collisions
+
+    def external_connection_made(self, ss):
+        NewSocketHandler(self, ss)
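
MultiHandler above multiplexes one listening RawServer across several torrents: NewSocketHandler reads the incoming handshake and hands the socket to the SingleRawServer registered under the matching 20-byte info hash. A wiring sketch (Python 2; the directory containing BitTornado/ must be on sys.path; the info hash, the config values and the 'handler' placeholder are invented):

    from threading import Event
    from BitTornado.RawServer import RawServer
    from BitTornado.ServerPortHandler import MultiHandler

    doneflag = Event()
    rawserver = RawServer(doneflag, 60.0, 300.0, ipv6_enable = False)
    rawserver.find_and_bind(10000, 10050)

    config = {'crypto_allowed': 0, 'crypto_only': 0}   # keys NewSocketHandler reads
    multi = MultiHandler(rawserver, doneflag, config)

    info_hash = chr(0) * 20                # placeholder 20-byte torrent hash
    torrent = multi.newRawServer(info_hash, Event())
    # torrent.start_listening(handler) would route that torrent's connections to
    # 'handler'; multi.listen_forever() then runs the shared poll loop for all of them.
    multi.shutdown_torrent(info_hash)
    rawserver.shutdown()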

Added: debtorrent/branches/upstream/current/BitTornado/SocketHandler.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/SocketHandler.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/SocketHandler.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/SocketHandler.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,375 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+import socket
+from errno import EWOULDBLOCK, ECONNREFUSED, EHOSTUNREACH
+try:
+    from select import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
+    timemult = 1000
+except ImportError:
+    from selectpoll import poll, error, POLLIN, POLLOUT, POLLERR, POLLHUP
+    timemult = 1
+from time import sleep
+from clock import clock
+import sys
+from random import shuffle, randrange
+from natpunch import UPnP_open_port, UPnP_close_port
+# from BT1.StreamCheck import StreamCheck
+# import inspect
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+all = POLLIN | POLLOUT
+
+UPnP_ERROR = "unable to forward port via UPnP"
+
+class SingleSocket:
+    def __init__(self, socket_handler, sock, handler, ip = None):
+        self.socket_handler = socket_handler
+        self.socket = sock
+        self.handler = handler
+        self.buffer = []
+        self.last_hit = clock()
+        self.fileno = sock.fileno()
+        self.connected = False
+        self.skipped = 0
+#        self.check = StreamCheck()
+        try:
+            self.ip = self.socket.getpeername()[0]
+        except:
+            if ip is None:
+                self.ip = 'unknown'
+            else:
+                self.ip = ip
+        
+    def get_ip(self, real=False):
+        if real:
+            try:
+                self.ip = self.socket.getpeername()[0]
+            except:
+                pass
+        return self.ip
+        
+    def close(self):
+        '''
+        for x in xrange(5,0,-1):
+            try:
+                f = inspect.currentframe(x).f_code
+                print (f.co_filename,f.co_firstlineno,f.co_name)
+                del f
+            except:
+                pass
+        print ''
+        '''
+        assert self.socket
+        self.connected = False
+        sock = self.socket
+        self.socket = None
+        self.buffer = []
+        del self.socket_handler.single_sockets[self.fileno]
+        self.socket_handler.poll.unregister(sock)
+        sock.close()
+
+    def shutdown(self, val):
+        self.socket.shutdown(val)
+
+    def is_flushed(self):
+        return not self.buffer
+
+    def write(self, s):
+#        self.check.write(s)
+        assert self.socket is not None
+        self.buffer.append(s)
+        if len(self.buffer) == 1:
+            self.try_write()
+
+    def try_write(self):
+        if self.connected:
+            dead = False
+            try:
+                while self.buffer:
+                    buf = self.buffer[0]
+                    amount = self.socket.send(buf)
+                    if amount == 0:
+                        self.skipped += 1
+                        break
+                    self.skipped = 0
+                    if amount != len(buf):
+                        self.buffer[0] = buf[amount:]
+                        break
+                    del self.buffer[0]
+            except socket.error, e:
+                try:
+                    dead = e[0] != EWOULDBLOCK
+                except:
+                    dead = True
+                self.skipped += 1
+            if self.skipped >= 3:
+                dead = True
+            if dead:
+                self.socket_handler.dead_from_write.append(self)
+                return
+        if self.buffer:
+            self.socket_handler.poll.register(self.socket, all)
+        else:
+            self.socket_handler.poll.register(self.socket, POLLIN)
+
+    def set_handler(self, handler):
+        self.handler = handler
+
+class SocketHandler:
+    def __init__(self, timeout, ipv6_enable, readsize = 100000):
+        self.timeout = timeout
+        self.ipv6_enable = ipv6_enable
+        self.readsize = readsize
+        self.poll = poll()
+        # {socket: SingleSocket}
+        self.single_sockets = {}
+        self.dead_from_write = []
+        self.max_connects = 1000
+        self.port_forwarded = None
+        self.servers = {}
+
+    def scan_for_timeouts(self):
+        t = clock() - self.timeout
+        tokill = []
+        for s in self.single_sockets.values():
+            if s.last_hit < t:
+                tokill.append(s)
+        for k in tokill:
+            if k.socket is not None:
+                self._close_socket(k)
+
+    def bind(self, port, bind = '', reuse = False, ipv6_socket_style = 1, upnp = 0):
+        port = int(port)
+        addrinfos = []
+        self.servers = {}
+        self.interfaces = []
+        # if bind != "" treat it as a comma-separated list and bind to all
+        # addresses (can be IPs or hostnames), else bind to the default IPv6
+        # and IPv4 addresses
+        if bind:
+            if self.ipv6_enable:
+                socktype = socket.AF_UNSPEC
+            else:
+                socktype = socket.AF_INET
+            bind = bind.split(',')
+            for addr in bind:
+                if sys.version_info < (2,2):
+                    addrinfos.append((socket.AF_INET, None, None, None, (addr, port)))
+                else:
+                    addrinfos.extend(socket.getaddrinfo(addr, port,
+                                               socktype, socket.SOCK_STREAM))
+        else:
+            if self.ipv6_enable:
+                addrinfos.append([socket.AF_INET6, None, None, None, ('', port)])
+            if not addrinfos or ipv6_socket_style != 0:
+                addrinfos.append([socket.AF_INET, None, None, None, ('', port)])
+        for addrinfo in addrinfos:
+            try:
+                server = socket.socket(addrinfo[0], socket.SOCK_STREAM)
+                if reuse:
+                    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+                server.setblocking(0)
+                server.bind(addrinfo[4])
+                self.servers[server.fileno()] = server
+                if bind:
+                    self.interfaces.append(server.getsockname()[0])
+                server.listen(64)
+                self.poll.register(server, POLLIN)
+            except socket.error, e:
+                for server in self.servers.values():
+                    try:
+                        server.close()
+                    except:
+                        pass
+                if self.ipv6_enable and ipv6_socket_style == 0 and self.servers:
+                    raise socket.error('blocked port (may require ipv6_binds_v4 to be set)')
+                raise socket.error(str(e))
+        if not self.servers:
+            raise socket.error('unable to open server port')
+        if upnp:
+            if not UPnP_open_port(port):
+                for server in self.servers.values():
+                    try:
+                        server.close()
+                    except:
+                        pass
+                    self.servers = None
+                    self.interfaces = None
+                raise socket.error(UPnP_ERROR)
+            self.port_forwarded = port
+        self.port = port
+
+    def find_and_bind(self, minport, maxport, bind = '', reuse = False,
+                      ipv6_socket_style = 1, upnp = 0, randomizer = False):
+        e = 'maxport less than minport - no ports to check'
+        if maxport-minport < 50 or not randomizer:
+            portrange = range(minport, maxport+1)
+            if randomizer:
+                shuffle(portrange)
+                portrange = portrange[:20]  # check a maximum of 20 ports
+        else:
+            portrange = []
+            while len(portrange) < 20:
+                listen_port = randrange(minport, maxport+1)
+                if not listen_port in portrange:
+                    portrange.append(listen_port)
+        for listen_port in portrange:
+            try:
+                self.bind(listen_port, bind,
+                               ipv6_socket_style = ipv6_socket_style, upnp = upnp)
+                return listen_port
+            except socket.error, e:
+                pass
+        raise socket.error(str(e))
+
+
+    def set_handler(self, handler):
+        self.handler = handler
+
+
+    def start_connection_raw(self, dns, socktype = socket.AF_INET, handler = None):
+        if handler is None:
+            handler = self.handler
+        sock = socket.socket(socktype, socket.SOCK_STREAM)
+        sock.setblocking(0)
+        try:
+            sock.connect_ex(dns)
+        except socket.error:
+            raise
+        except Exception, e:
+            raise socket.error(str(e))
+        self.poll.register(sock, POLLIN)
+        s = SingleSocket(self, sock, handler, dns[0])
+        self.single_sockets[sock.fileno()] = s
+        return s
+
+
+    def start_connection(self, dns, handler = None, randomize = False):
+        if handler is None:
+            handler = self.handler
+        if sys.version_info < (2,2):
+            s = self.start_connection_raw(dns,socket.AF_INET,handler)
+        else:
+            if self.ipv6_enable:
+                socktype = socket.AF_UNSPEC
+            else:
+                socktype = socket.AF_INET
+            try:
+                addrinfos = socket.getaddrinfo(dns[0], int(dns[1]),
+                                               socktype, socket.SOCK_STREAM)
+            except socket.error, e:
+                raise
+            except Exception, e:
+                raise socket.error(str(e))
+            if randomize:
+                shuffle(addrinfos)
+            for addrinfo in addrinfos:
+                try:
+                    s = self.start_connection_raw(addrinfo[4],addrinfo[0],handler)
+                    break
+                except:
+                    pass
+            else:
+                raise socket.error('unable to connect')
+        return s
+
+
+    def _sleep(self):
+        sleep(1)
+        
+    def handle_events(self, events):
+        for sock, event in events:
+            s = self.servers.get(sock)
+            if s:
+                if event & (POLLHUP | POLLERR) != 0:
+                    self.poll.unregister(s)
+                    s.close()
+                    del self.servers[sock]
+                    print "lost server socket"
+                elif len(self.single_sockets) < self.max_connects:
+                    try:
+                        newsock, addr = s.accept()
+                        newsock.setblocking(0)
+                        nss = SingleSocket(self, newsock, self.handler)
+                        self.single_sockets[newsock.fileno()] = nss
+                        self.poll.register(newsock, POLLIN)
+                        self.handler.external_connection_made(nss)
+                    except socket.error:
+                        self._sleep()
+            else:
+                s = self.single_sockets.get(sock)
+                if not s:
+                    continue
+                s.connected = True
+                if (event & (POLLHUP | POLLERR)):
+                    self._close_socket(s)
+                    continue
+                if (event & POLLIN):
+                    try:
+                        s.last_hit = clock()
+                        data = s.socket.recv(self.readsize)
+                        if not data:
+                            self._close_socket(s)
+                        else:
+                            s.handler.data_came_in(s, data)
+                    except socket.error, e:
+                        code, msg = e
+                        if code != EWOULDBLOCK:
+                            self._close_socket(s)
+                            continue
+                if (event & POLLOUT) and s.socket and not s.is_flushed():
+                    s.try_write()
+                    if s.is_flushed():
+                        s.handler.connection_flushed(s)
+
+    def close_dead(self):
+        while self.dead_from_write:
+            old = self.dead_from_write
+            self.dead_from_write = []
+            for s in old:
+                if s.socket:
+                    self._close_socket(s)
+
+    def _close_socket(self, s):
+        s.close()
+        s.handler.connection_lost(s)
+
+    def do_poll(self, t):
+        r = self.poll.poll(t*timemult)
+        if r is None:
+            connects = len(self.single_sockets)
+            to_close = int(connects*0.05)+1 # close 5% of sockets
+            self.max_connects = connects-to_close
+            closelist = self.single_sockets.values()
+            shuffle(closelist)
+            closelist = closelist[:to_close]
+            for sock in closelist:
+                self._close_socket(sock)
+            return []
+        return r     
+
+    def get_stats(self):
+        return { 'interfaces': self.interfaces,
+                 'port': self.port,
+                 'upnp': self.port_forwarded is not None }
+
+
+    def shutdown(self):
+        for ss in self.single_sockets.values():
+            try:
+                ss.close()
+            except:
+                pass
+        for server in self.servers.values():
+            try:
+                server.close()
+            except:
+                pass
+        if self.port_forwarded is not None:
+            UPnP_close_port(self.port_forwarded)
+
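
SocketHandler above is the low-level socket/poll wrapper that RawServer owns, but it can also be exercised directly. A tiny sketch (Python 2; the directory containing BitTornado/ must be on sys.path; port 10100 is arbitrary and must be free):

    from BitTornado.SocketHandler import SocketHandler

    sh = SocketHandler(timeout = 300.0, ipv6_enable = False)
    sh.bind(10100, reuse = True)       # plain IPv4 listening socket on port 10100
    print sh.get_stats()               # interfaces, port and UPnP status
    sh.shutdown()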

Added: debtorrent/branches/upstream/current/BitTornado/__init__.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/__init__.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/__init__.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/__init__.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,63 @@
+product_name = 'BitTornado'
+version_short = 'T-0.3.18'
+
+version = version_short+' ('+product_name+')'
+report_email = version_short+'@degreez.net'
+
+from types import StringType
+from sha import sha
+from time import time, clock
+try:
+    from os import getpid
+except ImportError:
+    def getpid():
+        return 1
+
+mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
+
+_idprefix = version_short[0]
+for subver in version_short[2:].split('.'):
+    try:
+        subver = int(subver)
+    except:
+        subver = 0
+    _idprefix += mapbase64[subver]
+_idprefix += ('-' * (6-len(_idprefix)))
+_idrandom = [None]
+
+def resetPeerIDs():
+    try:
+        f = open('/dev/urandom','rb')
+        x = f.read(20)
+        f.close()
+    except:
+        x = ''
+
+    l1 = 0
+    t = clock()
+    while t == clock():
+        l1 += 1
+    l2 = 0
+    t = long(time()*100)
+    while t == long(time()*100):
+        l2 += 1
+    l3 = 0
+    if l2 < 1000:
+        t = long(time()*10)
+        while t == long(clock()*10):
+            l3 += 1
+    x += ( repr(time()) + '/' + str(time()) + '/'
+           + str(l1) + '/' + str(l2) + '/' + str(l3) + '/'
+           + str(getpid()) )
+
+    s = ''
+    for i in sha(x).digest()[-11:]:
+        s += mapbase64[ord(i) & 0x3F]
+    _idrandom[0] = s
+        
+resetPeerIDs()
+
+def createPeerID(ins = '---'):
+    assert type(ins) is StringType
+    assert len(ins) == 3
+    return _idprefix + ins + _idrandom[0]
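
The peer ID built above is 20 bytes: a 6-character version prefix, a 3-character caller tag, and 11 base64 characters derived from /dev/urandom and timing jitter. A quick sketch (Python 2; the directory containing BitTornado/ must be on sys.path; '--S' is an arbitrary tag):

    from BitTornado import createPeerID, version

    peer_id = createPeerID('--S')
    print version                       # T-0.3.18 (BitTornado)
    print repr(peer_id), len(peer_id)   # 6-byte prefix + 3-byte tag + 11 random chars = 20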

Added: debtorrent/branches/upstream/current/BitTornado/bencode.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/bencode.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/bencode.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/bencode.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,319 @@
+# Written by Petru Paler, Uoti Urpala, Ross Cohen and John Hoffman
+# see LICENSE.txt for license information
+
+from types import IntType, LongType, StringType, ListType, TupleType, DictType
+try:
+    from types import BooleanType
+except ImportError:
+    BooleanType = None
+try:
+    from types import UnicodeType
+except ImportError:
+    UnicodeType = None
+from cStringIO import StringIO
+
+def decode_int(x, f):
+    f += 1
+    newf = x.index('e', f)
+    try:
+        n = int(x[f:newf])
+    except:
+        n = long(x[f:newf])
+    if x[f] == '-':
+        if x[f + 1] == '0':
+            raise ValueError
+    elif x[f] == '0' and newf != f+1:
+        raise ValueError
+    return (n, newf+1)
+  
+def decode_string(x, f):
+    colon = x.index(':', f)
+    try:
+        n = int(x[f:colon])
+    except (OverflowError, ValueError):
+        n = long(x[f:colon])
+    if x[f] == '0' and colon != f+1:
+        raise ValueError
+    colon += 1
+    return (x[colon:colon+n], colon+n)
+
+def decode_unicode(x, f):
+    s, f = decode_string(x, f+1)
+    return (s.decode('UTF-8'),f)
+
+def decode_list(x, f):
+    r, f = [], f+1
+    while x[f] != 'e':
+        v, f = decode_func[x[f]](x, f)
+        r.append(v)
+    return (r, f + 1)
+
+def decode_dict(x, f):
+    r, f = {}, f+1
+    lastkey = None
+    while x[f] != 'e':
+        k, f = decode_string(x, f)
+        if lastkey >= k:
+            raise ValueError
+        lastkey = k
+        r[k], f = decode_func[x[f]](x, f)
+    return (r, f + 1)
+
+decode_func = {}
+decode_func['l'] = decode_list
+decode_func['d'] = decode_dict
+decode_func['i'] = decode_int
+decode_func['0'] = decode_string
+decode_func['1'] = decode_string
+decode_func['2'] = decode_string
+decode_func['3'] = decode_string
+decode_func['4'] = decode_string
+decode_func['5'] = decode_string
+decode_func['6'] = decode_string
+decode_func['7'] = decode_string
+decode_func['8'] = decode_string
+decode_func['9'] = decode_string
+#decode_func['u'] = decode_unicode
+  
+def bdecode(x, sloppy = 0):
+    try:
+        r, l = decode_func[x[0]](x, 0)
+#    except (IndexError, KeyError):
+    except (IndexError, KeyError, ValueError):
+        raise ValueError, "bad bencoded data"
+    if not sloppy and l != len(x):
+        raise ValueError, "bad bencoded data"
+    return r
+
+def test_bdecode():
+    try:
+        bdecode('0:0:')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('ie')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('i341foo382e')
+        assert 0
+    except ValueError:
+        pass
+    assert bdecode('i4e') == 4L
+    assert bdecode('i0e') == 0L
+    assert bdecode('i123456789e') == 123456789L
+    assert bdecode('i-10e') == -10L
+    try:
+        bdecode('i-0e')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('i123')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('i6easd')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('35208734823ljdahflajhdf')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('2:abfdjslhfld')
+        assert 0
+    except ValueError:
+        pass
+    assert bdecode('0:') == ''
+    assert bdecode('3:abc') == 'abc'
+    assert bdecode('10:1234567890') == '1234567890'
+    try:
+        bdecode('02:xy')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('l')
+        assert 0
+    except ValueError:
+        pass
+    assert bdecode('le') == []
+    try:
+        bdecode('leanfdldjfh')
+        assert 0
+    except ValueError:
+        pass
+    assert bdecode('l0:0:0:e') == ['', '', '']
+    try:
+        bdecode('relwjhrlewjh')
+        assert 0
+    except ValueError:
+        pass
+    assert bdecode('li1ei2ei3ee') == [1, 2, 3]
+    assert bdecode('l3:asd2:xye') == ['asd', 'xy']
+    assert bdecode('ll5:Alice3:Bobeli2ei3eee') == [['Alice', 'Bob'], [2, 3]]
+    try:
+        bdecode('d')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('defoobar')
+        assert 0
+    except ValueError:
+        pass
+    assert bdecode('de') == {}
+    assert bdecode('d3:agei25e4:eyes4:bluee') == {'age': 25, 'eyes': 'blue'}
+    assert bdecode('d8:spam.mp3d6:author5:Alice6:lengthi100000eee') == {'spam.mp3': {'author': 'Alice', 'length': 100000}}
+    try:
+        bdecode('d3:fooe')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('di1e0:e')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('d1:b0:1:a0:e')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('d1:a0:1:a0:e')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('i03e')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('l01:ae')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('9999:x')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('l0:')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('d0:0:')
+        assert 0
+    except ValueError:
+        pass
+    try:
+        bdecode('d0:')
+        assert 0
+    except ValueError:
+        pass
+
+bencached_marker = []
+
+class Bencached:
+    def __init__(self, s):
+        self.marker = bencached_marker
+        self.bencoded = s
+
+BencachedType = type(Bencached('')) # insufficient, but good as a filter
+
+def encode_bencached(x,r):
+    assert x.marker == bencached_marker
+    r.append(x.bencoded)
+
+def encode_int(x,r):
+    r.extend(('i',str(x),'e'))
+
+def encode_bool(x,r):
+    encode_int(int(x),r)
+
+def encode_string(x,r):    
+    r.extend((str(len(x)),':',x))
+
+def encode_unicode(x,r):
+    #r.append('u')
+    encode_string(x.encode('UTF-8'),r)
+
+def encode_list(x,r):
+        r.append('l')
+        for e in x:
+            encode_func[type(e)](e, r)
+        r.append('e')
+
+def encode_dict(x,r):
+    r.append('d')
+    ilist = x.items()
+    ilist.sort()
+    for k,v in ilist:
+        r.extend((str(len(k)),':',k))
+        encode_func[type(v)](v, r)
+    r.append('e')
+
+encode_func = {}
+encode_func[BencachedType] = encode_bencached
+encode_func[IntType] = encode_int
+encode_func[LongType] = encode_int
+encode_func[StringType] = encode_string
+encode_func[ListType] = encode_list
+encode_func[TupleType] = encode_list
+encode_func[DictType] = encode_dict
+if BooleanType:
+    encode_func[BooleanType] = encode_bool
+if UnicodeType:
+    encode_func[UnicodeType] = encode_unicode
+    
+def bencode(x):
+    r = []
+    try:
+        encode_func[type(x)](x, r)
+    except:
+        print "*** error *** could not encode type %s (value: %s)" % (type(x), x)
+        assert 0
+    return ''.join(r)
+
+def test_bencode():
+    assert bencode(4) == 'i4e'
+    assert bencode(0) == 'i0e'
+    assert bencode(-10) == 'i-10e'
+    assert bencode(12345678901234567890L) == 'i12345678901234567890e'
+    assert bencode('') == '0:'
+    assert bencode('abc') == '3:abc'
+    assert bencode('1234567890') == '10:1234567890'
+    assert bencode([]) == 'le'
+    assert bencode([1, 2, 3]) == 'li1ei2ei3ee'
+    assert bencode([['Alice', 'Bob'], [2, 3]]) == 'll5:Alice3:Bobeli2ei3eee'
+    assert bencode({}) == 'de'
+    assert bencode({'age': 25, 'eyes': 'blue'}) == 'd3:agei25e4:eyes4:bluee'
+    assert bencode({'spam.mp3': {'author': 'Alice', 'length': 100000}}) == 'd8:spam.mp3d6:author5:Alice6:lengthi100000eee'
+    try:
+        bencode({1: 'foo'})
+        assert 0
+    except AssertionError:
+        pass
+
+  
+try:
+    import psyco
+    psyco.bind(bdecode)
+    psyco.bind(bencode)
+except ImportError:
+    pass
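
A quick round-trip with the encoder/decoder above, along the lines of test_bencode (Python 2; the directory containing BitTornado/ must be on sys.path; the metainfo dict is a made-up example):

    from BitTornado.bencode import bencode, bdecode

    meta = {'announce': 'http://tracker.example/announce',
            'info': {'name': 'file.iso', 'length': 12345, 'piece length': 2 ** 18}}
    wire = bencode(meta)
    print wire                      # d8:announce31:http://...
    assert bdecode(wire) == meta    # decoding restores the original dict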

Added: debtorrent/branches/upstream/current/BitTornado/bitfield.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/bitfield.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/bitfield.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/bitfield.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,162 @@
+# Written by Bram Cohen, Uoti Urpala, and John Hoffman
+# see LICENSE.txt for license information
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+    bool = lambda x: not not x
+
+try:
+    sum([1])
+    negsum = lambda a: len(a)-sum(a)
+except:
+    negsum = lambda a: reduce(lambda x,y: x+(not y), a, 0)
+    
+def _int_to_booleans(x):
+    r = []
+    for i in range(8):
+        r.append(bool(x & 0x80))
+        x <<= 1
+    return tuple(r)
+
+lookup_table = []
+reverse_lookup_table = {}
+for i in xrange(256):
+    x = _int_to_booleans(i)
+    lookup_table.append(x)
+    reverse_lookup_table[x] = chr(i)
+
+
+class Bitfield:
+    def __init__(self, length = None, bitstring = None, copyfrom = None):
+        if copyfrom is not None:
+            self.length = copyfrom.length
+            self.array = copyfrom.array[:]
+            self.numfalse = copyfrom.numfalse
+            return
+        if length is None:
+            raise ValueError, "length must be provided unless copying from another array"
+        self.length = length
+        if bitstring is not None:
+            extra = len(bitstring) * 8 - length
+            if extra < 0 or extra >= 8:
+                raise ValueError
+            t = lookup_table
+            r = []
+            for c in bitstring:
+                r.extend(t[ord(c)])
+            if extra > 0:
+                if r[-extra:] != [0] * extra:
+                    raise ValueError
+                del r[-extra:]
+            self.array = r
+            self.numfalse = negsum(r)
+        else:
+            self.array = [False] * length
+            self.numfalse = length
+
+    def __setitem__(self, index, val):
+        val = bool(val)
+        self.numfalse += self.array[index]-val
+        self.array[index] = val
+
+    def __getitem__(self, index):
+        return self.array[index]
+
+    def __len__(self):
+        return self.length
+
+    def tostring(self):
+        booleans = self.array
+        t = reverse_lookup_table
+        s = len(booleans) % 8
+        r = [ t[tuple(booleans[x:x+8])] for x in xrange(0, len(booleans)-s, 8) ]
+        if s:
+            r += t[tuple(booleans[-s:] + ([0] * (8-s)))]
+        return ''.join(r)
+
+    def complete(self):
+        return not self.numfalse
+
+
+def test_bitfield():
+    try:
+        x = Bitfield(7, 'ab')
+        assert False
+    except ValueError:
+        pass
+    try:
+        x = Bitfield(7, 'ab')
+        assert False
+    except ValueError:
+        pass
+    try:
+        x = Bitfield(9, 'abc')
+        assert False
+    except ValueError:
+        pass
+    try:
+        x = Bitfield(0, 'a')
+        assert False
+    except ValueError:
+        pass
+    try:
+        x = Bitfield(1, '')
+        assert False
+    except ValueError:
+        pass
+    try:
+        x = Bitfield(7, '')
+        assert False
+    except ValueError:
+        pass
+    try:
+        x = Bitfield(8, '')
+        assert False
+    except ValueError:
+        pass
+    try:
+        x = Bitfield(9, 'a')
+        assert False
+    except ValueError:
+        pass
+    try:
+        x = Bitfield(7, chr(1))
+        assert False
+    except ValueError:
+        pass
+    try:
+        x = Bitfield(9, chr(0) + chr(0x40))
+        assert False
+    except ValueError:
+        pass
+    assert Bitfield(0, '').tostring() == ''
+    assert Bitfield(1, chr(0x80)).tostring() == chr(0x80)
+    assert Bitfield(7, chr(0x02)).tostring() == chr(0x02)
+    assert Bitfield(8, chr(0xFF)).tostring() == chr(0xFF)
+    assert Bitfield(9, chr(0) + chr(0x80)).tostring() == chr(0) + chr(0x80)
+    x = Bitfield(1)
+    assert x.numfalse == 1
+    x[0] = 1
+    assert x.numfalse == 0
+    x[0] = 1
+    assert x.numfalse == 0
+    assert x.tostring() == chr(0x80)
+    x = Bitfield(7)
+    assert len(x) == 7
+    x[6] = 1
+    assert x.numfalse == 6
+    assert x.tostring() == chr(0x02)
+    x = Bitfield(8)
+    x[7] = 1
+    assert x.tostring() == chr(1)
+    x = Bitfield(9)
+    x[8] = 1
+    assert x.numfalse == 8
+    assert x.tostring() == chr(0) + chr(0x80)
+    x = Bitfield(8, chr(0xC4))
+    assert len(x) == 8
+    assert x.numfalse == 5
+    assert x.tostring() == chr(0xC4)
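
Bitfield above maps the wire-format BITFIELD payload to a list of booleans and back. A small sketch in the spirit of test_bitfield (Python 2; the directory containing BitTornado/ must be on sys.path):

    from BitTornado.bitfield import Bitfield

    have = Bitfield(10)                 # empty field for a 10-piece torrent
    have[0] = 1
    have[9] = 1
    print have.numfalse                 # 8 pieces still missing
    wire = have.tostring()              # 2 bytes, ready for a BITFIELD message
    copy = Bitfield(10, wire)           # parse it back from the wire form
    assert copy[0] and copy[9] and not copy.complete()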

Added: debtorrent/branches/upstream/current/BitTornado/clock.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/clock.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/clock.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/clock.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,27 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from time import *
+import sys
+
+_MAXFORWARD = 100
+_FUDGE = 1
+
+class RelativeTime:
+    def __init__(self):
+        self.time = time()
+        self.offset = 0
+
+    def get_time(self):        
+        t = time() + self.offset
+        if t < self.time or t > self.time + _MAXFORWARD:
+            self.time += _FUDGE
+            self.offset += self.time - t
+            return self.time
+        self.time = t
+        return t
+
+if sys.platform != 'win32':
+    _RTIME = RelativeTime()
+    def clock():
+        return _RTIME.get_time()
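
clock() above is a wrapper around time() that never steps backwards and clamps large forward jumps (on win32 the star import leaves time.clock in place instead). A trivial sketch (Python 2; the directory containing BitTornado/ must be on sys.path):

    from time import sleep
    from BitTornado.clock import clock

    t0 = clock()
    sleep(0.2)
    print clock() - t0   # roughly 0.2, and never negative even if the system clock steps back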

Added: debtorrent/branches/upstream/current/BitTornado/download_bt1.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/download_bt1.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/download_bt1.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/download_bt1.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,877 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from zurllib import urlopen
+from urlparse import urlparse
+from BT1.btformats import check_message
+from BT1.Choker import Choker
+from BT1.Storage import Storage
+from BT1.StorageWrapper import StorageWrapper
+from BT1.FileSelector import FileSelector
+from BT1.Uploader import Upload
+from BT1.Downloader import Downloader
+from BT1.HTTPDownloader import HTTPDownloader
+from BT1.Connecter import Connecter
+from RateLimiter import RateLimiter
+from BT1.Encrypter import Encoder
+from RawServer import RawServer, autodetect_ipv6, autodetect_socket_style
+from BT1.Rerequester import Rerequester
+from BT1.DownloaderFeedback import DownloaderFeedback
+from RateMeasure import RateMeasure
+from CurrentRateMeasure import Measure
+from BT1.PiecePicker import PiecePicker
+from BT1.Statistics import Statistics
+from ConfigDir import ConfigDir
+from bencode import bencode, bdecode
+from natpunch import UPnP_test
+from sha import sha
+from os import path, makedirs, listdir
+from parseargs import parseargs, formatDefinitions, defaultargs
+from socket import error as socketerror
+from random import seed
+from threading import Thread, Event
+from clock import clock
+from BTcrypto import CRYPTO_OK
+from __init__ import createPeerID
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+defaults = [
+    ('max_uploads', 7,
+        "the maximum number of uploads to allow at once."),
+    ('keepalive_interval', 120.0,
+        'number of seconds to pause between sending keepalives'),
+    ('download_slice_size', 2 ** 14,
+        "How many bytes to query for per request."),
+    ('upload_unit_size', 1460,
+        "when limiting upload rate, how many bytes to send at a time"),
+    ('request_backlog', 10,
+        "maximum number of requests to keep in a single pipe at once."),
+    ('max_message_length', 2 ** 23,
+        "maximum length prefix encoding you'll accept over the wire - larger values get the connection dropped."),
+    ('ip', '',
+        "ip to report you have to the tracker."),
+    ('minport', 10000, 'minimum port to listen on, counts up if unavailable'),
+    ('maxport', 60000, 'maximum port to listen on'),
+    ('random_port', 1, 'whether to choose randomly inside the port range ' +
+        'instead of counting up linearly'),
+    ('responsefile', '',
+        'file the server response was stored in, alternative to url'),
+    ('url', '',
+        'url to get file from, alternative to responsefile'),
+    ('crypto_allowed', int(CRYPTO_OK),
+        'whether to allow the client to accept encrypted connections'),
+    ('crypto_only', 0,
+        'whether to only create or allow encrypted connections'),
+    ('crypto_stealth', 0,
+        'whether to prevent all non-encrypted connection attempts; ' +
+        'will result in an effectively firewalled state on older trackers'),
+    ('selector_enabled', 1,
+        'whether to enable the file selector and fast resume function'),
+    ('expire_cache_data', 10,
+        'the number of days after which you wish to expire old cache data ' +
+        '(0 = disabled)'),
+    ('priority', '',
+        'a list of file priorities separated by commas, must be one per file, ' +
+        '0 = highest, 1 = normal, 2 = lowest, -1 = download disabled'),
+    ('saveas', '',
+        'local file name to save the file as, null indicates query user'),
+    ('timeout', 300.0,
+        'time to wait between closing sockets which nothing has been received on'),
+    ('timeout_check_interval', 60.0,
+        'time to wait between checking if any connections have timed out'),
+    ('max_slice_length', 2 ** 17,
+        "maximum length slice to send to peers, larger requests are ignored"),
+    ('max_rate_period', 20.0,
+        "maximum amount of time to guess the current rate estimate represents"),
+    ('bind', '', 
+        'comma-separated list of ips/hostnames to bind to locally'),
+#    ('ipv6_enabled', autodetect_ipv6(),
+    ('ipv6_enabled', 0,
+         'allow the client to connect to peers via IPv6'),
+    ('ipv6_binds_v4', autodetect_socket_style(),
+        "set if an IPv6 server socket won't also field IPv4 connections"),
+    ('upnp_nat_access', 1,
+        'attempt to autoconfigure a UPnP router to forward a server port ' +
+        '(0 = disabled, 1 = mode 1 [fast], 2 = mode 2 [slow])'),
+    ('upload_rate_fudge', 5.0, 
+        'time equivalent of writing to kernel-level TCP buffer, for rate adjustment'),
+    ('tcp_ack_fudge', 0.03,
+        'how much TCP ACK download overhead to add to upload rate calculations ' +
+        '(0 = disabled)'),
+    ('display_interval', .5,
+        'time between updates of displayed information'),
+    ('rerequest_interval', 5 * 60,
+        'time to wait between requesting more peers'),
+    ('min_peers', 20, 
+        'minimum number of peers to not do rerequesting'),
+    ('http_timeout', 60, 
+        'number of seconds to wait before assuming that an http connection has timed out'),
+    ('max_initiate', 40,
+        'number of peers at which to stop initiating new connections'),
+    ('check_hashes', 1,
+        'whether to check hashes on disk'),
+    ('max_upload_rate', 0,
+        'maximum kB/s to upload at (0 = no limit, -1 = automatic)'),
+    ('max_download_rate', 0,
+        'maximum kB/s to download at (0 = no limit)'),
+    ('alloc_type', 'normal',
+        'allocation type (may be normal, background, pre-allocate or sparse)'),
+    ('alloc_rate', 2.0,
+        'rate (in MiB/s) to allocate space at using background allocation'),
+    ('buffer_reads', 1,
+        'whether to buffer disk reads'),
+    ('write_buffer_size', 4,
+        'the maximum amount of space to use for buffering disk writes ' +
+        '(in megabytes, 0 = disabled)'),
+    ('breakup_seed_bitfield', 1,
+        'sends an incomplete bitfield and then fills with have messages, '
+        'in order to get around stupid ISP manipulation'),
+    ('snub_time', 30.0,
+        "seconds to wait for data to come in over a connection before assuming it's semi-permanently choked"),
+    ('spew', 0,
+        "whether to display diagnostic info to stdout"),
+    ('rarest_first_cutoff', 2,
+        "number of downloads at which to switch from random to rarest first"),
+    ('rarest_first_priority_cutoff', 5,
+        'the number of peers which need to have a piece before other partials take priority over rarest first'),
+    ('min_uploads', 4,
+        "the number of uploads to fill out to with extra optimistic unchokes"),
+    ('max_files_open', 50,
+        'the maximum number of files to keep open at a time, 0 means no limit'),
+    ('round_robin_period', 30,
+        "the number of seconds between the client's switching upload targets"),
+    ('super_seeder', 0,
+        "whether to use special upload-efficiency-maximizing routines (only for dedicated seeds)"),
+    ('security', 1,
+        "whether to enable extra security features intended to prevent abuse"),
+    ('max_connections', 0,
+        "the absolute maximum number of peers to connect with (0 = no limit)"),
+    ('auto_kick', 1,
+        "whether to allow the client to automatically kick/ban peers that send bad data"),
+    ('double_check', 1,
+        "whether to double-check data being written to the disk for errors (may increase CPU load)"),
+    ('triple_check', 0,
+        "whether to thoroughly check data being written to the disk (may slow disk access)"),
+    ('lock_files', 1,
+        "whether to lock files the client is working with"),
+    ('lock_while_reading', 0,
+        "whether to lock access to files being read"),
+    ('auto_flush', 0,
+        "minutes between automatic flushes to disk (0 = disabled)"),
+    ('dedicated_seed_id', '',
+        "code to send to tracker identifying as a dedicated seed"),
+    ]
+
+argslistheader = 'Arguments are:\n\n'
+
+
+def _failfunc(x):
+    print x
+
+# old-style downloader
+def download(params, filefunc, statusfunc, finfunc, errorfunc, doneflag, cols,
+             pathFunc = None, presets = {}, exchandler = None,
+             failed = _failfunc, paramfunc = None):
+
+    try:
+        config = parse_params(params, presets)
+    except ValueError, e:
+        failed('error: ' + str(e) + '\nrun with no args for parameter explanations')
+        return
+    if not config:
+        errorfunc(get_usage())
+        return
+    
+    myid = createPeerID()
+    seed(myid)
+
+    rawserver = RawServer(doneflag, config['timeout_check_interval'],
+                          config['timeout'], ipv6_enable = config['ipv6_enabled'],
+                          failfunc = failed, errorfunc = exchandler)
+
+    upnp_type = UPnP_test(config['upnp_nat_access'])
+    try:
+        listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
+                        config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
+                        upnp = upnp_type, randomizer = config['random_port'])
+    except socketerror, e:
+        failed("Couldn't listen - " + str(e))
+        return
+
+    response = get_response(config['responsefile'], config['url'], failed)
+    if not response:
+        return
+
+    infohash = sha(bencode(response['info'])).digest()
+
+    d = BT1Download(statusfunc, finfunc, errorfunc, exchandler, doneflag,
+                    config, response, infohash, myid, rawserver, listen_port)
+
+    if not d.saveAs(filefunc):
+        return
+
+    if pathFunc:
+        pathFunc(d.getFilename())
+
+    hashcheck = d.initFiles(old_style = True)
+    if not hashcheck:
+        return
+    if not hashcheck():
+        return
+    if not d.startEngine():
+        return
+    d.startRerequester()
+    d.autoStats()
+
+    statusfunc(activity = 'connecting to peers')
+
+    if paramfunc:
+        paramfunc({ 'max_upload_rate' : d.setUploadRate,  # change_max_upload_rate(<int KiB/sec>)
+                    'max_uploads': d.setConns, # change_max_uploads(<int max uploads>)
+                    'listen_port' : listen_port, # int
+                    'peer_id' : myid, # string
+                    'info_hash' : infohash, # string
+                    'start_connection' : d._startConnection, # start_connection((<string ip>, <int port>), <peer id>)
+                    })
+        
+    rawserver.listen_forever(d.getPortHandler())
+    
+    d.shutdown()
+
+
+def parse_params(params, presets = {}):
+    if len(params) == 0:
+        return None
+    config, args = parseargs(params, defaults, 0, 1, presets = presets)
+    if args:
+        if config['responsefile'] or config['url']:
+            raise ValueError,'must have responsefile or url as arg or parameter, not both'
+        if path.isfile(args[0]):
+            config['responsefile'] = args[0]
+        else:
+            try:
+                urlparse(args[0])
+            except:
+                raise ValueError, 'bad filename or url'
+            config['url'] = args[0]
+    elif (config['responsefile'] == '') == (config['url'] == ''):
+        raise ValueError, 'need responsefile or url, must have one, cannot have both'
+    return config
+
+
+def get_usage(defaults = defaults, cols = 100, presets = {}):
+    return (argslistheader + formatDefinitions(defaults, cols, presets))
+
+
+def get_response(file, url, errorfunc):
+    try:
+        if file:
+            h = open(file, 'rb')
+            try:
+                line = h.read(10)   # quick test to see if responsefile contains a dict
+                front,garbage = line.split(':',1)
+                assert front[0] == 'd'
+                int(front[1:])
+            except:
+                errorfunc(file+' is not a valid responsefile')
+                return None
+            try:
+                h.seek(0)
+            except:
+                try:
+                    h.close()
+                except:
+                    pass
+                h = open(file, 'rb')
+        else:
+            try:
+                h = urlopen(url)
+            except:
+                errorfunc(url+' bad url')
+                return None
+        response = h.read()
+    
+    except IOError, e:
+        errorfunc('problem getting response info - ' + str(e))
+        return None
+    try:    
+        h.close()
+    except:
+        pass
+    try:
+        try:
+            response = bdecode(response)
+        except:
+            errorfunc("warning: bad data in responsefile")
+            response = bdecode(response, sloppy=1)
+        check_message(response)
+    except ValueError, e:
+        errorfunc("got bad file info - " + str(e))
+        return None
+
+    return response
+
+
+class BT1Download:    
+    def __init__(self, statusfunc, finfunc, errorfunc, excfunc, doneflag,
+                 config, response, infohash, id, rawserver, port,
+                 appdataobj = None):
+        self.statusfunc = statusfunc
+        self.finfunc = finfunc
+        self.errorfunc = errorfunc
+        self.excfunc = excfunc
+        self.doneflag = doneflag
+        self.config = config
+        self.response = response
+        self.infohash = infohash
+        self.myid = id
+        self.rawserver = rawserver
+        self.port = port
+        
+        self.info = self.response['info']
+        self.pieces = [self.info['pieces'][x:x+20]
+                       for x in xrange(0, len(self.info['pieces']), 20)]
+        self.len_pieces = len(self.pieces)
+        self.argslistheader = argslistheader
+        self.unpauseflag = Event()
+        self.unpauseflag.set()
+        self.downloader = None
+        self.storagewrapper = None
+        self.fileselector = None
+        self.super_seeding_active = False
+        self.filedatflag = Event()
+        self.spewflag = Event()
+        self.superseedflag = Event()
+        self.whenpaused = None
+        self.finflag = Event()
+        self.rerequest = None
+        self.tcp_ack_fudge = config['tcp_ack_fudge']
+
+        self.selector_enabled = config['selector_enabled']
+        if appdataobj:
+            self.appdataobj = appdataobj
+        elif self.selector_enabled:
+            self.appdataobj = ConfigDir()
+            self.appdataobj.deleteOldCacheData( config['expire_cache_data'],
+                                                [self.infohash] )
+
+        self.excflag = self.rawserver.get_exception_flag()
+        self.failed = False
+        self.checking = False
+        self.started = False
+
+        self.picker = PiecePicker(self.len_pieces, config['rarest_first_cutoff'],
+                             config['rarest_first_priority_cutoff'])
+        self.choker = Choker(config, rawserver.add_task,
+                             self.picker, self.finflag.isSet)
+
+
+    def checkSaveLocation(self, loc):
+        if self.info.has_key('length'):
+            return path.exists(loc)
+        for x in self.info['files']:
+            if path.exists(path.join(loc, x['path'][0])):
+                return True
+        return False
+                
+
+    def saveAs(self, filefunc, pathfunc = None):
+        try:
+            def make(f, forcedir = False):
+                if not forcedir:
+                    f = path.split(f)[0]
+                if f != '' and not path.exists(f):
+                    makedirs(f)
+
+            if self.info.has_key('length'):
+                file_length = self.info['length']
+                file = filefunc(self.info['name'], file_length,
+                                self.config['saveas'], False)
+                if file is None:
+                    return None
+                make(file)
+                files = [(file, file_length)]
+            else:
+                file_length = 0L
+                for x in self.info['files']:
+                    file_length += x['length']
+                file = filefunc(self.info['name'], file_length,
+                                self.config['saveas'], True)
+                if file is None:
+                    return None
+
+                # if this path exists, and no files from the info dict exist, we assume it's a new download and 
+                # the user wants to create a new directory with the default name
+                existing = 0
+                if path.exists(file):
+                    if not path.isdir(file):
+                        self.errorfunc(file + ' is not a dir')
+                        return None
+                    if len(listdir(file)) > 0:  # if it's not empty
+                        for x in self.info['files']:
+                            if path.exists(path.join(file, x['path'][0])):
+                                existing = 1
+                        if not existing:
+                            file = path.join(file, self.info['name'])
+                            if path.exists(file) and not path.isdir(file):
+                                if file[-8:] == '.torrent':
+                                    file = file[:-8]
+                                if path.exists(file) and not path.isdir(file):
+                                    self.errorfunc("Can't create dir - " + self.info['name'])
+                                    return None
+                make(file, True)
+
+                # alert the UI to any possible change in path
+                if pathfunc != None:
+                    pathfunc(file)
+
+                files = []
+                for x in self.info['files']:
+                    n = file
+                    for i in x['path']:
+                        n = path.join(n, i)
+                    files.append((n, x['length']))
+                    make(n)
+        except OSError, e:
+            self.errorfunc("Couldn't allocate dir - " + str(e))
+            return None
+
+        self.filename = file
+        self.files = files
+        self.datalength = file_length
+
+        return file
+    
+
+    def getFilename(self):
+        return self.filename
+
+
+    def _finished(self):
+        self.finflag.set()
+        try:
+            self.storage.set_readonly()
+        except (IOError, OSError), e:
+            self.errorfunc('trouble setting readonly at end - ' + str(e))
+        if self.superseedflag.isSet():
+            self._set_super_seed()
+        self.choker.set_round_robin_period(
+            max( self.config['round_robin_period'],
+                 self.config['round_robin_period'] *
+                                     self.info['piece length'] / 200000 ) )
+        self.rerequest_complete()
+        self.finfunc()
+
+    def _data_flunked(self, amount, index):
+        self.ratemeasure_datarejected(amount)
+        if not self.doneflag.isSet():
+            self.errorfunc('piece %d failed hash check, re-downloading it' % index)
+
+    def _failed(self, reason):
+        self.failed = True
+        self.doneflag.set()
+        if reason is not None:
+            self.errorfunc(reason)
+        
+
+    def initFiles(self, old_style = False, statusfunc = None):
+        if self.doneflag.isSet():
+            return None
+        if not statusfunc:
+            statusfunc = self.statusfunc
+
+        disabled_files = None
+        if self.selector_enabled:
+            self.priority = self.config['priority']
+            if self.priority:
+                try:
+                    self.priority = self.priority.split(',')
+                    assert len(self.priority) == len(self.files)
+                    self.priority = [int(p) for p in self.priority]
+                    for p in self.priority:
+                        assert p >= -1
+                        assert p <= 2
+                except:
+                    self.errorfunc('bad priority list given, ignored')
+                    self.priority = None
+
+            data = self.appdataobj.getTorrentData(self.infohash)
+            try:
+                d = data['resume data']['priority']
+                assert len(d) == len(self.files)
+                disabled_files = [x == -1 for x in d]
+            except:
+                try:
+                    disabled_files = [x == -1 for x in self.priority]
+                except:
+                    pass
+
+        try:
+            try:
+                self.storage = Storage(self.files, self.info['piece length'],
+                                       self.doneflag, self.config, disabled_files)
+            except IOError, e:
+                self.errorfunc('trouble accessing files - ' + str(e))
+                return None
+            if self.doneflag.isSet():
+                return None
+
+            self.storagewrapper = StorageWrapper(self.storage, self.config['download_slice_size'],
+                self.pieces, self.info['piece length'], self._finished, self._failed,
+                statusfunc, self.doneflag, self.config['check_hashes'],
+                self._data_flunked, self.rawserver.add_task,
+                self.config, self.unpauseflag)
+            
+        except ValueError, e:
+            self._failed('bad data - ' + str(e))
+        except IOError, e:
+            self._failed('IOError - ' + str(e))
+        if self.doneflag.isSet():
+            return None
+
+        if self.selector_enabled:
+            self.fileselector = FileSelector(self.files, self.info['piece length'],
+                                             self.appdataobj.getPieceDir(self.infohash),
+                                             self.storage, self.storagewrapper,
+                                             self.rawserver.add_task,
+                                             self._failed)
+            if data:
+                data = data.get('resume data')
+                if data:
+                    self.fileselector.unpickle(data)
+                
+        self.checking = True
+        if old_style:
+            return self.storagewrapper.old_style_init()
+        return self.storagewrapper.initialize
+
+
+    def getCachedTorrentData(self):
+        return self.appdataobj.getTorrentData(self.infohash)
+
+
+    def _make_upload(self, connection, ratelimiter, totalup):
+        return Upload(connection, ratelimiter, totalup,
+                      self.choker, self.storagewrapper, self.picker,
+                      self.config)
+
+    def _kick_peer(self, connection):
+        def k(connection = connection):
+            connection.close()
+        self.rawserver.add_task(k,0)
+
+    def _ban_peer(self, ip):
+        self.encoder_ban(ip)
+
+    def _received_raw_data(self, x):
+        if self.tcp_ack_fudge:
+            x = int(x*self.tcp_ack_fudge)
+            self.ratelimiter.adjust_sent(x)
+
+    def _received_data(self, x):
+        self.downmeasure.update_rate(x)
+        self.ratemeasure.data_came_in(x)
+
+    def _received_http_data(self, x):
+        self.downmeasure.update_rate(x)
+        self.ratemeasure.data_came_in(x)
+        self.downloader.external_data_received(x)
+
+    def _cancelfunc(self, pieces):
+        self.downloader.cancel_piece_download(pieces)
+        self.httpdownloader.cancel_piece_download(pieces)
+    def _reqmorefunc(self, pieces):
+        self.downloader.requeue_piece_download(pieces)
+
+    def startEngine(self, ratelimiter = None, statusfunc = None):
+        if self.doneflag.isSet():
+            return False
+        if not statusfunc:
+            statusfunc = self.statusfunc
+
+        self.checking = False
+
+        if not CRYPTO_OK:
+            if self.config['crypto_allowed']:
+                self.errorfunc('warning - crypto library not installed')
+            self.config['crypto_allowed'] = 0
+            self.config['crypto_only'] = 0
+            self.config['crypto_stealth'] = 0
+
+        for i in xrange(self.len_pieces):
+            if self.storagewrapper.do_I_have(i):
+                self.picker.complete(i)
+        self.upmeasure = Measure(self.config['max_rate_period'],
+                            self.config['upload_rate_fudge'])
+        self.downmeasure = Measure(self.config['max_rate_period'])
+
+        if ratelimiter:
+            self.ratelimiter = ratelimiter
+        else:
+            self.ratelimiter = RateLimiter(self.rawserver.add_task,
+                                           self.config['upload_unit_size'],
+                                           self.setConns)
+            self.ratelimiter.set_upload_rate(self.config['max_upload_rate'])
+        
+        self.ratemeasure = RateMeasure()
+        self.ratemeasure_datarejected = self.ratemeasure.data_rejected
+
+        self.downloader = Downloader(self.storagewrapper, self.picker,
+            self.config['request_backlog'], self.config['max_rate_period'],
+            self.len_pieces, self.config['download_slice_size'],
+            self._received_data, self.config['snub_time'], self.config['auto_kick'],
+            self._kick_peer, self._ban_peer)
+        self.downloader.set_download_rate(self.config['max_download_rate'])
+        self.connecter = Connecter(self._make_upload, self.downloader, self.choker,
+                            self.len_pieces, self.upmeasure, self.config,
+                            self.ratelimiter, self.rawserver.add_task)
+        self.encoder = Encoder(self.connecter, self.rawserver,
+            self.myid, self.config['max_message_length'], self.rawserver.add_task,
+            self.config['keepalive_interval'], self.infohash,
+            self._received_raw_data, self.config)
+        self.encoder_ban = self.encoder.ban
+
+        self.httpdownloader = HTTPDownloader(self.storagewrapper, self.picker,
+            self.rawserver, self.finflag, self.errorfunc, self.downloader,
+            self.config['max_rate_period'], self.infohash, self._received_http_data,
+            self.connecter.got_piece)
+        if self.response.has_key('httpseeds') and not self.finflag.isSet():
+            for u in self.response['httpseeds']:
+                self.httpdownloader.make_download(u)
+
+        if self.selector_enabled:
+            self.fileselector.tie_in(self.picker, self._cancelfunc,
+                    self._reqmorefunc, self.rerequest_ondownloadmore)
+            if self.priority:
+                self.fileselector.set_priorities_now(self.priority)
+            self.appdataobj.deleteTorrentData(self.infohash)
+                                # erase old data once you've started modifying it
+
+        if self.config['super_seeder']:
+            self.set_super_seed()
+
+        self.started = True
+        return True
+
+
+    def rerequest_complete(self):
+        if self.rerequest:
+            self.rerequest.announce(1)
+
+    def rerequest_stopped(self):
+        if self.rerequest:
+            self.rerequest.announce(2)
+
+    def rerequest_lastfailed(self):
+        if self.rerequest:
+            return self.rerequest.last_failed
+        return False
+
+    def rerequest_ondownloadmore(self):
+        if self.rerequest:
+            self.rerequest.hit()
+
+    def startRerequester(self, seededfunc = None, force_rapid_update = False):
+        if self.response.has_key('announce-list'):
+            trackerlist = self.response['announce-list']
+        else:
+            trackerlist = [[self.response['announce']]]
+
+        self.rerequest = Rerequester(self.port, self.myid, self.infohash, 
+            trackerlist, self.config, 
+            self.rawserver.add_task, self.rawserver.add_task,
+            self.errorfunc, self.excfunc,
+            self.encoder.start_connections,
+            self.connecter.how_many_connections, 
+            self.storagewrapper.get_amount_left, 
+            self.upmeasure.get_total, self.downmeasure.get_total,
+            self.upmeasure.get_rate, self.downmeasure.get_rate,
+            self.doneflag, self.unpauseflag, seededfunc, force_rapid_update )
+
+        self.rerequest.start()
+
+
+    def _init_stats(self):
+        self.statistics = Statistics(self.upmeasure, self.downmeasure,
+                    self.connecter, self.httpdownloader, self.ratelimiter,
+                    self.rerequest_lastfailed, self.filedatflag)
+        if self.info.has_key('files'):
+            self.statistics.set_dirstats(self.files, self.info['piece length'])
+        if self.config['spew']:
+            self.spewflag.set()
+
+    def autoStats(self, displayfunc = None):
+        if not displayfunc:
+            displayfunc = self.statusfunc
+
+        self._init_stats()
+        DownloaderFeedback(self.choker, self.httpdownloader, self.rawserver.add_task,
+            self.upmeasure.get_rate, self.downmeasure.get_rate,
+            self.ratemeasure, self.storagewrapper.get_stats,
+            self.datalength, self.finflag, self.spewflag, self.statistics,
+            displayfunc, self.config['display_interval'])
+
+    def startStats(self):
+        self._init_stats()
+        d = DownloaderFeedback(self.choker, self.httpdownloader, self.rawserver.add_task,
+            self.upmeasure.get_rate, self.downmeasure.get_rate,
+            self.ratemeasure, self.storagewrapper.get_stats,
+            self.datalength, self.finflag, self.spewflag, self.statistics)
+        return d.gather
+
+
+    def getPortHandler(self):
+        return self.encoder
+
+
+    def shutdown(self, torrentdata = {}):
+        if self.checking or self.started:
+            self.storagewrapper.sync()
+            self.storage.close()
+            self.rerequest_stopped()
+        if self.fileselector and self.started:
+            if not self.failed:
+                self.fileselector.finish()
+                torrentdata['resume data'] = self.fileselector.pickle()
+            try:
+                self.appdataobj.writeTorrentData(self.infohash,torrentdata)
+            except:
+                self.appdataobj.deleteTorrentData(self.infohash) # clear it
+        return not self.failed and not self.excflag.isSet()
+        # if returns false, you may wish to auto-restart the torrent
+
+
+    def setUploadRate(self, rate):
+        try:
+            def s(self = self, rate = rate):
+                self.config['max_upload_rate'] = rate
+                self.ratelimiter.set_upload_rate(rate)
+            self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+
+    def setConns(self, conns, conns2 = None):
+        if not conns2:
+            conns2 = conns
+        try:
+            def s(self = self, conns = conns, conns2 = conns2):
+                self.config['min_uploads'] = conns
+                self.config['max_uploads'] = conns2
+                if (conns > 30):
+                    self.config['max_initiate'] = conns + 10
+            self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+        
+    def setDownloadRate(self, rate):
+        try:
+            def s(self = self, rate = rate):
+                self.config['max_download_rate'] = rate
+                self.downloader.set_download_rate(rate)
+            self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+
+    def startConnection(self, ip, port, id):
+        self.encoder._start_connection((ip, port), id)
+      
+    def _startConnection(self, ipandport, id):
+        self.encoder._start_connection(ipandport, id)
+        
+    def setInitiate(self, initiate):
+        try:
+            def s(self = self, initiate = initiate):
+                self.config['max_initiate'] = initiate
+            self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+
+    def getConfig(self):
+        return self.config
+
+    def getDefaults(self):
+        return defaultargs(defaults)
+
+    def getUsageText(self):
+        return self.argslistheader
+
+    def reannounce(self, special = None):
+        try:
+            def r(self = self, special = special):
+                if special is None:
+                    self.rerequest.announce()
+                else:
+                    self.rerequest.announce(specialurl = special)
+            self.rawserver.add_task(r)
+        except AttributeError:
+            pass
+
+    def getResponse(self):
+        try:
+            return self.response
+        except:
+            return None
+
+    def Pause(self):
+        if not self.storagewrapper:
+            return False
+        self.unpauseflag.clear()
+        self.rawserver.add_task(self.onPause)
+        return True
+
+    def onPause(self):
+        self.whenpaused = clock()
+        if not self.downloader:
+            return
+        self.downloader.pause(True)
+        self.encoder.pause(True)
+        self.choker.pause(True)
+    
+    def Unpause(self):
+        self.unpauseflag.set()
+        self.rawserver.add_task(self.onUnpause)
+
+    def onUnpause(self):
+        if not self.downloader:
+            return
+        self.downloader.pause(False)
+        self.encoder.pause(False)
+        self.choker.pause(False)
+        if self.rerequest and self.whenpaused and clock()-self.whenpaused > 60:
+            self.rerequest.announce(3)      # rerequest automatically if paused for >60 seconds
+
+    def set_super_seed(self):
+        try:
+            self.superseedflag.set()
+            def s(self = self):
+                if self.finflag.isSet():
+                    self._set_super_seed()
+            self.rawserver.add_task(s)
+        except AttributeError:
+            pass
+
+    def _set_super_seed(self):
+        if not self.super_seeding_active:
+            self.super_seeding_active = True
+            self.errorfunc('        ** SUPER-SEED OPERATION ACTIVE **\n' +
+                           '  please set Max uploads so each peer gets 6-8 kB/s')
+            def s(self = self):
+                self.downloader.set_super_seed()
+                self.choker.set_super_seed()
+            self.rawserver.add_task(s)
+            if self.finflag.isSet():        # mode started when already finished
+                def r(self = self):
+                    self.rerequest.announce(3)  # so after kicking everyone off, reannounce
+                self.rawserver.add_task(r)
+
+    def am_I_finished(self):
+        return self.finflag.isSet()
+
+    def get_transfer_stats(self):
+        return self.upmeasure.get_total(), self.downmeasure.get_total()
+    

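A minimal usage sketch for the option-parsing helpers in the file above, assuming download_bt1 is importable in the same flat style the package itself uses (see launchmanycore.py further down), and that BitTornado's parseargs module (not shown in this hunk) maps '--name value' pairs onto the defaults table, converting each value to the type of its default. 'example.torrent' is a placeholder.

    from download_bt1 import parse_params, get_usage

    # Hypothetical parameters; the trailing positional argument ends up in
    # config['responsefile'] if it names an existing file, otherwise in
    # config['url'].
    params = ['--max_upload_rate', '50', 'example.torrent']

    config = parse_params(params)
    if config is None:
        # parse_params returns None only for an empty parameter list;
        # callers then print the generated usage text.
        print get_usage()
    else:
        print config['max_upload_rate']   # 50, overriding the default of 0
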
Added: debtorrent/branches/upstream/current/BitTornado/inifile.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/inifile.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/inifile.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/inifile.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,169 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+'''
+reads/writes a Windows-style INI file
+format:
+
+  aa = "bb"
+  cc = 11
+
+  [eee]
+  ff = "gg"
+
+decodes to:
+d = { '': {'aa':'bb','cc':'11'}, 'eee': {'ff':'gg'} }
+
+the encoder can also take this as input:
+
+d = { 'aa': 'bb', 'cc': 11, 'eee': {'ff':'gg'} }
+
+though it will only decode in the above format.  Keywords must be strings.
+Values that are strings are written surrounded by quotes, and the decoding
+routine automatically strips any.
+Booleans are written as integers.  Anything else aside from string/int/float
+may have unpredictable results.
+'''
+
+from cStringIO import StringIO
+from traceback import print_exc
+from types import DictType, StringType
+try:
+    from types import BooleanType
+except ImportError:
+    BooleanType = None
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+def ini_write(f, d, comment=''):
+    try:
+        a = {'':{}}
+        for k,v in d.items():
+            assert type(k) == StringType
+            k = k.lower()
+            if type(v) == DictType:
+                if DEBUG:
+                    print 'new section:' +k
+                if k:
+                    assert not a.has_key(k)
+                    a[k] = {}
+                aa = a[k]
+                for kk,vv in v.items():
+                    assert type(kk) == StringType
+                    kk = kk.lower()
+                    assert not aa.has_key(kk)
+                    if type(vv) == BooleanType:
+                        vv = int(vv)
+                    if type(vv) == StringType:
+                        vv = '"'+vv+'"'
+                    aa[kk] = str(vv)
+                    if DEBUG:
+                        print 'a['+k+']['+kk+'] = '+str(vv)
+            else:
+                aa = a['']
+                assert not aa.has_key(k)
+                if type(v) == BooleanType:
+                    v = int(v)
+                if type(v) == StringType:
+                    v = '"'+v+'"'
+                aa[k] = str(v)
+                if DEBUG:
+                    print 'a[\'\']['+k+'] = '+str(v)
+        r = open(f,'w')
+        if comment:
+            for c in comment.split('\n'):
+                r.write('# '+c+'\n')
+            r.write('\n')
+        l = a.keys()
+        l.sort()
+        for k in l:
+            if k:
+                r.write('\n['+k+']\n')
+            aa = a[k]
+            ll = aa.keys()
+            ll.sort()
+            for kk in ll:
+                r.write(kk+' = '+aa[kk]+'\n')
+        success = True
+    except:
+        if DEBUG:
+            print_exc()
+        success = False
+    try:
+        r.close()
+    except:
+        pass
+    return success
+
+
+if DEBUG:
+    def errfunc(lineno, line, err):
+        print '('+str(lineno)+') '+err+': '+line
+else:
+    errfunc = lambda lineno, line, err: None
+
+def ini_read(f, errfunc = errfunc):
+    try:
+        r = open(f,'r')
+        ll = r.readlines()
+        d = {}
+        dd = {'':d}
+        for i in xrange(len(ll)):
+            l = ll[i]
+            l = l.strip()
+            if not l:
+                continue
+            if l[0] == '#':
+                continue
+            if l[0] == '[':
+                if l[-1] != ']':
+                    errfunc(i,l,'syntax error')
+                    continue
+                l1 = l[1:-1].strip().lower()
+                if not l1:
+                    errfunc(i,l,'syntax error')
+                    continue
+                if dd.has_key(l1):
+                    errfunc(i,l,'duplicate section')
+                    d = dd[l1]
+                    continue
+                d = {}
+                dd[l1] = d
+                continue
+            try:
+                k,v = l.split('=',1)
+            except:
+                try:
+                    k,v = l.split(':',1)
+                except:
+                    errfunc(i,l,'syntax error')
+                    continue
+            k = k.strip().lower()
+            v = v.strip()
+            if len(v) > 1 and ( (v[0] == '"' and v[-1] == '"') or
+                                (v[0] == "'" and v[-1] == "'") ):
+                v = v[1:-1]
+            if not k:
+                errfunc(i,l,'syntax error')
+                continue
+            if d.has_key(k):
+                errfunc(i,l,'duplicate entry')
+                continue
+            d[k] = v
+        if DEBUG:
+            print dd
+    except:
+        if DEBUG:
+            print_exc()
+        dd = None
+    try:
+        r.close()
+    except:
+        pass
+    return dd

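A minimal round-trip sketch for the inifile module above, assuming it is importable as a flat module like the rest of the package, with /tmp/example.ini as a placeholder path. It exercises the mixed input form described in the docstring; ini_read always returns the fully sectioned form with string values.

    from inifile import ini_write, ini_read

    d = {'aa': 'bb', 'cc': 11, 'eee': {'ff': 'gg'}}   # mixed form accepted by the encoder
    ini_write('/tmp/example.ini', d, comment='illustrative sketch only')

    dd = ini_read('/tmp/example.ini')
    print dd['']['aa']      # 'bb'  (surrounding quotes are stripped)
    print dd['']['cc']      # '11'  (values decode as strings)
    print dd['eee']['ff']   # 'gg'  (keys and section names are lowercased)
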
Added: debtorrent/branches/upstream/current/BitTornado/iprangeparse.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/iprangeparse.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/iprangeparse.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/iprangeparse.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,259 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from bisect import bisect, insort
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+    bool = lambda x: not not x
+
+
+def to_long_ipv4(ip):
+    ip = ip.split('.')
+    if len(ip) != 4:
+        raise ValueError, "bad address"
+    b = 0L
+    for n in ip:
+        b *= 256
+        b += int(n)
+    return b
+
+
+def to_long_ipv6(ip):
+    if ip == '':
+        raise ValueError, "bad address"
+    if ip == '::':      # boundary handling
+        ip = ''
+    elif ip[:2] == '::':
+        ip = ip[1:]
+    elif ip[0] == ':':
+        raise ValueError, "bad address"
+    elif ip[-2:] == '::':
+        ip = ip[:-1]
+    elif ip[-1] == ':':
+        raise ValueError, "bad address"
+
+    b = []
+    doublecolon = False
+    for n in ip.split(':'):
+        if n == '':     # double-colon
+            if doublecolon:
+                raise ValueError, "bad address"
+            doublecolon = True
+            b.append(None)
+            continue
+        if n.find('.') >= 0: # IPv4
+            n = n.split('.')
+            if len(n) != 4:
+                raise ValueError, "bad address"
+            for i in n:
+                b.append(int(i))
+            continue
+        n = ('0'*(4-len(n))) + n
+        b.append(int(n[:2],16))
+        b.append(int(n[2:],16))
+    bb = 0L
+    for n in b:
+        if n is None:
+            for i in xrange(17-len(b)):
+                bb *= 256
+            continue
+        bb *= 256
+        bb += n
+    return bb
+
+ipv4addrmask = 65535L*256*256*256*256
+
+class IP_List:
+    def __init__(self, entrylist=None):
+        self.ipv4list = []  # starts of ranges
+        self.ipv4dict = {}  # start: end of ranges
+        self.ipv6list = []  # "
+        self.ipv6dict = {}  # "
+
+        if entrylist:
+            l4 = []
+            l6 = []
+            for b,e in entrylist:
+                assert b <= e
+                if b.find(':') < 0:        # IPv4
+                    b = to_long_ipv4(b)
+                    e = to_long_ipv4(e)
+                    l4.append((b,e))
+                else:
+                    b = to_long_ipv6(b)
+                    e = to_long_ipv6(e)
+                    bb = b % (256*256*256*256)
+                    if bb == ipv4addrmask:
+                        b -= bb
+                        e -= bb
+                        l4.append((b,e))
+                    else:
+                        l6.append((b,e))
+            self._import_ipv4(l4)
+            self._import_ipv6(l6)
+
+    def __nonzero__(self):
+        return bool(self.ipv4list or self.ipv6list)
+
+
+    def append(self, ip_beg, ip_end = None):
+        if ip_end is None:
+            ip_end = ip_beg
+        else:
+            assert ip_beg <= ip_end
+        if ip_beg.find(':') < 0:        # IPv4
+            ip_beg = to_long_ipv4(ip_beg)
+            ip_end = to_long_ipv4(ip_end)
+            l = self.ipv4list
+            d = self.ipv4dict
+        else:
+            ip_beg = to_long_ipv6(ip_beg)
+            ip_end = to_long_ipv6(ip_end)
+            bb = ip_beg % (256*256*256*256)
+            if bb == ipv4addrmask:
+                ip_beg -= bb
+                ip_end -= bb
+                l = self.ipv4list
+                d = self.ipv4dict
+            else:
+                l = self.ipv6list
+                d = self.ipv6dict
+
+        p = bisect(l,ip_beg)-1
+        if p >= 0:
+            while p < len(l):
+                range_beg = l[p]
+                if range_beg > ip_end+1:
+                    done = True
+                    break
+                range_end = d[range_beg]
+                if range_end < ip_beg-1:
+                    p += 1
+                    if p == len(l):
+                        done = True
+                        break
+                    continue
+                # if neither of the above conditions is true, the ranges overlap
+                ip_beg = min(ip_beg, range_beg)
+                ip_end = max(ip_end, range_end)
+                del l[p]
+                del d[range_beg]
+                break
+
+        insort(l,ip_beg)
+        d[ip_beg] = ip_end
+
+
+    def _import_ipv4(self, entrylist):  #entrylist = sorted list of pairs of ipv4s converted to longs
+        assert not self.ipv4list
+        if not entrylist:
+            return
+        entrylist.sort()
+        l = []
+        b1,e1 = entrylist[0]
+        for b2,e2 in entrylist:
+            if e1+1 >= b2:
+                e1 = max(e1,e2)
+            else:
+                l.append((b1,e1))
+                b1 = b2
+                e1 = e2
+        l.append((b1,e1))
+        self.ipv4list = [b for b,e in l]
+        for b,e in l:
+            self.ipv4dict[b] = e
+
+    def _import_ipv6(self, entrylist):  #entrylist = sorted list of pairs of ipv6s converted to longs
+        assert not self.ipv6list
+        if not entrylist:
+            return
+        entrylist.sort()
+        l = []
+        b1,e1 = entrylist[0]
+        for b2,e2 in entrylist:
+            if e1+1 >= b2:
+                e1 = max(e1,e2)
+            else:
+                l.append((b1,e1))
+                b1 = b2
+                e1 = e2
+        l.append((b1,e1))
+        self.ipv6list = [b for b,e in l]
+        for b,e in l:
+            self.ipv6dict[b] = e
+
+
+    def includes(self, ip):
+        if not (self.ipv4list or self.ipv6list):
+            return False
+        if ip.find(':') < 0:        # IPv4
+            ip = to_long_ipv4(ip)
+            l = self.ipv4list
+            d = self.ipv4dict
+        else:
+            ip = to_long_ipv6(ip)
+            bb = ip % (256*256*256*256)
+            if bb == ipv4addrmask:
+                ip -= bb
+                l = self.ipv4list
+                d = self.ipv4dict
+            else:
+                l = self.ipv6list
+                d = self.ipv6dict
+        for ip_beg in l[bisect(l,ip)-1:]:
+            if ip == ip_beg:
+                return True
+            ip_end = d[ip_beg]
+            if ip > ip_beg and ip <= ip_end:
+                return True
+        return False
+
+
+    # reads a list from a file in the format 'whatever:whatever:ip-ip'
+    # (not IPv6 compatible at all)
+    def read_rangelist(self, file):
+        l = []
+        f = open(file, 'r')
+        while True:
+            line = f.readline()
+            if not line:
+                break
+            line = line.strip()
+            if not line or line[0] == '#':
+                continue
+            line = line.split(':')[-1]
+            try:
+                ip1,ip2 = line.split('-')
+            except:
+                ip1 = line
+                ip2 = line
+            try:
+                ip1 = to_long_ipv4(ip1)
+                ip2 = to_long_ipv4(ip2)
+                assert ip1 <= ip2
+            except:
+                print '*** WARNING *** could not parse IP range: '+line
+                continue
+            l.append((ip1,ip2))
+        f.close()
+        self._import_ipv4(l)
+
+
+def is_ipv4(ip):
+    return ip.find(':') < 0
+
+def is_valid_ip(ip):
+    try:
+        if is_ipv4(ip):
+            a = ip.split('.')
+            assert len(a) == 4
+            for i in a:
+                chr(int(i))
+            return True
+        to_long_ipv6(ip)
+        return True
+    except:
+        return False

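A minimal sketch of the IP_List interface defined above, assuming a flat import and using placeholder addresses:

    from iprangeparse import IP_List, is_valid_ip

    banned = IP_List()
    banned.append('10.0.0.0', '10.0.0.255')   # a closed IPv4 range
    banned.append('192.168.1.7')              # a single address

    print banned.includes('10.0.0.42')        # True, inside the first range
    print banned.includes('10.0.1.1')         # False
    print is_valid_ip('256.1.2.3')            # False, octet out of range
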
Added: debtorrent/branches/upstream/current/BitTornado/launchmanycore.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/launchmanycore.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/launchmanycore.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/launchmanycore.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,381 @@
+#!/usr/bin/env python
+
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from download_bt1 import BT1Download
+from RawServer import RawServer, UPnP_ERROR
+from RateLimiter import RateLimiter
+from ServerPortHandler import MultiHandler
+from parsedir import parsedir
+from natpunch import UPnP_test
+from random import seed
+from socket import error as socketerror
+from threading import Event
+from sys import argv, exit
+import sys, os
+from clock import clock
+from __init__ import createPeerID, mapbase64, version
+from cStringIO import StringIO
+from traceback import print_exc
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+
+def fmttime(n):
+    try:
+        n = int(n)  # n may be None or too large
+        assert n < 5184000  # 60 days
+    except:
+        return 'downloading'
+    m, s = divmod(n, 60)
+    h, m = divmod(m, 60)
+    return '%d:%02d:%02d' % (h, m, s)
+
+class SingleDownload:
+    def __init__(self, controller, hash, response, config, myid):
+        self.controller = controller
+        self.hash = hash
+        self.response = response
+        self.config = config
+        
+        self.doneflag = Event()
+        self.waiting = True
+        self.checking = False
+        self.working = False
+        self.seed = False
+        self.closed = False
+
+        self.status_msg = ''
+        self.status_err = ['']
+        self.status_errtime = 0
+        self.status_done = 0.0
+
+        self.rawserver = controller.handler.newRawServer(hash, self.doneflag)
+
+        d = BT1Download(self.display, self.finished, self.error,
+                        controller.exchandler, self.doneflag, config, response,
+                        hash, myid, self.rawserver, controller.listen_port)
+        self.d = d
+
+    def start(self):
+        if not self.d.saveAs(self.saveAs):
+            self._shutdown()
+            return
+        self._hashcheckfunc = self.d.initFiles()
+        if not self._hashcheckfunc:
+            self._shutdown()
+            return
+        self.controller.hashchecksched(self.hash)
+
+
+    def saveAs(self, name, length, saveas, isdir):
+        return self.controller.saveAs(self.hash, name, saveas, isdir)
+
+    def hashcheck_start(self, donefunc):
+        if self.is_dead():
+            self._shutdown()
+            return
+        self.waiting = False
+        self.checking = True
+        self._hashcheckfunc(donefunc)
+
+    def hashcheck_callback(self):
+        self.checking = False
+        if self.is_dead():
+            self._shutdown()
+            return
+        if not self.d.startEngine(ratelimiter = self.controller.ratelimiter):
+            self._shutdown()
+            return
+        self.d.startRerequester()
+        self.statsfunc = self.d.startStats()
+        self.rawserver.start_listening(self.d.getPortHandler())
+        self.working = True
+
+    def is_dead(self):
+        return self.doneflag.isSet()
+
+    def _shutdown(self):
+        self.shutdown(False)
+
+    def shutdown(self, quiet=True):
+        if self.closed:
+            return
+        self.doneflag.set()
+        self.rawserver.shutdown()
+        if self.checking or self.working:
+            self.d.shutdown()
+        self.waiting = False
+        self.checking = False
+        self.working = False
+        self.closed = True
+        self.controller.was_stopped(self.hash)
+        if not quiet:
+            self.controller.died(self.hash)
+            
+
+    def display(self, activity = None, fractionDone = None):
+        # really only used by StorageWrapper now
+        if activity:
+            self.status_msg = activity
+        if fractionDone is not None:
+            self.status_done = float(fractionDone)
+
+    def finished(self):
+        self.seed = True
+
+    def error(self, msg):
+        if self.doneflag.isSet():
+            self._shutdown()
+        self.status_err.append(msg)
+        self.status_errtime = clock()
+
+
+class LaunchMany:
+    def __init__(self, config, Output):
+        try:
+            self.config = config
+            self.Output = Output
+
+            self.torrent_dir = config['torrent_dir']
+            self.torrent_cache = {}
+            self.file_cache = {}
+            self.blocked_files = {}
+            self.scan_period = config['parse_dir_interval']
+            self.stats_period = config['display_interval']
+
+            self.torrent_list = []
+            self.downloads = {}
+            self.counter = 0
+            self.doneflag = Event()
+
+            self.hashcheck_queue = []
+            self.hashcheck_current = None
+            
+            self.rawserver = RawServer(self.doneflag, config['timeout_check_interval'],
+                              config['timeout'], ipv6_enable = config['ipv6_enabled'],
+                              failfunc = self.failed, errorfunc = self.exchandler)
+            upnp_type = UPnP_test(config['upnp_nat_access'])
+            while True:
+                try:
+                    self.listen_port = self.rawserver.find_and_bind(
+                                    config['minport'], config['maxport'], config['bind'],
+                                    ipv6_socket_style = config['ipv6_binds_v4'],
+                                    upnp = upnp_type, randomizer = config['random_port'])
+                    break
+                except socketerror, e:
+                    if upnp_type and e == UPnP_ERROR:
+                        self.Output.message('WARNING: COULD NOT FORWARD VIA UPnP')
+                        upnp_type = 0
+                        continue
+                    self.failed("Couldn't listen - " + str(e))
+                    return
+
+            self.ratelimiter = RateLimiter(self.rawserver.add_task,
+                                           config['upload_unit_size'])
+            self.ratelimiter.set_upload_rate(config['max_upload_rate'])
+
+            self.handler = MultiHandler(self.rawserver, self.doneflag, config)
+            seed(createPeerID())
+            self.rawserver.add_task(self.scan, 0)
+            self.rawserver.add_task(self.stats, 0)
+
+            self.handler.listen_forever()
+
+            self.Output.message('shutting down')
+            self.hashcheck_queue = []
+            for hash in self.torrent_list:
+                self.Output.message('dropped "'+self.torrent_cache[hash]['path']+'"')
+                self.downloads[hash].shutdown()
+            self.rawserver.shutdown()
+
+        except:
+            data = StringIO()
+            print_exc(file = data)
+            Output.exception(data.getvalue())
+
+
+    def scan(self):
+        self.rawserver.add_task(self.scan, self.scan_period)
+                                
+        r = parsedir(self.torrent_dir, self.torrent_cache,
+                     self.file_cache, self.blocked_files,
+                     return_metainfo = True, errfunc = self.Output.message)
+
+        ( self.torrent_cache, self.file_cache, self.blocked_files,
+            added, removed ) = r
+
+        for hash, data in removed.items():
+            self.Output.message('dropped "'+data['path']+'"')
+            self.remove(hash)
+        for hash, data in added.items():
+            self.Output.message('added "'+data['path']+'"')
+            self.add(hash, data)
+
+    def stats(self):            
+        self.rawserver.add_task(self.stats, self.stats_period)
+        data = []
+        for hash in self.torrent_list:
+            cache = self.torrent_cache[hash]
+            if self.config['display_path']:
+                name = cache['path']
+            else:
+                name = cache['name']
+            size = cache['length']
+            d = self.downloads[hash]
+            progress = '0.0%'
+            peers = 0
+            seeds = 0
+            seedsmsg = "S"
+            dist = 0.0
+            uprate = 0.0
+            dnrate = 0.0
+            upamt = 0
+            dnamt = 0
+            t = 0
+            if d.is_dead():
+                status = 'stopped'
+            elif d.waiting:
+                status = 'waiting for hash check'
+            elif d.checking:
+                status = d.status_msg
+                progress = '%.1f%%' % (d.status_done*100)
+            else:
+                stats = d.statsfunc()
+                s = stats['stats']
+                if d.seed:
+                    status = 'seeding'
+                    progress = '100.0%'
+                    seeds = s.numOldSeeds
+                    seedsmsg = "s"
+                    dist = s.numCopies
+                else:
+                    if s.numSeeds + s.numPeers:
+                        t = stats['time']
+                        if t == 0:  # unlikely
+                            t = 0.01
+                        status = fmttime(t)
+                    else:
+                        t = -1
+                        status = 'connecting to peers'
+                    progress = '%.1f%%' % (int(stats['frac']*1000)/10.0)
+                    seeds = s.numSeeds
+                    dist = s.numCopies2
+                    dnrate = stats['down']
+                peers = s.numPeers
+                uprate = stats['up']
+                upamt = s.upTotal
+                dnamt = s.downTotal
+                   
+            if d.is_dead() or d.status_errtime+300 > clock():
+                msg = d.status_err[-1]
+            else:
+                msg = ''
+
+            data.append(( name, status, progress, peers, seeds, seedsmsg, dist,
+                          uprate, dnrate, upamt, dnamt, size, t, msg ))
+        stop = self.Output.display(data)
+        if stop:
+            self.doneflag.set()
+
+    def remove(self, hash):
+        self.torrent_list.remove(hash)
+        self.downloads[hash].shutdown()
+        del self.downloads[hash]
+        
+    def add(self, hash, data):
+        c = self.counter
+        self.counter += 1
+        x = ''
+        for i in xrange(3):
+            x = mapbase64[c & 0x3F]+x
+            c >>= 6
+        peer_id = createPeerID(x)
+        d = SingleDownload(self, hash, data['metainfo'], self.config, peer_id)
+        self.torrent_list.append(hash)
+        self.downloads[hash] = d
+        d.start()
+
+
+    def saveAs(self, hash, name, saveas, isdir):
+        x = self.torrent_cache[hash]
+        style = self.config['saveas_style']
+        if style == 1 or style == 3:
+            if saveas:
+                saveas = os.path.join(saveas,x['file'][:-1-len(x['type'])])
+            else:
+                saveas = x['path'][:-1-len(x['type'])]
+            if style == 3:
+                if not os.path.isdir(saveas):
+                    try:
+                        os.mkdir(saveas)
+                    except:
+                        raise OSError("couldn't create directory for "+x['path']
+                                      +" ("+saveas+")")
+                if not isdir:
+                    saveas = os.path.join(saveas, name)
+        else:
+            if saveas:
+                saveas = os.path.join(saveas, name)
+            else:
+                saveas = os.path.join(os.path.split(x['path'])[0], name)
+                
+        if isdir and not os.path.isdir(saveas):
+            try:
+                os.mkdir(saveas)
+            except:
+                raise OSError("couldn't create directory for "+x['path']
+                                      +" ("+saveas+")")
+        return saveas
+
+
+    def hashchecksched(self, hash = None):
+        if hash:
+            self.hashcheck_queue.append(hash)
+        if not self.hashcheck_current:
+            self._hashcheck_start()
+
+    def _hashcheck_start(self):
+        self.hashcheck_current = self.hashcheck_queue.pop(0)
+        self.downloads[self.hashcheck_current].hashcheck_start(self.hashcheck_callback)
+
+    def hashcheck_callback(self):
+        self.downloads[self.hashcheck_current].hashcheck_callback()
+        if self.hashcheck_queue:
+            self._hashcheck_start()
+        else:
+            self.hashcheck_current = None
+
+    def died(self, hash):
+        if self.torrent_cache.has_key(hash):
+            self.Output.message('DIED: "'+self.torrent_cache[hash]['path']+'"')
+        
+    def was_stopped(self, hash):
+        try:
+            self.hashcheck_queue.remove(hash)
+        except:
+            pass
+        if self.hashcheck_current == hash:
+            self.hashcheck_current = None
+            if self.hashcheck_queue:
+                self._hashcheck_start()
+
+    def failed(self, s):
+        self.Output.message('FAILURE: '+s)
+
+    def exchandler(self, s):
+        self.Output.exception(s)

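For reference, fmttime above renders an ETA in seconds as h:mm:ss and falls back to 'downloading' when the estimate is unusable (None, non-numeric, or 60 days and over). A minimal sketch, assuming launchmanycore and the modules it imports are on the path:

    from launchmanycore import fmttime

    print fmttime(90)        # '0:01:30'
    print fmttime(3661)      # '1:01:01'
    print fmttime(None)      # 'downloading', int(None) raises and is caught
    print fmttime(6000000)   # 'downloading', at or above the 60-day cutoff
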
Added: debtorrent/branches/upstream/current/BitTornado/natpunch.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/natpunch.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/natpunch.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/natpunch.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,254 @@
+# Written by John Hoffman
+# derived from NATPortMapping.py by Yejun Yang
+# and from example code by Myers Carpenter
+# see LICENSE.txt for license information
+
+import socket
+from traceback import print_exc
+from subnetparse import IP_List
+from clock import clock
+from __init__ import createPeerID
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+DEBUG = False
+
+EXPIRE_CACHE = 30 # seconds
+ID = "BT-"+createPeerID()[-4:]
+
+try:
+    import pythoncom, win32com.client
+    _supported = 1
+except ImportError:
+    _supported = 0
+
+
+
+class _UPnP1:   # derived from Myers Carpenter's code
+                # seems to use the machine's local UPnP
+                # system for its operation.  Runs fairly fast
+
+    def __init__(self):
+        self.map = None
+        self.last_got_map = -10e10
+
+    def _get_map(self):
+        if self.last_got_map + EXPIRE_CACHE < clock():
+            try:
+                dispatcher = win32com.client.Dispatch("HNetCfg.NATUPnP")
+                self.map = dispatcher.StaticPortMappingCollection
+                self.last_got_map = clock()
+            except:
+                self.map = None
+        return self.map
+
+    def test(self):
+        try:
+            assert self._get_map()     # make sure a map was found
+            success = True
+        except:
+            success = False
+        return success
+
+
+    def open(self, ip, p):
+        map = self._get_map()
+        try:
+            map.Add(p,'TCP',p,ip,True,ID)
+            if DEBUG:
+                print 'port opened: '+ip+':'+str(p)
+            success = True
+        except:
+            if DEBUG:
+                print "COULDN'T OPEN "+str(p)
+                print_exc()
+            success = False
+        return success
+
+
+    def close(self, p):
+        map = self._get_map()
+        try:
+            map.Remove(p,'TCP')
+            success = True
+            if DEBUG:
+                print 'port closed: '+str(p)
+        except:
+            if DEBUG:
+                print 'ERROR CLOSING '+str(p)
+                print_exc()
+            success = False
+        return success
+
+
+    def clean(self, retry = False):
+        if not _supported:
+            return
+        try:
+            map = self._get_map()
+            ports_in_use = []
+            for i in xrange(len(map)):
+                try:
+                    mapping = map[i]
+                    port = mapping.ExternalPort
+                    prot = str(mapping.Protocol).lower()
+                    desc = str(mapping.Description).lower()
+                except:
+                    port = None
+                if port and prot == 'tcp' and desc[:3] == 'bt-':
+                    ports_in_use.append(port)
+            success = True
+            for port in ports_in_use:
+                try:
+                    map.Remove(port,'TCP')
+                except:
+                    success = False
+            if not success and not retry:
+                self.clean(retry = True)
+        except:
+            pass
+
+
+class _UPnP2:   # derived from Yejun Yang's code
+                # apparently does a direct search for UPnP hardware
+                # may work in some cases where _UPnP1 won't, but is slow
+                # still need to implement "clean" method
+
+    def __init__(self):
+        self.services = None
+        self.last_got_services = -10e10
+                           
+    def _get_services(self):
+        if not self.services or self.last_got_services + EXPIRE_CACHE < clock():
+            self.services = []
+            try:
+                f=win32com.client.Dispatch("UPnP.UPnPDeviceFinder")
+                for t in ( "urn:schemas-upnp-org:service:WANIPConnection:1",
+                           "urn:schemas-upnp-org:service:WANPPPConnection:1" ):
+                    try:
+                        conns = f.FindByType(t,0)
+                        for c in xrange(len(conns)):
+                            try:
+                                svcs = conns[c].Services
+                                for s in xrange(len(svcs)):
+                                    try:
+                                        self.services.append(svcs[s])
+                                    except:
+                                        pass
+                            except:
+                                pass
+                    except:
+                        pass
+            except:
+                pass
+            self.last_got_services = clock()
+        return self.services
+
+    def test(self):
+        try:
+            assert self._get_services()    # make sure some services can be found
+            success = True
+        except:
+            success = False
+        return success
+
+
+    def open(self, ip, p):
+        svcs = self._get_services()
+        success = False
+        for s in svcs:
+            try:
+                s.InvokeAction('AddPortMapping',['',p,'TCP',p,ip,True,ID,0],'')
+                success = True
+            except:
+                pass
+        if DEBUG and not success:
+            print "COULDN'T OPEN "+str(p)
+            print_exc()
+        return success
+
+
+    def close(self, p):
+        svcs = self._get_services()
+        success = False
+        for s in svcs:
+            try:
+                s.InvokeAction('DeletePortMapping', ['',p,'TCP'], '')
+                success = True
+            except:
+                pass
+        if DEBUG and not success:
+            print "COULDN'T OPEN "+str(p)
+            print_exc()
+        return success
+
+
+class _UPnP:    # master holding class
+    def __init__(self):
+        self.upnp1 = _UPnP1()
+        self.upnp2 = _UPnP2()
+        self.upnplist = (None, self.upnp1, self.upnp2)
+        self.upnp = None
+        self.local_ip = None
+        self.last_got_ip = -10e10
+        
+    def get_ip(self):
+        if self.last_got_ip + EXPIRE_CACHE < clock():
+            local_ips = IP_List()
+            local_ips.set_intranet_addresses()
+            try:
+                for info in socket.getaddrinfo(socket.gethostname(),0,socket.AF_INET):
+                            # exception if socket library isn't recent
+                    self.local_ip = info[4][0]
+                    if local_ips.includes(self.local_ip):
+                        self.last_got_ip = clock()
+                        if DEBUG:
+                            print 'Local IP found: '+self.local_ip
+                        break
+                else:
+                    raise ValueError('couldn\'t find intranet IP')
+            except:
+                self.local_ip = None
+                if DEBUG:
+                    print 'Error finding local IP'
+                    print_exc()
+        return self.local_ip
+
+    def test(self, upnp_type):
+        if DEBUG:
+            print 'testing UPnP type '+str(upnp_type)
+        if not upnp_type or not _supported or self.get_ip() is None:
+            if DEBUG:
+                print 'not supported'
+            return 0
+        pythoncom.CoInitialize()                # leave initialized
+        self.upnp = self.upnplist[upnp_type]    # cache this
+        if self.upnp.test():
+            if DEBUG:
+                print 'ok'
+            return upnp_type
+        if DEBUG:
+            print 'tested bad'
+        return 0
+
+    def open(self, p):
+        assert self.upnp, "must run UPnP_test() with the desired UPnP access type first"
+        return self.upnp.open(self.get_ip(), p)
+
+    def close(self, p):
+        assert self.upnp, "must run UPnP_test() with the desired UPnP access type first"
+        return self.upnp.close(p)
+
+    def clean(self):
+        return self.upnp1.clean()
+
+_upnp_ = _UPnP()
+
+UPnP_test = _upnp_.test
+UPnP_open_port = _upnp_.open
+UPnP_close_port = _upnp_.close
+UPnP_reset = _upnp_.clean
+
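
The module publishes its helpers through the module-level aliases above (UPnP_test, UPnP_open_port, UPnP_close_port, UPnP_reset). A minimal usage sketch, not part of the committed file, follows; the port number and the fallback order are illustrative assumptions, and the code uses the tree's Python 2 idiom. On a non-Windows host, or without the win32com extensions, UPnP_test() simply returns 0.

from BitTornado.natpunch import UPnP_test, UPnP_open_port, UPnP_close_port

LISTEN_PORT = 6881              # illustrative value, not taken from the commit

upnp_type = UPnP_test(1)        # type 1: the fast local-UPnP path
if not upnp_type:
    upnp_type = UPnP_test(2)    # type 2: the slower direct device search
if upnp_type:
    if UPnP_open_port(LISTEN_PORT):
        print 'mapped TCP port %d via UPnP type %d' % (LISTEN_PORT, upnp_type)
        # ... run the client, then remove the mapping on shutdown ...
        UPnP_close_port(LISTEN_PORT)
else:
    print 'UPnP unavailable; relying on manual port forwarding'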

Added: debtorrent/branches/upstream/current/BitTornado/parseargs.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/parseargs.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/parseargs.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/parseargs.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,137 @@
+# Written by Bill Bumgarner and Bram Cohen
+# see LICENSE.txt for license information
+
+from types import *
+from cStringIO import StringIO
+
+
+def splitLine(line, COLS=80, indent=10):
+    indent = " " * indent
+    width = COLS - (len(indent) + 1)
+    if indent and width < 15:
+        width = COLS - 2
+        indent = " "
+    s = StringIO()
+    i = 0
+    for word in line.split():
+        if i == 0:
+            s.write(indent+word)
+            i = len(word)
+            continue
+        if i + len(word) >= width:
+            s.write('\n'+indent+word)
+            i = len(word)
+            continue
+        s.write(' '+word)
+        i += len(word) + 1
+    return s.getvalue()
+
+def formatDefinitions(options, COLS, presets = {}):
+    s = StringIO()
+    for (longname, default, doc) in options:
+        s.write('--' + longname + ' <arg>\n')
+        default = presets.get(longname, default)
+        if type(default) in (IntType, LongType):
+            try:
+                default = int(default)
+            except:
+                pass
+        if default is not None:
+            doc += ' (defaults to ' + repr(default) + ')'
+        s.write(splitLine(doc,COLS,10))
+        s.write('\n\n')
+    return s.getvalue()
+
+
+def usage(str):
+    raise ValueError(str)
+
+
+def defaultargs(options):
+    l = {}
+    for (longname, default, doc) in options:
+        if default is not None:
+            l[longname] = default
+    return l
+        
+
+def parseargs(argv, options, minargs = None, maxargs = None, presets = {}):
+    config = {}
+    longkeyed = {}
+    for option in options:
+        longname, default, doc = option
+        longkeyed[longname] = option
+        config[longname] = default
+    for longname in presets.keys():        # presets after defaults but before arguments
+        config[longname] = presets[longname]
+    options = []
+    args = []
+    pos = 0
+    while pos < len(argv):
+        if argv[pos][:2] != '--':
+            args.append(argv[pos])
+            pos += 1
+        else:
+            if pos == len(argv) - 1:
+                usage('parameter passed in at end with no value')
+            key, value = argv[pos][2:], argv[pos+1]
+            pos += 2
+            if not longkeyed.has_key(key):
+                usage('unknown key --' + key)
+            longname, default, doc = longkeyed[key]
+            try:
+                t = type(config[longname])
+                if t is NoneType or t is StringType:
+                    config[longname] = value
+                elif t in (IntType, LongType):
+                    config[longname] = long(value)
+                elif t is FloatType:
+                    config[longname] = float(value)
+                else:
+                    assert 0
+            except ValueError, e:
+                usage('wrong format of --%s - %s' % (key, str(e)))
+    for key, value in config.items():
+        if value is None:
+            usage("Option --%s is required." % key)
+    if minargs is not None and len(args) < minargs:
+        usage("Must supply at least %d args." % minargs)
+    if maxargs is not None and len(args) > maxargs:
+        usage("Too many args - %d max." % maxargs)
+    return (config, args)
+
+def test_parseargs():
+    assert parseargs(('d', '--a', 'pq', 'e', '--b', '3', '--c', '4.5', 'f'), (('a', 'x', ''), ('b', 1, ''), ('c', 2.3, ''))) == ({'a': 'pq', 'b': 3, 'c': 4.5}, ['d', 'e', 'f'])
+    assert parseargs([], [('a', 'x', '')]) == ({'a': 'x'}, [])
+    assert parseargs(['--a', 'x', '--a', 'y'], [('a', '', '')]) == ({'a': 'y'}, [])
+    try:
+        parseargs([], [('a', 'x', '')])
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a', 'x'], [])
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a'], [('a', 'x', '')])
+    except ValueError:
+        pass
+    try:
+        parseargs([], [], 1, 2)
+    except ValueError:
+        pass
+    assert parseargs(['x'], [], 1, 2) == ({}, ['x'])
+    assert parseargs(['x', 'y'], [], 1, 2) == ({}, ['x', 'y'])
+    try:
+        parseargs(['x', 'y', 'z'], [], 1, 2)
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a', '2.0'], [('a', 3, '')])
+    except ValueError:
+        pass
+    try:
+        parseargs(['--a', 'z'], [('a', 2.1, '')])
+    except ValueError:
+        pass
+
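
A minimal usage sketch, not part of the committed file: options are (longname, default, doc) triples, the type of each default (int/long, float or string) drives the conversion of '--longname value' pairs, and a None default marks the option as required. The option names below are illustrative; usage() surfaces problems as ValueError.

import sys
from BitTornado.parseargs import parseargs, formatDefinitions

options = [
    ('port',    6969, 'port to listen on'),
    ('dfile',   None, 'file to store downloader state in (required)'),
    ('timeout', 30.0, 'seconds to wait before giving up on a peer'),
]

try:
    config, args = parseargs(sys.argv[1:], options, minargs=0, maxargs=1)
except ValueError, e:
    print 'error: ' + str(e)
    print formatDefinitions(options, 80)
    sys.exit(1)

print config['port'], config['timeout'], args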

Added: debtorrent/branches/upstream/current/BitTornado/parsedir.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/parsedir.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/parsedir.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/parsedir.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,150 @@
+# Written by John Hoffman and Uoti Urpala
+# see LICENSE.txt for license information
+from bencode import bencode, bdecode
+from BT1.btformats import check_info
+from os.path import exists, isfile
+from sha import sha
+import sys, os
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+NOISY = False
+
+def _errfunc(x):
+    print ":: "+x
+
+def parsedir(directory, parsed, files, blocked,
+             exts = ['.torrent'], return_metainfo = False, errfunc = _errfunc):
+    if NOISY:
+        errfunc('checking dir')
+    dirs_to_check = [directory]
+    new_files = {}
+    new_blocked = {}
+    torrent_type = {}
+    while dirs_to_check:    # first, recurse directories and gather torrents
+        directory = dirs_to_check.pop()
+        newtorrents = False
+        for f in os.listdir(directory):
+            newtorrent = None
+            for ext in exts:
+                if f.endswith(ext):
+                    newtorrent = ext[1:]
+                    break
+            if newtorrent:
+                newtorrents = True
+                p = os.path.join(directory, f)
+                new_files[p] = [(os.path.getmtime(p), os.path.getsize(p)), 0]
+                torrent_type[p] = newtorrent
+        if not newtorrents:
+            for f in os.listdir(directory):
+                p = os.path.join(directory, f)
+                if os.path.isdir(p):
+                    dirs_to_check.append(p)
+
+    new_parsed = {}
+    to_add = []
+    added = {}
+    removed = {}
+    # files[path] = [(modification_time, size), hash], hash is 0 if the file
+    # has not been successfully parsed
+    for p,v in new_files.items():   # re-add old items and check for changes
+        oldval = files.get(p)
+        if not oldval:          # new file
+            to_add.append(p)
+            continue
+        h = oldval[1]
+        if oldval[0] == v[0]:   # file is unchanged from last parse
+            if h:
+                if blocked.has_key(p):  # parseable + blocked means duplicate
+                    to_add.append(p)    # other duplicate may have gone away
+                else:
+                    new_parsed[h] = parsed[h]
+                new_files[p] = oldval
+            else:
+                new_blocked[p] = 1  # same broken unparseable file
+            continue
+        if parsed.has_key(h) and not blocked.has_key(p):
+            if NOISY:
+                errfunc('removing '+p+' (will re-add)')
+            removed[h] = parsed[h]
+        to_add.append(p)
+
+    to_add.sort()
+    for p in to_add:                # then, parse new and changed torrents
+        new_file = new_files[p]
+        v,h = new_file
+        if new_parsed.has_key(h): # duplicate
+            if not blocked.has_key(p) or files[p][0] != v:
+                errfunc('**warning** '+
+                    p +' is a duplicate torrent for '+new_parsed[h]['path'])
+            new_blocked[p] = 1
+            continue
+                
+        if NOISY:
+            errfunc('adding '+p)
+        try:
+            ff = open(p, 'rb')
+            d = bdecode(ff.read())
+            check_info(d['info'])
+            h = sha(bencode(d['info'])).digest()
+            new_file[1] = h
+            if new_parsed.has_key(h):
+                errfunc('**warning** '+
+                    p +' is a duplicate torrent for '+new_parsed[h]['path'])
+                new_blocked[p] = 1
+                continue
+
+            a = {}
+            a['path'] = p
+            f = os.path.basename(p)
+            a['file'] = f
+            a['type'] = torrent_type[p]
+            i = d['info']
+            l = 0
+            nf = 0
+            if i.has_key('length'):
+                l = i.get('length',0)
+                nf = 1
+            elif i.has_key('files'):
+                for li in i['files']:
+                    nf += 1
+                    if li.has_key('length'):
+                        l += li['length']
+            a['numfiles'] = nf
+            a['length'] = l
+            a['name'] = i.get('name', f)
+            def setkey(k, d = d, a = a):
+                if d.has_key(k):
+                    a[k] = d[k]
+            setkey('failure reason')
+            setkey('warning message')
+            setkey('announce-list')
+            if return_metainfo:
+                a['metainfo'] = d
+        except:
+            errfunc('**warning** '+p+' has errors')
+            new_blocked[p] = 1
+            continue
+        try:
+            ff.close()
+        except:
+            pass
+        if NOISY:
+            errfunc('... successful')
+        new_parsed[h] = a
+        added[h] = a
+
+    for p,v in files.items():       # and finally, mark removed torrents
+        if not new_files.has_key(p) and not blocked.has_key(p):
+            if NOISY:
+                errfunc('removing '+p)
+            removed[v[1]] = parsed[v[1]]
+
+    if NOISY:
+        errfunc('done checking')
+    return (new_parsed, new_files, new_blocked, added, removed)
+
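
A minimal usage sketch, not part of the committed file: parsedir() is written to be polled, with the previous call's (parsed, files, blocked) state fed back in so that only newly added or removed torrents show up in the last two return values. The directory name and poll interval are illustrative assumptions, and the directory is assumed to exist.

import time
from BitTornado.parsedir import parsedir

parsed, files, blocked = {}, {}, {}
while True:
    parsed, files, blocked, added, removed = parsedir(
        'watched_torrents', parsed, files, blocked)
    for info in added.values():
        print 'new torrent: ' + info['path']
    for info in removed.values():
        print 'torrent gone: ' + info['name']
    time.sleep(60)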

Added: debtorrent/branches/upstream/current/BitTornado/piecebuffer.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/piecebuffer.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/piecebuffer.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/piecebuffer.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,86 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from array import array
+from threading import Lock
+# import inspect
+try:
+    True
+except:
+    True = 1
+    False = 0
+    
+DEBUG = False
+
+class SingleBuffer:
+    def __init__(self, pool):
+        self.pool = pool
+        self.buf = array('c')
+
+    def init(self):
+        if DEBUG:
+            print self.count
+            '''
+            for x in xrange(6,1,-1):
+                try:
+                    f = inspect.currentframe(x).f_code
+                    print (f.co_filename,f.co_firstlineno,f.co_name)
+                    del f
+                except:
+                    pass
+            print ''
+            '''
+        self.length = 0
+
+    def append(self, s):
+        l = self.length+len(s)
+        self.buf[self.length:l] = array('c',s)
+        self.length = l
+
+    def __len__(self):
+        return self.length
+
+    def __getslice__(self, a, b):
+        if b > self.length:
+            b = self.length
+        if b < 0:
+            b += self.length
+        if a == 0 and b == self.length and len(self.buf) == b:
+            return self.buf  # optimization
+        return self.buf[a:b]
+
+    def getarray(self):
+        return self.buf[:self.length]
+
+    def release(self):
+        if DEBUG:
+            print -self.count
+        self.pool.release(self)
+
+
+class BufferPool:
+    def __init__(self):
+        self.pool = []
+        self.lock = Lock()
+        if DEBUG:
+            self.count = 0
+
+    def new(self):
+        self.lock.acquire()
+        if self.pool:
+            x = self.pool.pop()
+        else:
+            x = SingleBuffer(self)
+            if DEBUG:
+                self.count += 1
+                x.count = self.count
+        x.init()
+        self.lock.release()
+        return x
+
+    def release(self, x):
+        self.pool.append(x)
+
+
+_pool = BufferPool()
+PieceBuffer = _pool.new
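
A minimal usage sketch, not part of the committed file: callers obtain a buffer from the shared pool via PieceBuffer(), fill it with append(), read it back as a character array, and hand it back with release() so the pool can reuse it instead of reallocating.

from BitTornado.piecebuffer import PieceBuffer

buf = PieceBuffer()
buf.append('hello ')
buf.append('world')
assert len(buf) == 11
print buf.getarray().tostring()   # -> 'hello world'
print buf[0:5].tostring()         # slices come back as arrays too
buf.release()                     # return the buffer to the pool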

Added: debtorrent/branches/upstream/current/BitTornado/selectpoll.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/selectpoll.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/selectpoll.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/selectpoll.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,109 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from select import select, error
+from time import sleep
+from types import IntType
+from bisect import bisect
+POLLIN = 1
+POLLOUT = 2
+POLLERR = 8
+POLLHUP = 16
+
+class poll:
+    def __init__(self):
+        self.rlist = []
+        self.wlist = []
+        
+    def register(self, f, t):
+        if type(f) != IntType:
+            f = f.fileno()
+        if (t & POLLIN):
+            insert(self.rlist, f)
+        else:
+            remove(self.rlist, f)
+        if (t & POLLOUT):
+            insert(self.wlist, f)
+        else:
+            remove(self.wlist, f)
+
+    def unregister(self, f):
+        if type(f) != IntType:
+            f = f.fileno()
+        remove(self.rlist, f)
+        remove(self.wlist, f)
+
+    def poll(self, timeout = None):
+        if self.rlist or self.wlist:
+            try:
+                r, w, e = select(self.rlist, self.wlist, [], timeout)
+            except ValueError:
+                return None
+        else:
+            sleep(timeout)
+            return []
+        result = []
+        for s in r:
+            result.append((s, POLLIN))
+        for s in w:
+            result.append((s, POLLOUT))
+        return result
+
+def remove(list, item):
+    i = bisect(list, item)
+    if i > 0 and list[i-1] == item:
+        del list[i-1]
+
+def insert(list, item):
+    i = bisect(list, item)
+    if i == 0 or list[i-1] != item:
+        list.insert(i, item)
+
+def test_remove():
+    x = [2, 4, 6]
+    remove(x, 2)
+    assert x == [4, 6]
+    x = [2, 4, 6]
+    remove(x, 4)
+    assert x == [2, 6]
+    x = [2, 4, 6]
+    remove(x, 6)
+    assert x == [2, 4]
+    x = [2, 4, 6]
+    remove(x, 5)
+    assert x == [2, 4, 6]
+    x = [2, 4, 6]
+    remove(x, 1)
+    assert x == [2, 4, 6]
+    x = [2, 4, 6]
+    remove(x, 7)
+    assert x == [2, 4, 6]
+    x = [2, 4, 6]
+    remove(x, 5)
+    assert x == [2, 4, 6]
+    x = []
+    remove(x, 3)
+    assert x == []
+
+def test_insert():
+    x = [2, 4]
+    insert(x, 1)
+    assert x == [1, 2, 4]
+    x = [2, 4]
+    insert(x, 3)
+    assert x == [2, 3, 4]
+    x = [2, 4]
+    insert(x, 5)
+    assert x == [2, 4, 5]
+    x = [2, 4]
+    insert(x, 2)
+    assert x == [2, 4]
+    x = [2, 4]
+    insert(x, 4)
+    assert x == [2, 4]
+    x = [2, 3, 4]
+    insert(x, 3)
+    assert x == [2, 3, 4]
+    x = []
+    insert(x, 3)
+    assert x == [3]
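
A minimal usage sketch, not part of the committed file: the poll class emulates the select.poll() object interface on top of select() for platforms that lack it. Note that the timeout is handed straight to select(), so it is in seconds here. The listening socket below is an illustrative setup.

import socket
from BitTornado.selectpoll import poll, POLLIN

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.bind(('127.0.0.1', 0))
server.listen(5)

p = poll()
p.register(server, POLLIN)        # accepts a socket object or a raw fd
events = p.poll(0.1) or []        # -> list of (fd, event) pairs
for fd, flag in events:
    if fd == server.fileno() and flag & POLLIN:
        conn, addr = server.accept()
        conn.close()
p.unregister(server)
server.close()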

Added: debtorrent/branches/upstream/current/BitTornado/subnetparse.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/subnetparse.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/subnetparse.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/subnetparse.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,236 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from bisect import bisect, insort
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+    bool = lambda x: not not x
+
+hexbinmap = {
+    '0': '0000',
+    '1': '0001',
+    '2': '0010',
+    '3': '0011',
+    '4': '0100',
+    '5': '0101',
+    '6': '0110',
+    '7': '0111',
+    '8': '1000',
+    '9': '1001',
+    'a': '1010',
+    'b': '1011',
+    'c': '1100',
+    'd': '1101',
+    'e': '1110',
+    'f': '1111',
+    'x': '0000',
+}
+
+chrbinmap = {}
+for n in xrange(256):
+    b = []
+    nn = n
+    for i in xrange(8):
+        if nn & 0x80:
+            b.append('1')
+        else:
+            b.append('0')
+        nn <<= 1
+    chrbinmap[n] = ''.join(b)
+
+
+def to_bitfield_ipv4(ip):
+    ip = ip.split('.')
+    if len(ip) != 4:
+        raise ValueError, "bad address"
+    b = []
+    for i in ip:
+        b.append(chrbinmap[int(i)])
+    return ''.join(b)
+
+def to_bitfield_ipv6(ip):
+    b = ''
+    doublecolon = False
+
+    if ip == '':
+        raise ValueError, "bad address"
+    if ip == '::':      # boundary handling
+        ip = ''
+    elif ip[:2] == '::':
+        ip = ip[1:]
+    elif ip[0] == ':':
+        raise ValueError, "bad address"
+    elif ip[-2:] == '::':
+        ip = ip[:-1]
+    elif ip[-1] == ':':
+        raise ValueError, "bad address"
+    for n in ip.split(':'):
+        if n == '':     # double-colon
+            if doublecolon:
+                raise ValueError, "bad address"
+            doublecolon = True
+            b += ':'
+            continue
+        if n.find('.') >= 0: # IPv4
+            n = to_bitfield_ipv4(n)
+            b += n + '0'*(32-len(n))
+            continue
+        n = ('x'*(4-len(n))) + n
+        for i in n:
+            b += hexbinmap[i]
+    if doublecolon:
+        pos = b.find(':')
+        b = b[:pos]+('0'*(129-len(b)))+b[pos+1:]
+    if len(b) != 128:   # always check size
+        raise ValueError, "bad address"
+    return b
+
+ipv4addrmask = to_bitfield_ipv6('::ffff:0:0')[:96]
+
+class IP_List:
+    def __init__(self, entrylist=None):
+        self.ipv4list = []
+        self.ipv6list = []
+        if entrylist:
+            for ip, depth in entrylist:
+                self._append(ip,depth)
+            self.ipv4list.sort()
+            self.ipv6list.sort()
+
+
+    def __nonzero__(self):
+        return bool(self.ipv4list or self.ipv6list)
+
+
+    def _append(self, ip, depth = 256):
+        if ip.find(':') < 0:        # IPv4
+            self.ipv4list.append(to_bitfield_ipv4(ip)[:depth])
+        else:
+            b = to_bitfield_ipv6(ip)
+            if b.startswith(ipv4addrmask):
+                self.ipv4list.append(b[96:][:depth-96])
+            else:
+                self.ipv6list.append(b[:depth])
+
+    def append(self, ip, depth = 256):
+        if ip.find(':') < 0:        # IPv4
+            insort(self.ipv4list,to_bitfield_ipv4(ip)[:depth])
+        else:
+            b = to_bitfield_ipv6(ip)
+            if b.startswith(ipv4addrmask):
+                insort(self.ipv4list,b[96:][:depth-96])
+            else:
+                insort(self.ipv6list,b[:depth])
+
+
+    def includes(self, ip):
+        if not (self.ipv4list or self.ipv6list):
+            return False
+        if ip.find(':') < 0:        # IPv4
+            b = to_bitfield_ipv4(ip)
+        else:
+            b = to_bitfield_ipv6(ip)
+            if b.startswith(ipv4addrmask):
+                b = b[96:]
+        if len(b) > 32:
+            l = self.ipv6list
+        else:
+            l = self.ipv4list
+        for map in l[bisect(l,b)-1:]:
+            if b.startswith(map):
+                return True
+            if map > b:
+                return False
+        return False
+
+
+    def read_fieldlist(self, file):   # reads a list from a file in the format 'ip/len <whatever>'
+        f = open(file, 'r')
+        while True:
+            line = f.readline()
+            if not line:
+                break
+            line = line.strip().expandtabs()
+            if not line or line[0] == '#':
+                continue
+            try:
+                line, garbage = line.split(' ',1)
+            except:
+                pass
+            try:
+                line, garbage = line.split('#',1)
+            except:
+                pass
+            try:
+                ip, depth = line.split('/')
+            except:
+                ip = line
+                depth = None
+            try:
+                if depth is not None:                
+                    depth = int(depth)
+                self._append(ip,depth)
+            except:
+                print '*** WARNING *** could not parse IP range: '+line
+        f.close()
+        self.ipv4list.sort()
+        self.ipv6list.sort()
+
+
+    def set_intranet_addresses(self):
+        self.append('127.0.0.1',8)
+        self.append('10.0.0.0',8)
+        self.append('172.16.0.0',12)
+        self.append('192.168.0.0',16)
+        self.append('169.254.0.0',16)
+        self.append('::1')
+        self.append('fe80::',16)
+        self.append('fec0::',16)
+
+    def set_ipv4_addresses(self):
+        self.append('::ffff:0:0',96)
+
+def ipv6_to_ipv4(ip):
+    ip = to_bitfield_ipv6(ip)
+    if not ip.startswith(ipv4addrmask):
+        raise ValueError, "not convertible to IPv4"
+    ip = ip[-32:]
+    x = ''
+    for i in range(4):
+        x += str(int(ip[:8],2))
+        if i < 3:
+            x += '.'
+        ip = ip[8:]
+    return x
+
+def to_ipv4(ip):
+    if is_ipv4(ip):
+        _valid_ipv4(ip)
+        return ip
+    return ipv6_to_ipv4(ip)
+
+def is_ipv4(ip):
+    return ip.find(':') < 0
+
+def _valid_ipv4(ip):
+    ip = ip.split('.')
+    if len(ip) != 4:
+        raise ValueError
+    for i in ip:
+        chr(int(i))
+
+def is_valid_ip(ip):
+    try:
+        if not ip:
+            return False
+        if is_ipv4(ip):
+            _valid_ipv4(ip)
+            return True
+        to_bitfield_ipv6(ip)
+        return True
+    except:
+        return False
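
A minimal usage sketch, not part of the committed file: an IP_List stores IPv4 and IPv6 prefixes as bit strings and answers prefix-match queries with includes(); set_intranet_addresses() loads the private, loopback and link-local ranges listed above. The addresses used below are illustrative.

from BitTornado.subnetparse import IP_List, is_valid_ip

intranet = IP_List()
intranet.set_intranet_addresses()

print intranet.includes('192.168.1.10')   # -> True (inside 192.168.0.0/16)
print intranet.includes('8.8.8.8')        # -> False (public address)

blocklist = IP_List()
blocklist.append('10.11.0.0', 16)         # add a single /16 by hand
print blocklist.includes('10.11.200.3')   # -> True
print is_valid_ip('not-an-ip')            # -> False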

Added: debtorrent/branches/upstream/current/BitTornado/torrentlistparse.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/torrentlistparse.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/torrentlistparse.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/torrentlistparse.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,38 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from binascii import unhexlify
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+
+# parses a list of torrent hashes, in the format of one hash per line in hex format
+
+def parsetorrentlist(filename, parsed):
+    new_parsed = {}
+    added = {}
+    removed = parsed
+    f = open(filename, 'r')
+    while True:
+        l = f.readline()
+        if not l:
+            break
+        l = l.strip()
+        try:
+            if len(l) != 40:
+                raise ValueError, 'bad line'
+            h = unhexlify(l)
+        except:
+            print '*** WARNING *** could not parse line in torrent list: '+l
+        if parsed.has_key(h):
+            del removed[h]
+        else:
+            added[h] = True
+        new_parsed[h] = True
+    f.close()
+    return (new_parsed, added, removed)
+
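
A minimal usage sketch, not part of the committed file: the list file holds one 40-character hex info hash per line, and feeding the previous call's result back in lets the caller see which hashes appeared or disappeared since the last read. The file name is an illustrative assumption.

from binascii import hexlify
from BitTornado.torrentlistparse import parsetorrentlist

allowed = {}    # empty state on the first call
allowed, added, removed = parsetorrentlist('allowed_list.txt', allowed)
for h in added:
    print 'now allowed: ' + hexlify(h)
for h in removed:
    print 'no longer allowed: ' + hexlify(h)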

Added: debtorrent/branches/upstream/current/BitTornado/zurllib.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/BitTornado/zurllib.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/BitTornado/zurllib.py (added)
+++ debtorrent/branches/upstream/current/BitTornado/zurllib.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,100 @@
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from httplib import HTTPConnection, HTTPSConnection, HTTPException
+from urlparse import urlparse
+from bencode import bdecode
+import socket
+from gzip import GzipFile
+from StringIO import StringIO
+from urllib import quote, unquote
+from __init__ import product_name, version_short
+
+VERSION = product_name+'/'+version_short
+MAX_REDIRECTS = 10
+
+
+class btHTTPcon(HTTPConnection): # attempt to add automatic connection timeout
+    def connect(self):
+        HTTPConnection.connect(self)
+        try:
+            self.sock.settimeout(30)
+        except:
+            pass
+
+class btHTTPScon(HTTPSConnection): # attempt to add automatic connection timeout
+    def connect(self):
+        HTTPSConnection.connect(self)
+        try:
+            self.sock.settimeout(30)
+        except:
+            pass 
+
+class urlopen:
+    def __init__(self, url):
+        self.tries = 0
+        self._open(url.strip())
+        self.error_return = None
+
+    def _open(self, url):
+        self.tries += 1
+        if self.tries > MAX_REDIRECTS:
+            raise IOError, ('http error', 500,
+                            "Internal Server Error: Redirect Recursion")
+        (scheme, netloc, path, pars, query, fragment) = urlparse(url)
+        if scheme != 'http' and scheme != 'https':
+            raise IOError, ('url error', 'unknown url type', scheme, url)
+        url = path
+        if pars:
+            url += ';'+pars
+        if query:
+            url += '?'+query
+#        if fragment:
+        try:
+            if scheme == 'http':
+                self.connection = btHTTPcon(netloc)
+            else:
+                self.connection = btHTTPScon(netloc)
+            self.connection.request('GET', url, None,
+                                { 'User-Agent': VERSION,
+                                  'Accept-Encoding': 'gzip' } )
+            self.response = self.connection.getresponse()
+        except HTTPException, e:
+            raise IOError, ('http error', str(e))
+        status = self.response.status
+        if status in (301,302):
+            try:
+                self.connection.close()
+            except:
+                pass
+            self._open(self.response.getheader('Location'))
+            return
+        if status != 200:
+            try:
+                data = self._read()
+                d = bdecode(data)
+                if d.has_key('failure reason'):
+                    self.error_return = data
+                    return
+            except:
+                pass
+            raise IOError, ('http error', status, self.response.reason)
+
+    def read(self):
+        if self.error_return:
+            return self.error_return
+        return self._read()
+
+    def _read(self):
+        data = self.response.read()
+        if self.response.getheader('Content-Encoding','').find('gzip') >= 0:
+            try:
+                compressed = StringIO(data)
+                f = GzipFile(fileobj = compressed)
+                data = f.read()
+            except:
+                raise IOError, ('http error', 'got corrupt response')
+        return data
+
+    def close(self):
+        self.connection.close()
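
A minimal usage sketch, not part of the committed file: the urlopen class wraps httplib with a 30-second socket timeout, follows up to MAX_REDIRECTS 301/302 redirects, and transparently decompresses gzip-encoded bodies, so a caller can fetch a bencoded tracker-style response in a few lines. The URL is illustrative; transport errors surface as IOError, and bad bencoding as ValueError.

from BitTornado.zurllib import urlopen
from BitTornado.bencode import bdecode

try:
    h = urlopen('http://my.tracker:6969/announce')
    data = h.read()
    h.close()
    response = bdecode(data)              # tracker replies are bencoded
    if response.has_key('failure reason'):
        print 'tracker error: ' + response['failure reason']
    else:
        print 'announce ok, interval %s' % response.get('interval')
except (IOError, ValueError), e:
    print 'request failed: ' + str(e)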

Added: debtorrent/branches/upstream/current/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,33 @@
+D/BitTornado////
+D/docs////
+D/icons////
+D/multitracker////
+D/targets////
+D/thosts////
+D/test////
+/.cvsignore/1.1/Sat Dec 23 20:20:48 2006//
+/LICENSE.txt/1.1/Sat Dec 23 20:20:48 2006//
+/README.txt/1.1/Sat Dec 23 20:20:48 2006//
+/bittorrent.nsi/1.32/Sat Dec 23 20:20:48 2006//
+/bt-t-make.py/1.6/Sat Dec 23 20:20:50 2006//
+/bt_MakeCreateIcons.py/1.5/Sat Dec 23 20:20:50 2006//
+/btcompletedir.py/1.7/Sat Dec 23 20:20:50 2006//
+/btcompletedirgui.py/1.6/Sat Dec 23 20:20:50 2006//
+/btcopyannounce.py/1.2/Sat Dec 23 20:20:50 2006//
+/btdownloadcurses.py/1.40/Sat Dec 23 20:20:51 2006//
+/btdownloadgui.py/1.90/Sat Dec 23 20:20:54 2006//
+/btdownloadheadless.py/1.26/Sat Dec 23 20:20:54 2006//
+/btlaunchmany.py/1.25/Sat Dec 23 20:20:54 2006//
+/btlaunchmanycurses.py/1.33/Sat Dec 23 20:20:54 2006//
+/btmakemetafile.py/1.5/Sat Dec 23 20:20:54 2006//
+/btmaketorrentgui.py/1.7/Sat Dec 23 20:20:54 2006//
+/btreannounce.py/1.2/Sat Dec 23 20:20:55 2006//
+/btrename.py/1.2/Sat Dec 23 20:20:55 2006//
+/btsethttpseeds.py/1.2/Sat Dec 23 20:20:55 2006//
+/btshowmetainfo.py/1.2/Sat Dec 23 20:20:55 2006//
+/bttrack.py/1.4/Sat Dec 23 20:20:55 2006//
+/completedir.nsi/1.1/Sat Dec 23 20:20:55 2006//
+/ipranges.portugal.txt/1.1/Sat Dec 23 20:20:55 2006//
+/setup.py/1.7/Sat Dec 23 20:20:55 2006//
+/wincompletedirsetup.py/1.1/Sat Dec 23 20:20:55 2006//
+/winsetup.py/1.2/Sat Dec 23 20:20:55 2006//

Added: debtorrent/branches/upstream/current/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,33 @@
+D/BitTornado///////
+D/docs///////
+D/icons///////
+D/multitracker///////
+D/targets///////
+D/thosts///////
+D/test///////
+/.cvsignore////*///
+/LICENSE.txt////*///
+/README.txt////*///
+/bittorrent.nsi////*///
+/bt-t-make.py////*///
+/bt_MakeCreateIcons.py////*///
+/btcompletedir.py////*///
+/btcompletedirgui.py////*///
+/btcopyannounce.py////*///
+/btdownloadcurses.py////*///
+/btdownloadgui.py////*///
+/btdownloadheadless.py////*///
+/btlaunchmany.py////*///
+/btlaunchmanycurses.py////*///
+/btmakemetafile.py////*///
+/btmaketorrentgui.py////*///
+/btreannounce.py////*///
+/btrename.py////*///
+/btsethttpseeds.py////*///
+/btshowmetainfo.py////*///
+/bttrack.py////*///
+/completedir.nsi////*///
+/ipranges.portugal.txt////*///
+/setup.py////*///
+/wincompletedirsetup.py////*///
+/winsetup.py////*///

Added: debtorrent/branches/upstream/current/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/CVS/Entries.Extra.Old (added)
+++ debtorrent/branches/upstream/current/CVS/Entries.Extra.Old Sat Apr 14 18:47:18 2007
@@ -1,0 +1,33 @@
+D/BitTornado///////
+D/docs///////
+D/icons///////
+D/multitracker///////
+D/targets///////
+D/thosts///////
+/.cvsignore////*///
+/LICENSE.txt////*///
+/README.txt////*///
+/bt-t-make.py////*///
+/bt_MakeCreateIcons.py////*///
+/btcompletedir.py////*///
+/btcompletedirgui.py////*///
+/btcopyannounce.py////*///
+/btdownloadcurses.py////*///
+/btdownloadgui.py////*///
+/btdownloadheadless.py////*///
+/btlaunchmany.py////*///
+/btmakemetafile.py////*///
+/btmaketorrentgui.py////*///
+/btreannounce.py////*///
+/btrename.py////*///
+/btsethttpseeds.py////*///
+/btshowmetainfo.py////*///
+/bttrack.py////*///
+/completedir.nsi////*///
+/ipranges.portugal.txt////*///
+/setup.py////*///
+/wincompletedirsetup.py////*///
+/winsetup.py////*///
+/btlaunchmanycurses.py////*///
+/bittorrent.nsi////*///
+D/test///////

Added: debtorrent/branches/upstream/current/CVS/Entries.Log
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/CVS/Entries.Log?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/CVS/Entries.Log (added)
+++ debtorrent/branches/upstream/current/CVS/Entries.Log Sat Apr 14 18:47:18 2007
@@ -1,0 +1,6 @@
+A D/BitTornado////
+A D/docs////
+A D/icons////
+A D/targets////
+A D/test////
+A D/thosts////

Added: debtorrent/branches/upstream/current/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/CVS/Entries.Old?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/CVS/Entries.Old (added)
+++ debtorrent/branches/upstream/current/CVS/Entries.Old Sat Apr 14 18:47:18 2007
@@ -1,0 +1,33 @@
+D/BitTornado////
+D/docs////
+D/icons////
+D/multitracker////
+D/targets////
+D/thosts////
+/.cvsignore/1.1/Sun Feb 22 19:52:45 2004//
+/LICENSE.txt/1.1/Sun Feb 22 19:52:45 2004//
+/README.txt/1.1/Sun Feb 22 19:52:45 2004//
+/bt-t-make.py/1.6/Sun May  9 15:02:36 2004//
+/bt_MakeCreateIcons.py/1.5/Wed Jan  5 21:58:21 2005//
+/btcompletedir.py/1.7/Mon May 24 21:56:05 2004//
+/btcompletedirgui.py/1.6/Sun May  9 15:06:15 2004//
+/btcopyannounce.py/1.2/Tue Feb 24 17:46:17 2004//
+/btdownloadcurses.py/1.40/Mon Dec 13 03:59:52 2004//
+/btdownloadgui.py/1.90/Mon Mar  6 04:33:00 2006//
+/btdownloadheadless.py/1.26/Mon Dec 13 04:00:07 2004//
+/btlaunchmany.py/1.25/Tue Oct  5 18:12:51 2004//
+/btmakemetafile.py/1.5/Mon May 24 21:54:50 2004//
+/btmaketorrentgui.py/1.7/Fri Jul  2 18:39:07 2004//
+/btreannounce.py/1.2/Tue Feb 24 17:49:41 2004//
+/btrename.py/1.2/Tue Feb 24 17:49:57 2004//
+/btsethttpseeds.py/1.2/Tue Feb 24 17:50:07 2004//
+/btshowmetainfo.py/1.2/Tue Feb 24 17:50:23 2004//
+/bttrack.py/1.4/Fri Dec 31 19:53:53 2004//
+/completedir.nsi/1.1/Sun Feb 22 19:52:54 2004//
+/ipranges.portugal.txt/1.1/Sun Feb 22 19:52:55 2004//
+/setup.py/1.7/Wed Oct  6 19:50:08 2004//
+/wincompletedirsetup.py/1.1/Sun Feb 22 19:52:56 2004//
+/winsetup.py/1.2/Wed Mar 10 22:31:33 2004//
+/btlaunchmanycurses.py/1.33/Wed Dec 20 21:26:30 2006//
+/bittorrent.nsi/1.32/Sat Dec 23 04:32:58 2006//
+D/test////

Added: debtorrent/branches/upstream/current/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado

Added: debtorrent/branches/upstream/current/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/CVS/Root (added)
+++ debtorrent/branches/upstream/current/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/LICENSE.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/LICENSE.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/LICENSE.txt (added)
+++ debtorrent/branches/upstream/current/LICENSE.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,24 @@
+Unless otherwise noted, all files are released under the MIT
+license, exceptions contain licensing information in them.
+
+Copyright (C) 2001-2002 Bram Cohen
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation files
+(the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of the Software,
+and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+The Software is provided "AS IS", without warranty of any kind,
+express or implied, including but not limited to the warranties of
+merchantability,  fitness for a particular purpose and
+noninfringement. In no event shall the  authors or copyright holders
+be liable for any claim, damages or other liability, whether in an
+action of contract, tort or otherwise, arising from, out of or in
+connection with the Software or the use or other dealings in the
+Software.

Added: debtorrent/branches/upstream/current/README.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/README.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/README.txt (added)
+++ debtorrent/branches/upstream/current/README.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,110 @@
+BitTorrent is a tool for distributing files. It's extremely 
+easy to use - downloads are started by clicking on hyperlinks.
+Whenever more than one person is downloading at once 
+they send pieces of the file(s) to each other, thus relieving 
+the central server's bandwidth burden. Even with many 
+simultaneous downloads, the upload burden on the central server 
+remains quite small, since each new downloader introduces new 
+upload capacity.
+
+Windows web browser support is added by running an installer. 
+A prebuilt one is available, but instructions for building it 
+yourself are in BUILD.windows.txt
+
+Instructions for Unix installation are in INSTALL.unix.txt
+
+To start hosting -
+
+1) start running a tracker
+
+First, you need a tracker. If you're on a dynamic IP or otherwise 
+unreliable connection, you should find someone else's tracker and 
+use that. Otherwise, follow the rest of this step.
+
+Trackers refer downloaders to each other. The load on the tracker 
+is very small, so you only need one for all your files.
+
+To run a tracker, execute the command bttrack.py. Here is an example -
+
+./bttrack.py --port 6969 --dfile dstate
+
+--dfile is where persistent information is kept on the tracker across 
+invocations. It makes everything start working again immediately if 
+you restart the tracker. A new one will be created if it doesn't exist 
+already.
+
+The tracker must be on a net-addressable box, and you must know its 
+IP address or DNS name.
+
+The tracker outputs web logs to standard out. You can get information 
+about the files it's currently serving by getting its index page. 
+
+2) create a metainfo file using btmakemetafile.py
+
+To generate a metainfo file, run btmakemetafile and give it the file 
+you want metainfo for and the URL of the tracker -
+
+./btmakemetafile.py http://my.tracker:6969/announce myfile.ext
+
+This will generate a file called myfile.ext.torrent
+
+Make sure to include the port number in the tracker url if it isn't 80.
+
+This command may take a while, since it scans and hashes the whole file.
+
+The /announce path is special and hard-coded into the tracker. 
+Make sure to give the domain or ip your tracker is on instead of 
+my.tracker.
+
+You can use either a dns name or an IP address in the tracker url.
+
+3) associate .torrent with application/x-bittorrent on your web server
+
+The way you do this is dependent on the particular web server you're using.
+
+You must have a web server which can serve ordinary static files and is 
+addressable from the internet at large.
+
+4) put the newly made .torrent file on your web server
+
+Note that the file name you choose on the server must end in .torrent, so 
+it gets associated with the right mimetype.
+
+5) put up a static page which links to the location you uploaded to in step 4
+
+The file you uploaded in step 4 is linked to using an ordinary url.
+
+6) start a downloader as a resume on the complete file
+
+You have to run a downloader which already has the complete file, 
+so new downloaders have a place to get it from. Here's an example -
+
+./btdownloadheadless.py --url http://my.server/myfile.torrent --saveas myfile.ext
+
+Make sure the saveas argument points to the already complete file.
+
+If you're running the complete downloader on the same machine or LAN as 
+the tracker, give a --ip parameter to the complete downloader. The --ip 
+parameter can be either an IP address or DNS name.
+
+BitTorrent defaults to port 6881. If it can't use 6881 (probably because 
+another download is happening), it tries 6882, then 6883, etc. It gives up 
+after 6889.
+
+7) you're done!
+
+Now you just have to get people downloading! Refer them to the page you 
+created in step 5.
+
+BitTorrent can also publish whole directories - simply point 
+btmakemetafile.py at the directory with files in it, they'll be published 
+as one unit. All files in subdirectories will be included, although files 
+and directories named 'CVS' and 'core' are ignored.
+
+If you have any questions, try the web site or mailing list -
+
+http://bitconjurer.org/BitTorrent/
+
+http://groups.yahoo.com/group/BitTorrent
+
+You can also often find me, Bram, in #bittorrent of irc.freenode.net

Added: debtorrent/branches/upstream/current/bittorrent.nsi
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/bittorrent.nsi?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/bittorrent.nsi (added)
+++ debtorrent/branches/upstream/current/bittorrent.nsi Sat Apr 14 18:47:18 2007
@@ -1,0 +1,83 @@
+OutFile "BitTornado-0.3.18-w32install.exe"
+Name "BitTornado 0.3.18"
+SetCompressor lzma
+InstallDir "$PROGRAMFILES\BitTornado"
+Icon "icon_bt.ico"
+UninstallIcon "icon_done.ico"
+InstallDirRegKey  HKLM "Software\Microsoft\Windows\CurrentVersion\App Paths\btdownloadgui.exe" ""
+DirText "Setup will install BitTornado 0.3.18 in the following folder.$\r$\n$\r$\nTo install in a different folder, click Browse and select another folder."
+ShowInstDetails show
+ShowUnInstDetails show
+
+Section "MainGroup" SEC01
+  SetOutPath "$INSTDIR"
+  IfFileExists "$INSTDIR\_psyco.pyd" +1 +2
+  delete "$INSTDIR\_psyco.pyd"
+  SetOverwrite on
+  File "*.exe"
+  File "*.dll"
+  File "*.pyd"
+  File "library.zip"
+  CreateDirectory "$SMPROGRAMS\BitTornado"
+  CreateShortCut "$SMPROGRAMS\BitTornado\BitTornado.lnk" "$INSTDIR\btdownloadgui.exe"
+#  CreateShortCut "$DESKTOP\BitTornado.lnk" "$INSTDIR\btdownloadgui.exe"
+  CreateShortCut "$SMPROGRAMS\BitTornado\Uninstall.lnk" "$INSTDIR\uninst.exe"
+  SetOverwrite off
+SectionEnd
+
+Section -Post
+  WriteRegStr HKCR .torrent "" bittorrent
+  WriteRegStr HKCR .torrent "Content Type" application/x-bittorrent
+  WriteRegStr HKCR "MIME\Database\Content Type\application/x-bittorrent" Extension .torrent
+  WriteRegStr HKCR bittorrent "" "TORRENT File"
+  WriteRegBin HKCR bittorrent EditFlags 00000100
+  WriteRegStr HKCR "bittorrent\shell" "" open
+  WriteRegStr HKCR "bittorrent\shell\open\command" "" `"$INSTDIR\btdownloadgui.exe" --responsefile "%1"`
+
+  WriteUninstaller "$INSTDIR\uninst.exe"
+  WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\App Paths\btdownloadgui.exe" "" "$INSTDIR\btdownloadgui.exe"
+  WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\BitTornado" "DisplayName" "BitTornado 0.3.18"
+  WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\BitTornado" "UninstallString" "$INSTDIR\uninst.exe"
+  WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\BitTornado" "DisplayIcon" "$INSTDIR\btdownloadgui.exe"
+  WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\BitTornado" "DisplayVersion" "0.3.18"
+  WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\BitTornado" "URLInfoAbout" "http://www.bittornado.com/"
+  WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\BitTornado" "Publisher" "John Hoffman"
+SectionEnd
+
+
+Function un.onUninstSuccess
+  HideWindow
+  MessageBox MB_ICONINFORMATION|MB_OK "BitTornado 0.3.18 was successfully removed from your computer."
+FunctionEnd
+
+Function un.onInit
+  MessageBox MB_ICONQUESTION|MB_YESNO|MB_DEFBUTTON2 "Are you sure you want to completely remove BitTornado 0.3.18 and all of its components?" IDYES +2
+  Abort
+FunctionEnd
+
+Section Uninstall
+  Delete "$SMPROGRAMS\BitTornado\BitTornado.lnk"
+#  Delete "$DESKTOP\BitTornado.lnk"
+  Delete "$SMPROGRAMS\BitTornado\Uninstall.lnk"
+  RMDir "$SMPROGRAMS\BitTornado"
+#  DeleteRegKey HKCR software\bittorrent
+
+  push $1
+  ReadRegStr $1 HKCR "bittorrent\shell\open\command" ""
+  StrCmp $1 `"$INSTDIR\btdownloadgui.exe" --responsefile "%1"` 0 regnotempty
+  DeleteRegKey HKCR bittorrent\shell\open
+  DeleteRegKey /ifempty HKCR bittorrent\shell
+  DeleteRegKey /ifempty HKCR bittorrent
+  ReadRegStr $1 HKCR bittorrent\shell ""
+  StrCmp $1 "" 0 regnotempty
+  DeleteRegKey HKCR .torrent
+  DeleteRegKey HKCR "MIME\Database\Content Type\application/x-bittorrent"
+ regnotempty:
+  pop $1
+  RMDir /r "$INSTDIR"
+
+  DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\BitTornado"
+  DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\App Paths\btdownloadgui.exe"
+  SetAutoClose true
+SectionEnd
+

Added: debtorrent/branches/upstream/current/bt-t-make.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/bt-t-make.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/bt-t-make.py (added)
+++ debtorrent/branches/upstream/current/bt-t-make.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1063 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# modified for multitracker by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from sys import argv, platform, version
+assert version >= '2', "Install Python 2.0 or greater"
+from BitTornado.BT1.makemetafile import make_meta_file
+from threading import Event, Thread, Lock
+from BitTornado.bencode import bencode,bdecode
+import sys, os, shutil
+from os import getcwd, listdir
+from os.path import join, isdir
+from traceback import print_exc
+try:
+    from wxPython.wx import *
+except:
+    print 'wxPython is either not installed or has not been installed properly.'
+    sys.exit(1)
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+basepath=os.path.abspath(os.path.dirname(sys.argv[0]))
+
+if platform == 'win32':
+    DROP_HERE = '(drop here)'
+else:
+    DROP_HERE = ''
+
+
+wxEVT_INVOKE = wxNewEventType()
+
+def EVT_INVOKE(win, func):
+    win.Connect(-1, -1, wxEVT_INVOKE, func)
+
+class InvokeEvent(wxPyEvent):
+    def __init__(self, func, args, kwargs):
+        wxPyEvent.__init__(self)
+        self.SetEventType(wxEVT_INVOKE)
+        self.func = func
+        self.args = args
+        self.kwargs = kwargs
+
+
+class BasicDownloadInfo:
+    def __init__(self, config, calls):
+        self.config = config
+        self.calls = calls
+        
+        self.uiflag = Event()
+        self.cancelflag = Event()
+        self.switchlock = Lock()
+        self.working = False
+        self.queue = []
+        wxInitAllImageHandlers()
+        self.thostselection = self.calls['getCurrentTHost']()
+        self.thostselectnum = 0
+        self.choices = None
+        self.choices1 = None
+        self.announce = ''
+        self.announce_list = None
+
+        self.windowStyle = wxSYSTEM_MENU|wxCAPTION|wxMINIMIZE_BOX
+        if self.config['stayontop']:
+            self.windowStyle |= wxSTAY_ON_TOP
+        frame = wxFrame(None, -1, 'T-Make',
+                        size = wxSize(-1, -1),
+                        style = self.windowStyle)
+        self.frame = frame
+        panel = wxPanel(frame, -1)
+        mainSizer = wxBoxSizer(wxVERTICAL)
+        groupSizer = wxFlexGridSizer(cols = 1, vgap = 0, hgap = 0)
+#        self.dropTarget = self.calls['newDropTarget']((200,200))
+        self.dropTarget = self.calls['newDropTarget']()
+        self.dropTargetPtr = wxStaticBitmap(panel, -1, self.dropTarget)
+        self.calls['setDropTargetRefresh'](self.dropTargetPtr.Refresh)
+        self.dropTargetWidth = self.dropTarget.GetWidth()
+        EVT_LEFT_DOWN(self.dropTargetPtr,self.dropTargetClick)
+        EVT_ENTER_WINDOW(self.dropTargetPtr,self.calls['dropTargetHovered'])
+        EVT_LEAVE_WINDOW(self.dropTargetPtr,self.calls['dropTargetUnhovered'])
+        groupSizer.Add(self.dropTargetPtr,0,wxALIGN_CENTER)        
+        lowerSizer1 = wxGridSizer(cols = 6)
+        dirlink = wxStaticText(panel, -1, 'dir')
+        dirlink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
+        dirlink.SetForegroundColour('blue')
+        EVT_LEFT_UP(dirlink,self.selectdir)
+        lowerSizer1.Add(dirlink, -1, wxALIGN_LEFT)
+        lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
+        lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
+        lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
+        lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
+        filelink = wxStaticText(panel, -1, 'file')
+        filelink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
+        filelink.SetForegroundColour('blue')
+        EVT_LEFT_UP(filelink,self.selectfile)
+        lowerSizer1.Add(filelink, -1, wxALIGN_RIGHT)
+        
+        groupSizer.Add(lowerSizer1, -1, wxALIGN_CENTER)
+
+        self.gauge = wxGauge(panel, -1, range = 1000,
+                             style = wxGA_HORIZONTAL, size = (-1,15))
+        groupSizer.Add(self.gauge, 0, wxEXPAND)
+        self.statustext = wxStaticText(panel, -1, 'ready',
+                            style = wxALIGN_CENTER|wxST_NO_AUTORESIZE)
+        self.statustext.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxBOLD, False))
+        groupSizer.Add(self.statustext, -1, wxEXPAND)
+        self.choices = wxChoice(panel, -1, (-1, -1), (self.dropTargetWidth, -1),
+                                    choices = [])
+        self.choices.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, False))
+        EVT_CHOICE(self.choices, -1, self.set_thost)
+        groupSizer.Add(self.choices, 0, wxEXPAND)
+        cancellink = wxStaticText(panel, -1, 'cancel')
+        cancellink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
+        cancellink.SetForegroundColour('red')
+        EVT_LEFT_UP(cancellink,self.cancel)
+        groupSizer.Add(cancellink, -1, wxALIGN_CENTER)
+        advlink = wxStaticText(panel, -1, 'advanced')
+        advlink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
+        advlink.SetForegroundColour('blue')
+        EVT_LEFT_UP(advlink,self.calls['switchToAdvanced'])
+        groupSizer.Add(advlink, -1, wxALIGN_CENTER)
+        mainSizer.Add(groupSizer, 0, wxALIGN_CENTER)
+
+        self.refresh_thostlist()
+        self._set_thost()
+
+        if platform == 'win32':
+            self.dropTargetPtr.DragAcceptFiles(True)
+            EVT_DROP_FILES(self.dropTargetPtr, self.selectdrop)
+ 
+#        border = wxBoxSizer(wxHORIZONTAL)
+#        border.Add(mainSizer, 1, wxEXPAND | wxALL, 0)
+        panel.SetSizer(mainSizer)
+        panel.SetAutoLayout(True)
+#        border.Fit(panel)
+        mainSizer.Fit(panel)
+        frame.Fit()
+        frame.Show(True)
+
+        EVT_INVOKE(frame, self.onInvoke)
+        EVT_CLOSE(frame, self._close)
+
+
+    def selectdir(self, x = None):
+        self.calls['dropTargetHovered']()
+        dl = wxDirDialog(self.frame, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
+        if dl.ShowModal() == wxID_OK:
+            self.calls['dropTargetDropped']()
+            self.complete(dl.GetPath())
+        else:
+            self.calls['dropTargetUnhovered']()
+
+    def selectfile(self, x = None):
+        self.calls['dropTargetHovered']()
+        dl = wxFileDialog (self.frame, 'Choose file to use', '', '', '', wxOPEN)
+        if dl.ShowModal() == wxID_OK:
+            self.calls['dropTargetDropped']()
+            self.complete(dl.GetPath())
+        else:
+            self.calls['dropTargetUnhovered']()
+
+    def selectdrop(self, dat):
+        self.calls['dropTargetDropped']()
+        for f in dat.GetFiles():
+            self.complete(f)
+
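+    # Read the announce URL (and optional announce-list) from a bencoded .thost file.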
+    def _announcecopy(self, f):
+        try:
+            h = open(f, 'rb')
+            metainfo = bdecode(h.read())
+            h.close()
+            self.announce = metainfo['announce']
+            if metainfo.has_key('announce-list'):
+                self.announce_list = metainfo['announce-list']
+            else:
+                self.announce_list = None
+        except:
+            return
+
+    def complete(self, x):
+        params = {'piece_size_pow2': 0}
+        if self.announce_list:
+            params['real_announce_list'] = self.announce_list
+        self.queue.append((x, self.announce, params))
+        self.go_queue()
+
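+    # Start the next queued job, if any, unless a MakeMetafile worker is already running.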
+    def go_queue(self):
+        self.switchlock.acquire()
+        if self.queue and not self.working:
+            self.working = True
+            self.statustext.SetLabel('working')
+            q = self.queue.pop(0)
+            MakeMetafile(q[0], q[1], q[2], self)
+        self.switchlock.release()
+
+    def cancel(self, x):
+        self.switchlock.acquire()
+        if self.working:
+            self.working = False
+            self.cancelflag.set()
+            self.cancelflag = Event()
+            self.queue = []
+            self.statustext.SetLabel('CANCELED')
+            self.calls['dropTargetError']()
+        self.switchlock.release()
+
+    def dropTargetClick(self, x):
+        if x.GetPosition()[0] < int(self.dropTargetWidth*0.4):
+            self.selectdir()
+        elif x.GetPosition()[0] > int(self.dropTargetWidth*0.6):
+            self.selectfile()
+
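+    # Rescan the thosts/ directory for *.thost files and rebuild the drop-down,
+    # falling back to the first host if the current selection is gone.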
+    def refresh_thostlist(self):
+        l = []
+        d = 0
+        for f in listdir(join(basepath,'thosts')):
+            if f[-6:].lower() == '.thost':
+                l.append(f)
+                if f == self.thostselection:
+                    d = len(l)
+        self.choices.Clear()
+        if not d:
+            if l:
+                self.thostselection = l[0]
+                d = 1
+            else:
+                self.thostselection = ''
+                d = 1
+            self.config['thost'] = self.thostselection
+            self.calls['saveConfig']()
+        for f in l:
+            self.choices.Append(f[:-6])
+        self.thostselectnum = d-1
+        self.thostlist = l
+        self.choices.SetSelection(d-1)
+        return
+
+    def set_thost(self, x):
+        n = self.choices.GetSelection()
+        if n != self.thostselectnum:
+            self.thostselectnum = n
+            if self.thostlist:
+                # the basic drop-down has no placeholder entry, so the selection
+                # index maps directly onto thostlist
+                self.thostselection = self.thostlist[n]
+                self._set_thost()
+
+    def _set_thost(self):
+        self._announcecopy(join(join(basepath,'thosts'),self.thostselection))
+        self.calls['setCurrentTHost'](self.thostselection)
+
+    def onInvoke(self, event):
+        if not self.uiflag.isSet():
+            apply(event.func, event.args, event.kwargs)
+
+    def invokeLater(self, func, args = [], kwargs = {}):
+        if not self.uiflag.isSet():
+            wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
+
+    def build_setgauge(self, x):
+        self.invokeLater(self.on_setgauge, [x])
+
+    def on_setgauge(self, x):
+        self.gauge.SetValue(int(x*1000))
+
+    def build_done(self):
+        self.invokeLater(self.on_builddone)
+
+    def on_builddone(self):
+        self.gauge.SetValue(0)
+        self.statustext.SetLabel('done!')
+        self.calls['dropTargetSuccess']()
+        self.working = False
+        self.go_queue()
+
+    def build_failed(self, e):
+        self.invokeLater(self.on_buildfailed, [e])
+
+    def on_buildfailed(self, e):        
+        self.gauge.SetValue(0)
+        self.statustext.SetLabel('ERROR')
+        self.calls['dropTargetError']()
+        self.working = False
+        self.go_queue()
+
+    def close(self):
+        self.cancelflag = None   # this is a planned switch, don't cancel
+        self.uiflag.set()
+        self.frame.Close()
+
+    def _close(self, x = None):
+        self.uiflag.set()
+        try:
+            self.cancelflag.set()
+        except:
+            pass
+        self.frame.Destroy()
+
+
+
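+# Advanced maker window: adds tracker-list editing, piece size and comment fields,
+# and torrent-host (.thost) management on top of the basic drop-target interface.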
+class AdvancedDownloadInfo:
+    def __init__(self, config, calls):
+        self.config = config
+        self.calls = calls
+        
+        self.uiflag = Event()
+        self.cancelflag = Event()
+        self.switchlock = Lock()
+        self.working = False
+        self.queue = []
+        wxInitAllImageHandlers()
+        self.thostselection = self.calls['getCurrentTHost']()
+        self.thostselectnum = 0
+        self.choices = None
+        self.choices1 = None
+
+        self.windowStyle = wxSYSTEM_MENU|wxCAPTION|wxMINIMIZE_BOX
+        if self.config['stayontop']:
+            self.windowStyle |= wxSTAY_ON_TOP
+        frame = wxFrame(None, -1, 'T-Make',
+                        size = wxSize(-1, -1),
+                        style = self.windowStyle)
+        self.frame = frame
+        panel = wxPanel(frame, -1)
+
+        fullSizer = wxFlexGridSizer(cols = 1, vgap = 0, hgap = 8)
+        
+        colSizer = wxFlexGridSizer(cols = 2, vgap = 0, hgap = 8)
+        leftSizer = wxFlexGridSizer(cols = 1, vgap = 3)
+
+        self.stayontop_checkbox = wxCheckBox(panel, -1, "stay on top")
+        self.stayontop_checkbox.SetValue(self.config['stayontop'])
+        EVT_CHECKBOX(frame, self.stayontop_checkbox.GetId(), self.setstayontop)
+        leftSizer.Add(self.stayontop_checkbox, -1, wxALIGN_CENTER)
+        leftSizer.Add(wxStaticText(panel, -1, ''))
+
+        button = wxButton(panel, -1, 'use image...')
+        EVT_BUTTON(frame, button.GetId(), self.selectDropTarget)
+        leftSizer.Add(button, -1, wxALIGN_CENTER)
+        
+        self.groupSizer1Box = wxStaticBox(panel, -1, '')
+        groupSizer1 = wxStaticBoxSizer(self.groupSizer1Box, wxHORIZONTAL)
+        groupSizer = wxFlexGridSizer(cols = 1, vgap = 0)
+        self.dropTarget = self.calls['newDropTarget']((200,200))
+#        self.dropTarget = self.calls['newDropTarget']()
+        self.dropTargetPtr = wxStaticBitmap(panel, -1, self.dropTarget)
+        self.calls['setDropTargetRefresh'](self.dropTargetPtr.Refresh)
+        self.dropTargetWidth = self.dropTarget.GetWidth()
+        EVT_LEFT_DOWN(self.dropTargetPtr,self.dropTargetClick)
+        EVT_ENTER_WINDOW(self.dropTargetPtr,self.calls['dropTargetHovered'])
+        EVT_LEAVE_WINDOW(self.dropTargetPtr,self.calls['dropTargetUnhovered'])
+        groupSizer.Add(self.dropTargetPtr,0,wxALIGN_CENTER)        
+        lowerSizer1 = wxGridSizer(cols = 3)
+        dirlink = wxStaticText(panel, -1, 'dir')
+        dirlink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
+        dirlink.SetForegroundColour('blue')
+        EVT_LEFT_UP(dirlink,self.selectdir)
+        lowerSizer1.Add(dirlink, -1, wxALIGN_LEFT)
+        lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
+        filelink = wxStaticText(panel, -1, 'file')
+        filelink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
+        filelink.SetForegroundColour('blue')
+        EVT_LEFT_UP(filelink,self.selectfile)
+        lowerSizer1.Add(filelink, -1, wxALIGN_RIGHT)
+        
+        groupSizer.Add(lowerSizer1, -1, wxALIGN_CENTER)
+
+        self.gauge = wxGauge(panel, -1, range = 1000,
+                             style = wxGA_HORIZONTAL, size = (-1,15))
+        groupSizer.Add(self.gauge, 0, wxEXPAND)
+        self.statustext = wxStaticText(panel, -1, 'ready',
+                            style = wxALIGN_CENTER|wxST_NO_AUTORESIZE)
+        self.statustext.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxBOLD, False))
+        groupSizer.Add(self.statustext, -1, wxEXPAND)
+        self.choices = wxChoice(panel, -1, (-1, -1), (self.dropTargetWidth, -1),
+                                    choices = [])
+        self.choices.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, False))
+        EVT_CHOICE(self.choices, -1, self.set_thost)
+        groupSizer.Add(self.choices, 0, wxEXPAND)
+        cancellink = wxStaticText(panel, -1, 'cancel')
+        cancellink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
+        cancellink.SetForegroundColour('red')
+        EVT_LEFT_UP(cancellink,self.cancel)
+        groupSizer.Add(cancellink, -1, wxALIGN_CENTER)
+        dummyadvlink = wxStaticText(panel, -1, 'advanced')
+        dummyadvlink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, False))
+        dummyadvlink.SetForegroundColour('blue')
+        EVT_LEFT_UP(dirlink,self.selectdir)
+        groupSizer.Add(dummyadvlink, -1, wxALIGN_CENTER)
+        groupSizer1.Add(groupSizer)
+        leftSizer.Add(groupSizer1, -1, wxALIGN_CENTER)
+
+        leftSizer.Add(wxStaticText(panel, -1, 'make torrent of:'),0,wxALIGN_CENTER)
+
+        self.dirCtl = wxTextCtrl(panel, -1, '', size = (250,-1))
+        leftSizer.Add(self.dirCtl, 1, wxEXPAND)
+        
+        b = wxBoxSizer(wxHORIZONTAL)
+        button = wxButton(panel, -1, 'dir')
+        EVT_BUTTON(frame, button.GetId(), self.selectdir)
+        b.Add(button, 0)
+
+        button2 = wxButton(panel, -1, 'file')
+        EVT_BUTTON(frame, button2.GetId(), self.selectfile)
+        b.Add(button2, 0)
+
+        leftSizer.Add(b, 0, wxALIGN_CENTER)
+
+        leftSizer.Add(wxStaticText(panel, -1, ''))
+
+        simple_link = wxStaticText(panel, -1, 'back to basic mode')
+        simple_link.SetFont(wxFont(-1, wxDEFAULT, wxNORMAL, wxNORMAL, True))
+        simple_link.SetForegroundColour('blue')
+        EVT_LEFT_UP(simple_link,self.calls['switchToBasic'])
+        leftSizer.Add(simple_link, -1, wxALIGN_CENTER)
+
+        colSizer.Add(leftSizer, -1, wxALIGN_CENTER_VERTICAL)
+
+        gridSizer = wxFlexGridSizer(cols = 2, vgap = 6, hgap = 8)
+
+        gridSizer.Add(wxStaticText(panel, -1, 'Torrent host:'), -1,
+                      wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
+
+        self.choices1 = wxChoice(panel, -1, (-1, -1), (-1, -1),
+                                    choices = [])
+        EVT_CHOICE(self.choices1, -1, self.set_thost1)
+        gridSizer.Add(self.choices1, 0, wxEXPAND)
+
+        b = wxBoxSizer(wxHORIZONTAL)
+        button1 = wxButton(panel, -1, 'set default')
+        EVT_BUTTON(frame, button1.GetId(), self.set_default_thost)
+        b.Add(button1, 0)
+        b.Add(wxStaticText(panel, -1, '       '))
+        button2 = wxButton(panel, -1, 'delete')
+        EVT_BUTTON(frame, button2.GetId(), self.delete_thost)
+        b.Add(button2, 0)
+        b.Add(wxStaticText(panel, -1, '       '))
+        button3 = wxButton(panel, -1, 'save as...')
+        EVT_BUTTON(frame, button3.GetId(), self.save_thost)
+        b.Add(button3, 0)
+
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        gridSizer.Add(b, 0, wxALIGN_CENTER)
+
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+
+        gridSizer.Add(wxStaticText(panel, -1, 'single tracker url:'),0,
+                      wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
+        self.annCtl = wxTextCtrl(panel, -1, 'http://my.tracker:6969/announce')
+        gridSizer.Add(self.annCtl, 0, wxEXPAND)
+
+        a = wxFlexGridSizer(cols = 1, vgap = 3)
+        a.Add(wxStaticText(panel, -1, 'tracker list:'),0,wxALIGN_RIGHT)
+        a.Add(wxStaticText(panel, -1, ''))
+        abutton = wxButton(panel, -1, 'copy\nannounces\nfrom\ntorrent', size = (70,70))
+        EVT_BUTTON(frame, abutton.GetId(), self.announcecopy)
+        a.Add(abutton, -1, wxALIGN_CENTER)
+        a.Add(wxStaticText(panel, -1, DROP_HERE), -1, wxALIGN_CENTER)
+        gridSizer.Add(a, -1, wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
+
+
+        self.annListCtl = wxTextCtrl(panel, -1, '\n\n\n\n\n', wxPoint(-1,-1), (300,120),
+                                            wxTE_MULTILINE|wxHSCROLL|wxTE_DONTWRAP)
+        gridSizer.Add(self.annListCtl, -1, wxEXPAND)
+
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        exptext = wxStaticText(panel, -1,
+                "a list of tracker urls separated by commas or whitespace\n" +
+                "and on several lines - trackers on the same line will be\n" +
+                "tried randomly, and all the trackers on one line\n" +
+                "will be tried before the trackers on the next line.")
+        exptext.SetFont(wxFont(6, wxDEFAULT, wxNORMAL, wxNORMAL, False))
+        gridSizer.Add(exptext, -1, wxALIGN_CENTER)
+
+        self.refresh_thostlist()
+        self._set_thost()
+
+        if platform == 'win32':
+            self.dropTargetPtr.DragAcceptFiles(True)
+            EVT_DROP_FILES(self.dropTargetPtr, self.selectdrop)
+            self.groupSizer1Box.DragAcceptFiles(True)
+            EVT_DROP_FILES(self.groupSizer1Box, self.selectdrop)
+            abutton.DragAcceptFiles(True)
+            EVT_DROP_FILES(abutton, self.announcedrop)
+            self.annCtl.DragAcceptFiles(True)
+            EVT_DROP_FILES(self.annCtl, self.announcedrop)
+            self.annListCtl.DragAcceptFiles(True)
+            EVT_DROP_FILES(self.annListCtl, self.announcedrop)
+
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+
+        gridSizer.Add(wxStaticText(panel, -1, 'piece size:'),0,
+                      wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
+        self.piece_length = wxChoice(panel, -1,
+                 choices = ['automatic', '2MiB', '1MiB', '512KiB', '256KiB', '128KiB', '64KiB', '32KiB'])
+        self.piece_length_list = [0,       21,     20,      19,       18,       17,      16,      15]
+        self.piece_length.SetSelection(0)
+        gridSizer.Add(self.piece_length)
+
+        gridSizer.Add(wxStaticText(panel, -1, 'comment:'),0,
+                      wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
+        self.commentCtl = wxTextCtrl(panel, -1, '')
+        gridSizer.Add(self.commentCtl, 0, wxEXPAND)
+
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+
+        b1 = wxButton(panel, -1, 'Cancel', size = (-1, 30))
+        EVT_BUTTON(frame, b1.GetId(), self.cancel)
+        gridSizer.Add(b1, 0, wxEXPAND)
+        b2 = wxButton(panel, -1, 'MAKE TORRENT', size = (-1, 30))
+        EVT_BUTTON(frame, b2.GetId(), self.complete)
+        gridSizer.Add(b2, 0, wxEXPAND)
+
+        gridSizer.AddGrowableCol(1)
+        colSizer.Add(gridSizer, -1, wxALIGN_CENTER_VERTICAL)
+        fullSizer.Add(colSizer)
+
+ 
+        border = wxBoxSizer(wxHORIZONTAL)
+        border.Add(fullSizer, 1, wxEXPAND | wxALL, 15)
+        panel.SetSizer(border)
+        panel.SetAutoLayout(True)
+        border.Fit(panel)
+        frame.Fit()
+        frame.Show(True)
+
+        EVT_INVOKE(frame, self.onInvoke)
+        EVT_CLOSE(frame, self._close)
+
+    def setstayontop(self, x):
+        if self.stayontop_checkbox.GetValue():
+            self.windowStyle |= wxSTAY_ON_TOP
+        else:
+            self.windowStyle &= ~wxSTAY_ON_TOP
+        self.frame.SetWindowStyle(self.windowStyle)
+        self.config['stayontop'] = self.stayontop_checkbox.GetValue()
+
+    def selectdir(self, x = None):
+        self.calls['dropTargetHovered']()
+        dl = wxDirDialog(self.frame, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
+        if dl.ShowModal() == wxID_OK:
+            self.dirCtl.SetValue(dl.GetPath())
+            self.calls['dropTargetDropped']()
+        else:
+            self.calls['dropTargetUnhovered']()
+
+    def selectfile(self, x = None):
+        self.calls['dropTargetHovered']()
+        dl = wxFileDialog (self.frame, 'Choose file to use', '', '', '', wxOPEN)
+        if dl.ShowModal() == wxID_OK:
+            self.dirCtl.SetValue(dl.GetPath())
+            self.calls['dropTargetDropped']()
+        else:
+            self.calls['dropTargetUnhovered']()
+
+    def selectdrop(self, dat):
+        self.calls['dropTargetDropped']()
+        for f in dat.GetFiles():
+            self.complete(f)
+
+    def announcecopy(self, x):
+        dl = wxFileDialog (self.frame, 'Choose .torrent file to use', '', '', '*.torrent', wxOPEN)
+        if dl.ShowModal() == wxID_OK:
+            self._announcecopy(dl.GetPath(), True)
+
+    def announcedrop(self, dat):
+        self._announcecopy(dat.GetFiles()[0], True)
+
+    def _announcecopy(self, f, external = False):
+        try:
+            h = open(f, 'rb')
+            metainfo = bdecode(h.read())
+            h.close()
+            self.annCtl.SetValue(metainfo['announce'])
+            if metainfo.has_key('announce-list'):
+                list = []
+                for tier in metainfo['announce-list']:
+                    for tracker in tier:
+                        list += [tracker, ', ']
+                    del list[-1]
+                    list += ['\n']
+                liststring = ''
+                for i in list:
+                    liststring += i
+                self.annListCtl.SetValue(liststring+'\n\n')
+            else:
+                self.annListCtl.SetValue('')
+            if external:
+                self.choices.SetSelection(0)
+                self.choices1.SetSelection(0)
+        except:
+            return
+
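+    # Parse the tracker-list text box: one tier per line, trackers within a tier
+    # separated by commas or whitespace.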
+    def getannouncelist(self):
+        list = []
+        for t in self.annListCtl.GetValue().split('\n'):
+            tier = []
+            t = t.replace(',',' ')
+            for tr in t.split(' '):
+                if tr != '':
+                    tier += [tr]
+            if len(tier)>0:
+                list.append(tier)
+        return list
+    
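+    # Validate the form, warn about questionable multitracker setups, then queue the job.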
+    def complete(self, x):
+        if not self.dirCtl.GetValue():
+            dlg = wxMessageDialog(self.frame, message = 'You must select a\nfile or directory', 
+                caption = 'Error', style = wxOK | wxICON_ERROR)
+            dlg.ShowModal()
+            dlg.Destroy()
+            return
+        if not self.annCtl.GetValue():
+            dlg = wxMessageDialog(self.frame, message = 'You must specify a\nsingle tracker url', 
+                caption = 'Error', style = wxOK | wxICON_ERROR)
+            dlg.ShowModal()
+            dlg.Destroy()
+            return
+        params = {'piece_size_pow2': self.piece_length_list[self.piece_length.GetSelection()]}
+        annlist = self.getannouncelist()
+        if len(annlist)>0:
+            warnings = ''
+            for tier in annlist:
+                if len(tier) > 1:
+                    warnings += (
+                        'WARNING: You should not specify multiple trackers\n' +
+                        '     on the same line of the tracker list unless\n' +
+                        '     you are certain they share peer data.\n')
+                    break
+            if self.annCtl.GetValue() not in annlist[0]:
+                warnings += (
+                    'WARNING: The single tracker url is not present in\n' +
+                    '     the first line of the tracker list.  This\n' +
+                    '     may produce a dysfunctional torrent.\n')
+            if warnings:
+                warnings += ('Are you sure you wish to produce a .torrent\n' +
+                             'with these parameters?')
+                dlg = wxMessageDialog(self.frame,
+                        message = warnings,
+                        caption = 'Warning', style = wxYES_NO | wxICON_QUESTION)
+                if dlg.ShowModal() != wxID_YES:
+                    dlg.Destroy()
+                    return
+            params['real_announce_list'] = annlist
+        comment = self.commentCtl.GetValue()
+        if comment != '':
+            params['comment'] = comment
+        self.statustext.SetLabel('working')
+        self.queue.append((self.dirCtl.GetValue(), self.annCtl.GetValue(), params))
+        self.go_queue()
+
+    def go_queue(self):
+        self.switchlock.acquire()
+        if self.queue and not self.working:
+            self.working = True
+            self.statustext.SetLabel('working')
+            q = self.queue.pop(0)
+            MakeMetafile(q[0], q[1], q[2], self)
+        self.switchlock.release()
+
+    def cancel(self, x):
+        self.switchlock.acquire()
+        if self.working:
+            self.working = False
+            self.cancelflag.set()
+            self.cancelflag = Event()
+            self.queue = []
+            self.statustext.SetLabel('CANCELED')
+            self.calls['dropTargetError']()
+        self.switchlock.release()
+
+    def selectDropTarget(self, x):
+        dl = wxFileDialog (self.frame, 'Choose image to use', join(basepath,'targets'),
+                        join(join(basepath,'targets'), self.config['target']),
+                        'Supported images (*.bmp,*.gif)|*.*', wxOPEN|wxHIDE_READONLY)
+        if dl.ShowModal() == wxID_OK:
+            try:
+                self.calls['changeDropTarget'](dl.GetPath())
+                self.config['target'] = dl.GetPath()
+            except:
+                pass
+
+    def dropTargetClick(self, x):
+        if x.GetPosition()[0] < int(self.dropTargetWidth*0.4):
+            self.selectdir()
+        elif x.GetPosition()[0] > int(self.dropTargetWidth*0.6):
+            self.selectfile()
+
+    def refresh_thostlist(self):
+        l = []
+        d = 0
+        for f in listdir(join(basepath,'thosts')):
+            if f[-6:].lower() == '.thost':
+                l.append(f)
+                if f == self.thostselection:
+                    d = len(l)
+        self.choices.Clear()
+        self.choices.Append(' ')
+        self.choices1.Clear()
+        self.choices1.Append('---')
+        if not d:
+            if l:
+                self.thostselection = l[0]
+                d = 1
+            else:
+                self.thostselection = ''
+                d = 0
+            self.config['thost'] = self.thostselection
+        for f in l:
+            f1 = f[:-6]
+            self.choices.Append(f1)
+            if f == self.config['thost']:
+                f1 += ' (default)'
+            self.choices1.Append(f1)
+        self.thostselectnum = d
+        self.thostlist = l
+        self.choices.SetSelection(d)
+        self.choices1.SetSelection(d)
+
+    def set_thost(self, x):
+        n = self.choices.GetSelection()
+        if n != self.thostselectnum:
+            self.thostselectnum = n
+            self.choices1.SetSelection(n)
+            if n:
+                self.thostselection = self.thostlist[n-1]
+                self._set_thost()
+
+    def set_thost1(self, x):
+        n = self.choices1.GetSelection()
+        if n != self.thostselectnum:
+            self.thostselectnum = n
+            self.choices.SetSelection(n)
+            if n:
+                self.thostselection = self.thostlist[n-1]
+                self._set_thost()
+
+    def _set_thost(self):
+        self._announcecopy(join(join(basepath,'thosts'),self.thostselection))
+        self.calls['setCurrentTHost'](self.thostselection)
+
+    def set_default_thost(self, x):
+        if self.thostlist:
+            self.config['thost'] = self.thostselection
+            self.refresh_thostlist()
+
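+    # Bencode the current announce settings (after the same multitracker sanity
+    # checks as complete) and save them as a .thost file under thosts/.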
+    def save_thost(self, x):
+        if not self.annCtl.GetValue():
+            dlg = wxMessageDialog(self.frame, message = 'You must specify a\nsingle tracker url', 
+                caption = 'Error', style = wxOK | wxICON_ERROR)
+            dlg.ShowModal()
+            dlg.Destroy()
+            return
+        try:
+            metainfo = {}
+            metainfo['announce'] = self.annCtl.GetValue()
+            annlist = self.getannouncelist()
+            if len(annlist)>0:
+                warnings = ''
+                for tier in annlist:
+                    if len(tier) > 1:
+                        warnings += (
+                            'WARNING: You should not specify multiple trackers\n' +
+                            '     on the same line of the tracker list unless\n' +
+                            '     you are certain they share peer data.\n')
+                        break
+                if self.annCtl.GetValue() not in annlist[0]:
+                    warnings += (
+                        'WARNING: The single tracker url is not present in\n' +
+                        '     the first line of the tracker list.  This\n' +
+                        '     may produce a dysfunctional torrent.\n')
+                if warnings:
+                    warnings += ('Are you sure you wish to save a torrent host\n' +
+                                 'with these parameters?')
+                    dlg = wxMessageDialog(self.frame,
+                            message = warnings,
+                            caption = 'Warning', style = wxYES_NO | wxICON_QUESTION)
+                    if dlg.ShowModal() != wxID_YES:
+                        dlg.Destroy()
+                        return
+                metainfo['announce-list'] = annlist
+            metainfo = bencode(metainfo)
+        except:
+            return
+        
+        if self.thostselectnum:
+            d = self.thostselection
+        else:
+            d = '.thost'
+        dl = wxFileDialog (self.frame, 'Save tracker data as',
+                           join(basepath,'thosts'), d, '*.thost',
+                           wxSAVE|wxOVERWRITE_PROMPT)
+        if dl.ShowModal() != wxID_OK:
+            return
+        d = dl.GetPath()
+
+        try:
+            f = open(d,'wb')
+            f.write(metainfo)
+            f.close()
+            garbage, self.thostselection = os.path.split(d)
+        except:
+            pass
+        self.refresh_thostlist()
+
+    def delete_thost(self, x):
+        dlg = wxMessageDialog(self.frame,
+                message = 'Are you sure you want to delete\n'+self.thostselection[:-6]+'?', 
+                caption = 'Warning', style = wxYES_NO | wxICON_EXCLAMATION)
+        if dlg.ShowModal() != wxID_YES:
+            dlg.Destroy()
+            return
+        dlg.Destroy()
+        os.remove(join(join(basepath,'thosts'),self.thostselection))
+        self.thostselection = None
+        self.refresh_thostlist()
+
+    def onInvoke(self, event):
+        if not self.uiflag.isSet():
+            apply(event.func, event.args, event.kwargs)
+
+    def invokeLater(self, func, args = [], kwargs = {}):
+        if not self.uiflag.isSet():
+            wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
+
+    def build_setgauge(self, x):
+        self.invokeLater(self.on_setgauge, [x])
+
+    def on_setgauge(self, x):
+        self.gauge.SetValue(int(x*1000))
+
+    def build_done(self):
+        self.invokeLater(self.on_builddone)
+
+    def on_builddone(self):
+        self.gauge.SetValue(0)
+        self.statustext.SetLabel('done!')
+        self.calls['dropTargetSuccess']()
+        self.working = False
+        self.go_queue()
+
+    def build_failed(self, e):
+        self.invokeLater(self.on_buildfailed, [e])
+
+    def on_buildfailed(self, e):        
+        self.gauge.SetValue(0)
+        self.statustext.SetLabel('ERROR')
+        self.calls['dropTargetError']()
+        self.working = False
+        self.go_queue()
+
+    def close(self):
+        self.cancelflag = None   # this is a planned switch, don't cancel
+        self.uiflag.set()
+        self.frame.Close()
+
+    def _close(self, x = None):
+        self.uiflag.set()
+        try:
+            self.cancelflag.set()
+        except:
+            pass
+        self.calls['saveConfig']()
+        self.frame.Destroy()
+
+        
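+# Runs make_meta_file in a background thread, reporting progress and completion
+# back to the window through its build_setgauge/build_done/build_failed methods.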
+class MakeMetafile:
+    def __init__(self, d, a, params, external = None):
+        self.d = d
+        self.a = a
+        self.params = params
+
+        self.call = external
+#        self.uiflag = external.uiflag
+        self.uiflag = external.cancelflag
+        Thread(target = self.complete).start()
+
+    def complete(self):
+        try:
+            make_meta_file(self.d, self.a, self.params, self.uiflag,
+                            self.call.build_setgauge, progress_percent = 1)
+            if not self.uiflag.isSet():
+                self.call.build_done()
+        except (OSError, IOError), e:
+            self.failed(e)
+        except Exception, e:
+            print_exc()
+            self.failed(e)
+
+    def failed(self, e):
+        e = str(e)
+        self.call.build_failed(e)
+        dlg = wxMessageDialog(self.call.frame, message = 'Error - ' + e,
+            caption = 'Error', style = wxOK | wxICON_ERROR)
+        dlg.ShowModal()
+        dlg.Destroy()
+
+
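+# Top-level controller: persists settings with wxConfig, manages the drop-target
+# bitmap, and switches between the basic and advanced windows.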
+class T_make:
+    def __init__(self):
+        self.configobj = wxConfig('BitTorrent_T-make',style=wxCONFIG_USE_LOCAL_FILE)
+        self.getConfig()
+        self.currentTHost = self.config['thost']
+#        self.d = AdvancedDownloadInfo(self.config, self.getCalls())
+        self.d = BasicDownloadInfo(self.config, self.getCalls())
+
+    def getConfig(self):
+        config = {}
+        try:
+            config['stayontop'] = self.configobj.ReadInt('stayontop',True)
+        except:
+            config['stayontop'] = True
+            self.configobj.WriteInt('stayontop',True)
+        try:
+            config['target'] = self.configobj.Read('target','default.gif')
+        except:
+            config['target'] = 'default.gif'
+            self.configobj.Write('target','default.gif')
+        try:
+            config['thost'] = self.configobj.Read('thost','')
+        except:
+            config['thost'] = ''
+            self.configobj.Write('thost','')
+        self.configobj.Flush()
+        self.config = config
+
+    def saveConfig(self):
+        self.configobj.WriteInt('stayontop',self.config['stayontop'])
+        self.configobj.Write('target',self.config['target'])
+        self.configobj.Write('thost',self.config['thost'])
+        self.configobj.Flush()
+
+    def getCalls(self):
+        calls = {}
+        calls['saveConfig'] = self.saveConfig
+        calls['newDropTarget'] = self.newDropTarget
+        calls['setDropTargetRefresh'] = self.setDropTargetRefresh
+        calls['changeDropTarget'] = self.changeDropTarget
+        calls['setCurrentTHost'] = self.setCurrentTHost
+        calls['getCurrentTHost'] = self.getCurrentTHost
+        calls['dropTargetHovered'] = self.dropTargetHovered
+        calls['dropTargetUnhovered'] = self.dropTargetUnhovered
+        calls['dropTargetDropped'] = self.dropTargetDropped
+        calls['dropTargetSuccess'] = self.dropTargetSuccess
+        calls['dropTargetError'] = self.dropTargetError
+        calls['switchToBasic'] = self.switchToBasic
+        calls['switchToAdvanced'] = self.switchToAdvanced
+        return calls
+
+    def setCurrentTHost(self, x):
+        self.currentTHost = x
+
+    def getCurrentTHost(self):
+        return self.currentTHost
+
+    def newDropTarget(self, wh = None):
+        if wh:
+            self.dropTarget = wxEmptyBitmap(wh[0],wh[1])
+            try:
+                self.changeDropTarget(self.config['target'])
+            except:
+                pass
+        else:
+            try:
+                self.dropTarget = self._dropTargetRead(self.config['target'])
+            except:
+                try:
+                    self.dropTarget = self._dropTargetRead('default.gif')
+                    self.config['target'] = 'default.gif'
+                    self.saveConfig()
+                except:
+                    self.dropTarget = wxEmptyBitmap(100,100)
+        return self.dropTarget
+
+    def setDropTargetRefresh(self, refreshfunc):
+        self.dropTargetRefresh = refreshfunc
+
+    def changeDropTarget(self, new):
+        bmp = self._dropTargetRead(new)
+        w1,h1 = self.dropTarget.GetWidth(),self.dropTarget.GetHeight()
+        w,h = bmp.GetWidth(),bmp.GetHeight()
+        x1,y1 = int((w1-w)/2.0),int((h1-h)/2.0)
+        bbdata = wxMemoryDC()
+        bbdata.SelectObject(self.dropTarget)
+        bbdata.SetPen(wxTRANSPARENT_PEN)
+        bbdata.SetBrush(wxBrush(wx.wxSystemSettings_GetColour(wxSYS_COLOUR_MENU),wxSOLID))
+        bbdata.DrawRectangle(0,0,w1,h1)
+        bbdata.SetPen(wxBLACK_PEN)
+        bbdata.SetBrush(wxTRANSPARENT_BRUSH)
+        bbdata.DrawRectangle(x1-1,y1-1,w+2,h+2)
+        bbdata.DrawBitmap(bmp,x1,y1,True)
+        try:
+            self.dropTargetRefresh()
+        except:
+            pass
+
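+    # Load a .gif or .bmp image from targets/, first copying it there under a
+    # unique name if the chosen file lives elsewhere.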
+    def _dropTargetRead(self, new):
+        a,b = os.path.split(new)
+        if a and a != join(basepath,'targets'):
+            if a != join(basepath,'targets'):
+                b1,b2 = os.path.splitext(b)
+                z = 0
+                while os.path.isfile(join(join(basepath,'targets'),b)):
+                    z += 1
+                    b = b1+'('+str(z)+')'+b2
+                shutil.copyfile(new,join(join(basepath,'targets'),b))
+            new = b
+        name = join(join(basepath,'targets'),new)
+        garbage, e = os.path.splitext(new.lower())
+        if e == '.gif':
+            bmp = wxBitmap(name, wxBITMAP_TYPE_GIF)
+        elif e == '.bmp':
+            bmp = wxBitmap(name, wxBITMAP_TYPE_BMP)
+        else:
+            assert False
+        return bmp
+
+    def dropTargetHovered(self, x = None):
+        pass
+
+    def dropTargetUnhovered(self, x = None):
+        pass
+
+    def dropTargetDropped(self, x = None):
+        pass
+
+    def dropTargetSuccess(self, x = None):
+        pass
+
+    def dropTargetError(self, x = None):
+        pass
+
+    def switchToBasic(self, x = None):
+        self.d.close()
+        self.d = BasicDownloadInfo(self.config, self.getCalls())
+        
+    def switchToAdvanced(self, x = None):
+        self.d.close()
+        self.d = AdvancedDownloadInfo(self.config, self.getCalls())
+        
+
+
+class btWxApp(wxApp):
+    def OnInit(self):
+        self.APP = T_make()
+        return True
+
+if __name__ == '__main__':
+    btWxApp().MainLoop()

Propchange: debtorrent/branches/upstream/current/bt-t-make.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/bt_MakeCreateIcons.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/bt_MakeCreateIcons.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/bt_MakeCreateIcons.py (added)
+++ debtorrent/branches/upstream/current/bt_MakeCreateIcons.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,96 @@
+#!/usr/bin/env python
+# Written by John Hoffman
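+# Regenerates CreateIcons.py, embedding each icon as a zlib-compressed,
+# base64-encoded string together with GetIcons/CreateIcon helpers.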
+
+from time import strftime
+from zlib import compress
+from binascii import b2a_base64
+from traceback import print_exc
+import sys
+from os.path import join
+from BitTornado import version
+
+icons = [ 'icon_bt.ico', 'icon_done.ico',
+          'black.ico', 'blue.ico', 'green.ico', 'red.ico', 'white.ico', 'yellow.ico',
+          'black1.ico', 'green1.ico', 'yellow1.ico', 'alloc.gif' ]
+
+width = 60
+
+normalstdout = sys.stdout
+try:
+    f = open('CreateIcons.py','w')
+    sys.stdout = f
+
+    print '# Generated from bt_MakeCreateIcons - '+strftime('%x %X')
+    print '# '+version
+    print ''
+    print 'from binascii import a2b_base64'
+    print 'from zlib import decompress'
+    print 'from os.path import join'
+    print ''
+
+    print 'icons = {'
+    for icon in icons:
+        print '    "'+icon+'":'
+        ff = open(join('icons',icon),'rb')
+        d = b2a_base64(compress(ff.read())).strip()
+        ff.close()
+        while d:
+            d1 = d[:width]
+            d = d[width:]
+            if d:
+                extra = ' +'
+            elif icon != icons[-1]:
+                extra = ','
+            else:
+                extra = ''
+            print '        "'+d1+'"'+extra
+    print '}'
+    print ''
+    print 'def GetIcons():'
+    print '    return icons.keys()'
+    print ''
+    print 'def CreateIcon(icon, savedir):'
+    print '    try:'
+    print '        f = open(join(savedir,icon),"wb")'
+    print '        f.write(decompress(a2b_base64(icons[icon])))'
+    print '        success = 1'
+    print '    except:'
+    print '        success = 0'
+    print '    try:'
+    print '        f.close()'
+    print '    except:'
+    print '        pass'
+    print '    return success'
+
+except:
+    sys.stdout = normalstdout
+    print_exc()
+    try:
+        ff.close()
+    except:
+        pass
+
+sys.stdout = normalstdout
+try:
+    f.close()
+except:
+    pass
+
+
+# here's the output code used
+
+def GetIcons():
+    return icons.keys()
+
+def CreateIcon(icon, savedir):
+    try:
+        f = open(join(savedir,icon),'wb')
+        f.write(decompress(a2b_base64(icons[icon])))
+        success = 1
+    except:
+        success = 0
+    try:
+        f.close()
+    except:
+        pass
+    return success

Propchange: debtorrent/branches/upstream/current/bt_MakeCreateIcons.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btcompletedir.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btcompletedir.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btcompletedir.py (added)
+++ debtorrent/branches/upstream/current/btcompletedir.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,38 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from sys import argv, version, exit
+assert version >= '2', "Install Python 2.0 or greater"
+from os.path import split
+from BitTornado.BT1.makemetafile import defaults, completedir, print_announcelist_details
+from BitTornado.parseargs import parseargs, formatDefinitions
+
+
+if len(argv) < 3:
+    a,b = split(argv[0])
+    print 'Usage: ' + b + ' <trackerurl> <dir> [dir...] [params...]'
+    print 'makes a .torrent file for every file or directory present in each dir.'
+    print
+    print formatDefinitions(defaults, 80)
+    print_announcelist_details()
+    print ('')
+    exit(2)
+
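+# Make a .torrent for every file or directory in each given dir, announcing to
+# the tracker URL given as the first argument.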
+try:
+    config, args = parseargs(argv[1:], defaults, 2, None)
+    for dir in args[1:]:
+        completedir(dir, args[0], config)
+except ValueError, e:
+    print 'error: ' + str(e)
+    print 'run with no args for parameter explanations'

Propchange: debtorrent/branches/upstream/current/btcompletedir.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btcompletedirgui.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btcompletedirgui.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btcompletedirgui.py (added)
+++ debtorrent/branches/upstream/current/btcompletedirgui.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,192 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from sys import argv, version
+assert version >= '2', "Install Python 2.0 or greater"
+from BitTornado.BT1.makemetafile import completedir
+from threading import Event, Thread
+import sys
+from os import getcwd
+from os.path import join
+try:
+    from wxPython.wx import *
+except:
+    print 'wxPython is either not installed or has not been installed properly.'
+    sys.exit(1)
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+wxEVT_INVOKE = wxNewEventType()
+
+def EVT_INVOKE(win, func):
+    win.Connect(-1, -1, wxEVT_INVOKE, func)
+
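+# Event used to run a callable on the wx main thread from a worker thread.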
+class InvokeEvent(wxPyEvent):
+    def __init__(self, func, args, kwargs):
+        wxPyEvent.__init__(self)
+        self.SetEventType(wxEVT_INVOKE)
+        self.func = func
+        self.args = args
+        self.kwargs = kwargs
+
+class DownloadInfo:
+    def __init__(self):
+        frame = wxFrame(None, -1, 'BitTorrent complete dir 1.0.1', size = wxSize(550, 250))
+        self.frame = frame
+
+        panel = wxPanel(frame, -1)
+
+        gridSizer = wxFlexGridSizer(cols = 2, rows = 2, vgap = 15, hgap = 8)
+        
+        gridSizer.Add(wxStaticText(panel, -1, 'directory to build:'))
+        self.dirCtl = wxTextCtrl(panel, -1, '')
+
+        b = wxBoxSizer(wxHORIZONTAL)
+        b.Add(self.dirCtl, 1, wxEXPAND)
+#        b.Add(10, 10, 0, wxEXPAND)
+        button = wxButton(panel, -1, 'select')
+        b.Add(button, 0, wxEXPAND)
+        EVT_BUTTON(frame, button.GetId(), self.select)
+
+        gridSizer.Add(b, 0, wxEXPAND)
+
+        gridSizer.Add(wxStaticText(panel, -1, 'announce url:'))
+        self.annCtl = wxTextCtrl(panel, -1, 'http://my.tracker:6969/announce')
+        gridSizer.Add(self.annCtl, 0, wxEXPAND)
+
+        gridSizer.Add(wxStaticText(panel, -1, 'piece size:'))
+        self.piece_length = wxChoice(panel, -1, choices = ['2 ** 21', '2 ** 20', '2 ** 19', 
+            '2 ** 18', '2 ** 17', '2 ** 16', '2 ** 15'])
+        self.piece_length.SetSelection(3)
+        gridSizer.Add(self.piece_length)
+
+        gridSizer.AddGrowableCol(1)
+ 
+        border = wxBoxSizer(wxVERTICAL)
+        border.Add(gridSizer, 0, wxEXPAND | wxNORTH | wxEAST | wxWEST, 25)
+        b2 = wxButton(panel, -1, 'make')
+#        border.Add(10, 10, 1, wxEXPAND)
+        border.Add(b2, 0, wxALIGN_CENTER | wxSOUTH, 20)
+        EVT_BUTTON(frame, b2.GetId(), self.complete)
+        panel.SetSizer(border)
+        panel.SetAutoLayout(True)
+
+    def select(self, x):
+        dl = wxDirDialog(self.frame, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
+        if dl.ShowModal() == wxID_OK:
+            self.dirCtl.SetValue(dl.GetPath())
+
+    def complete(self, x):
+        if self.dirCtl.GetValue() == '':
+            dlg = wxMessageDialog(self.frame, message = 'You must select a directory', 
+                caption = 'Error', style = wxOK | wxICON_ERROR)
+            dlg.ShowModal()
+            dlg.Destroy()
+            return
+        try:
+            ps = 2 ** (21 - self.piece_length.GetSelection())
+            CompleteDir(self.dirCtl.GetValue(), self.annCtl.GetValue(), ps)
+        except:
+            print_exc()
+
+from traceback import print_exc
+
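+# Progress window: runs completedir in a background thread and marshals UI
+# updates (gauge, current-file label) back to the main thread with InvokeEvent.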
+class CompleteDir:
+    def __init__(self, d, a, pl):
+        self.d = d
+        self.a = a
+        self.pl = pl
+        self.flag = Event()
+        frame = wxFrame(None, -1, 'BitTorrent make directory', size = wxSize(550, 250))
+        self.frame = frame
+
+        panel = wxPanel(frame, -1)
+
+        gridSizer = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
+
+        self.currentLabel = wxStaticText(panel, -1, 'checking file sizes')
+        gridSizer.Add(self.currentLabel, 0, wxEXPAND)
+        self.gauge = wxGauge(panel, -1, range = 1000, style = wxGA_SMOOTH)
+        gridSizer.Add(self.gauge, 0, wxEXPAND)
+        gridSizer.Add(10, 10, 1, wxEXPAND)
+        self.button = wxButton(panel, -1, 'cancel')
+        gridSizer.Add(self.button, 0, wxALIGN_CENTER)
+        gridSizer.AddGrowableRow(2)
+        gridSizer.AddGrowableCol(0)
+
+        g2 = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
+        g2.Add(gridSizer, 1, wxEXPAND | wxALL, 25)
+        g2.AddGrowableRow(0)
+        g2.AddGrowableCol(0)
+        panel.SetSizer(g2)
+        panel.SetAutoLayout(True)
+        EVT_BUTTON(frame, self.button.GetId(), self.done)
+        EVT_CLOSE(frame, self.done)
+        EVT_INVOKE(frame, self.onInvoke)
+        frame.Show(True)
+        Thread(target = self.complete).start()
+
+    def complete(self):
+        params = {'piece_size_pow2': self.pl}
+        try:
+            completedir(self.d, self.a, params, self.flag, self.valcallback, self.filecallback)
+            if not self.flag.isSet():
+                self.currentLabel.SetLabel('Done!')
+                self.gauge.SetValue(1000)
+                self.button.SetLabel('Close')
+        except (OSError, IOError), e:
+            self.currentLabel.SetLabel('Error!')
+            self.button.SetLabel('Close')
+            dlg = wxMessageDialog(self.frame, message = 'Error - ' + str(e), 
+                caption = 'Error', style = wxOK | wxICON_ERROR)
+            dlg.ShowModal()
+            dlg.Destroy()
+
+    def valcallback(self, amount):
+        self.invokeLater(self.onval, [amount])
+
+    def onval(self, amount):
+        self.gauge.SetValue(int(amount * 1000))
+
+    def filecallback(self, f):
+        self.invokeLater(self.onfile, [f])
+
+    def onfile(self, f):
+        self.currentLabel.SetLabel('building ' + join(self.d, f) + '.torrent')
+
+    def onInvoke(self, event):
+        if not self.flag.isSet():
+            apply(event.func, event.args, event.kwargs)
+
+    def invokeLater(self, func, args = [], kwargs = {}):
+        if not self.flag.isSet():
+            wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
+
+    def done(self, event):
+        self.flag.set()
+        self.frame.Destroy()
+
+class btWxApp(wxApp):
+    def OnInit(self):
+        d = DownloadInfo()
+        d.frame.Show(True)
+        self.SetTopWindow(d.frame)
+        return True
+
+if __name__ == '__main__':
+    btWxApp().MainLoop()

Propchange: debtorrent/branches/upstream/current/btcompletedirgui.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btcopyannounce.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btcopyannounce.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btcopyannounce.py (added)
+++ debtorrent/branches/upstream/current/btcopyannounce.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,59 @@
+#!/usr/bin/env python
+
+# btreannounce.py written by Henry 'Pi' James and Bram Cohen
+# multitracker extensions by John Hoffman
+# see LICENSE.txt for license information
+
+from sys import argv,exit
+from os.path import split
+from BitTornado.bencode import bencode, bdecode
+
+
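+# Render an announce-list as one line: trackers joined by ',' within a tier,
+# tiers joined by '|'.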
+def give_announce_list(l):
+    list = []
+    for tier in l:
+        for tracker in tier:
+            list+=[tracker,',']
+        del list[-1]
+        list+=['|']
+    del list[-1]
+    liststring = ''
+    for i in list:
+        liststring+=i
+    return liststring
+
+
+if len(argv) < 3:
+    a,b = split(argv[0])
+    print 'Usage: ' + b + ' <source.torrent> <file1.torrent> [file2.torrent...]'
+    print 'copies announce information from source to all specified torrents'
+    exit(2) # common exit code for syntax error
+
+h = open(argv[1], 'rb')
+source_metainfo = bdecode(h.read())
+h.close()
+
+print 'new announce: ' + source_metainfo['announce']
+if source_metainfo.has_key('announce-list'):
+    print 'new announce-list: ' + give_announce_list(source_metainfo['announce-list'])
+
+
+for f in argv[2:]:
+    h = open(f, 'rb')
+    metainfo = bdecode(h.read())
+    h.close()
+    print 'old announce for %s: %s' % (f, metainfo['announce'])
+    metainfo['announce'] = source_metainfo['announce']
+    if metainfo.has_key('announce-list'):
+        print 'old announce-list for %s: %s' % (f, give_announce_list(metainfo['announce-list']))
+    if source_metainfo.has_key('announce-list'):
+        metainfo['announce-list'] = source_metainfo['announce-list']
+    elif metainfo.has_key('announce-list'):
+        try:
+            del metainfo['announce-list']
+        except:
+            pass
+        
+    h = open(f, 'wb')
+    h.write(bencode(metainfo))
+    h.close()

Propchange: debtorrent/branches/upstream/current/btcopyannounce.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btdownloadcurses.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btdownloadcurses.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btdownloadcurses.py (added)
+++ debtorrent/branches/upstream/current/btdownloadcurses.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,407 @@
+#!/usr/bin/env python
+
+# Written by Henry 'Pi' James
+# see LICENSE.txt for license information
+
+SPEW_SCROLL_RATE = 1
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from BitTornado.download_bt1 import BT1Download, defaults, parse_params, get_usage, get_response
+from BitTornado.RawServer import RawServer, UPnP_ERROR
+from random import seed
+from socket import error as socketerror
+from BitTornado.bencode import bencode
+from BitTornado.natpunch import UPnP_test
+from threading import Event
+from os.path import abspath
+from signal import signal, SIGWINCH
+from sha import sha
+from sys import argv, exit
+import sys
+from time import time, strftime
+from BitTornado.clock import clock
+from BitTornado import createPeerID, version
+from BitTornado.ConfigDir import ConfigDir
+
+try:
+    import curses
+    import curses.panel
+    from curses.wrapper import wrapper as curses_wrapper
+    from signal import signal, SIGWINCH 
+except:
+    print 'Textmode GUI initialization failed, cannot proceed.'
+    print
+    print 'This download interface requires the standard Python module ' \
+       '"curses", which is unfortunately not available for the native ' \
+       'Windows port of Python. It is however available for the Cygwin ' \
+       'port of Python, running on all Win32 systems (www.cygwin.com).'
+    print
+    print 'You may still use "btdownloadheadless.py" to download.'
+    sys.exit(1)
+
+assert sys.version >= '2', "Install Python 2.0 or greater"
+try:
+    True
+except:
+    True = 1
+    False = 0
+
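+# Format an ETA in seconds as 'finishing in h:mm:ss' ('<unknown>' if out of range).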
+def fmttime(n):
+    if n == 0:
+        return 'download complete!'
+    try:
+        n = int(n)
+        assert n >= 0 and n < 5184000  # 60 days
+    except:
+        return '<unknown>'
+    m, s = divmod(n, 60)
+    h, m = divmod(m, 60)
+    return 'finishing in %d:%02d:%02d' % (h, m, s)
+
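+# Format a byte count with thousands separators, adding a rounded binary-unit
+# (KiB/MiB/...) figure for values over 999 bytes.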
+def fmtsize(n):
+    s = str(n)
+    size = s[-3:]
+    while len(s) > 3:
+        s = s[:-3]
+        size = '%s,%s' % (s[-3:], size)
+    if n > 999:
+        unit = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
+        i = 1
+        while i + 1 < len(unit) and (n >> 10) >= 999:
+            i += 1
+            n >>= 10
+        n = float(n) / (1 << 10)
+        size = '%s (%.0f %s)' % (size, n, unit[i])
+    return size
+
+
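+# Curses status display: label, field and 'spew' panels, rebuilt when the
+# terminal is resized (SIGWINCH).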
+class CursesDisplayer:
+    def __init__(self, scrwin, errlist, doneflag):
+        self.scrwin = scrwin
+        self.errlist = errlist
+        self.doneflag = doneflag
+        
+        signal(SIGWINCH, self.winch_handler)
+        self.changeflag = Event()
+
+        self.done = 0
+        self.file = ''
+        self.fileSize = ''
+        self.activity = ''
+        self.status = ''
+        self.progress = ''
+        self.downloadTo = ''
+        self.downRate = '---'
+        self.upRate = '---'
+        self.shareRating = ''
+        self.seedStatus = ''
+        self.peerStatus = ''
+        self.errors = []
+        self.last_update_time = 0
+        self.spew_scroll_time = 0
+        self.spew_scroll_pos = 0
+
+        self._remake_window()
+
+    def winch_handler(self, signum, stackframe):
+        self.changeflag.set()
+        curses.endwin()
+        self.scrwin.refresh()
+        self.scrwin = curses.newwin(0, 0, 0, 0)
+        self._remake_window()
+
+    def _remake_window(self):
+        self.scrh, self.scrw = self.scrwin.getmaxyx()
+        self.scrpan = curses.panel.new_panel(self.scrwin)
+        self.labelh, self.labelw, self.labely, self.labelx = 11, 9, 1, 2
+        self.labelwin = curses.newwin(self.labelh, self.labelw,
+                                      self.labely, self.labelx)
+        self.labelpan = curses.panel.new_panel(self.labelwin)
+        self.fieldh, self.fieldw, self.fieldy, self.fieldx = (
+                            self.labelh, self.scrw-2 - self.labelw-3,
+                            1, self.labelw+3)
+        self.fieldwin = curses.newwin(self.fieldh, self.fieldw,
+                                      self.fieldy, self.fieldx)
+        self.fieldwin.nodelay(1)
+        self.fieldpan = curses.panel.new_panel(self.fieldwin)
+        self.spewh, self.speww, self.spewy, self.spewx = (
+            self.scrh - self.labelh - 2, self.scrw - 3, 1 + self.labelh, 2)
+        self.spewwin = curses.newwin(self.spewh, self.speww,
+                                     self.spewy, self.spewx)
+        self.spewpan = curses.panel.new_panel(self.spewwin)
+        try:
+            self.scrwin.border(ord('|'),ord('|'),ord('-'),ord('-'),ord(' '),ord(' '),ord(' '),ord(' '))
+        except:
+            pass
+        self.labelwin.addstr(0, 0, 'file:')
+        self.labelwin.addstr(1, 0, 'size:')
+        self.labelwin.addstr(2, 0, 'dest:')
+        self.labelwin.addstr(3, 0, 'progress:')
+        self.labelwin.addstr(4, 0, 'status:')
+        self.labelwin.addstr(5, 0, 'dl speed:')
+        self.labelwin.addstr(6, 0, 'ul speed:')
+        self.labelwin.addstr(7, 0, 'sharing:')
+        self.labelwin.addstr(8, 0, 'seeds:')
+        self.labelwin.addstr(9, 0, 'peers:')
+        curses.panel.update_panels()
+        curses.doupdate()
+        self.changeflag.clear()
+
+
+    def finished(self):
+        self.done = 1
+        self.activity = 'download succeeded!'
+        self.downRate = '---'
+        self.display(fractionDone = 1)
+
+    def failed(self):
+        self.done = 1
+        self.activity = 'download failed!'
+        self.downRate = '---'
+        self.display()
+
+    def error(self, errormsg):
+        newerrmsg = strftime('[%H:%M:%S] ') + errormsg
+        self.errors.append(newerrmsg)
+        self.errlist.append(newerrmsg)
+        self.display()
+
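+    # Periodic status callback from the download core; also polls the keyboard
+    # ('q' quits, ^L forces a redraw).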
+    def display(self, dpflag = Event(), fractionDone = None, timeEst = None,
+            downRate = None, upRate = None, activity = None,
+            statistics = None, spew = None, **kws):
+
+        inchar = self.fieldwin.getch()
+        if inchar == 12: # ^L
+            self._remake_window()
+        elif inchar in (ord('q'),ord('Q')):
+            self.doneflag.set()
+
+        if activity is not None and not self.done:
+            self.activity = activity
+        elif timeEst is not None:
+            self.activity = fmttime(timeEst)
+        if self.changeflag.isSet():
+            return
+        if self.last_update_time + 0.1 > clock() and fractionDone not in (0.0, 1.0) and activity is not None:
+            return
+        self.last_update_time = clock()
+        if fractionDone is not None:
+            blocknum = int(self.fieldw * fractionDone)
+            self.progress = blocknum * '#' + (self.fieldw - blocknum) * '_'
+            self.status = '%s (%.1f%%)' % (self.activity, fractionDone * 100)
+        else:
+            self.status = self.activity
+        if downRate is not None:
+            self.downRate = '%.1f KB/s' % (float(downRate) / (1 << 10))
+        if upRate is not None:
+            self.upRate = '%.1f KB/s' % (float(upRate) / (1 << 10))
+        if statistics is not None:
+            if (statistics.shareRating < 0) or (statistics.shareRating > 100):
+                self.shareRating = 'oo  (%.1f MB up / %.1f MB down)' % (float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
+            else:
+                self.shareRating = '%.3f  (%.1f MB up / %.1f MB down)' % (statistics.shareRating, float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
+            if not self.done:
+                self.seedStatus = '%d seen now, plus %.3f distributed copies' % (statistics.numSeeds,0.001*int(1000*statistics.numCopies2))
+            else:
+                self.seedStatus = '%d seen recently, plus %.3f distributed copies' % (statistics.numOldSeeds,0.001*int(1000*statistics.numCopies))
+            self.peerStatus = '%d seen now, %.1f%% done at %.1f kB/s' % (statistics.numPeers,statistics.percentDone,float(statistics.torrentRate) / (1 << 10))
+
+        self.fieldwin.erase()
+        self.fieldwin.addnstr(0, 0, self.file, self.fieldw, curses.A_BOLD)
+        self.fieldwin.addnstr(1, 0, self.fileSize, self.fieldw)
+        self.fieldwin.addnstr(2, 0, self.downloadTo, self.fieldw)
+        if self.progress:
+            self.fieldwin.addnstr(3, 0, self.progress, self.fieldw, curses.A_BOLD)
+        self.fieldwin.addnstr(4, 0, self.status, self.fieldw)
+        self.fieldwin.addnstr(5, 0, self.downRate, self.fieldw)
+        self.fieldwin.addnstr(6, 0, self.upRate, self.fieldw)
+        self.fieldwin.addnstr(7, 0, self.shareRating, self.fieldw)
+        self.fieldwin.addnstr(8, 0, self.seedStatus, self.fieldw)
+        self.fieldwin.addnstr(9, 0, self.peerStatus, self.fieldw)
+
+        self.spewwin.erase()
+
+        if not spew:
+            errsize = self.spewh
+            if self.errors:
+                self.spewwin.addnstr(0, 0, "error(s):", self.speww, curses.A_BOLD)
+                errsize = len(self.errors)
+                displaysize = min(errsize, self.spewh)
+                displaytop = errsize - displaysize
+                for i in range(displaysize):
+                    self.spewwin.addnstr(i, self.labelw, self.errors[displaytop + i],
+                                 self.speww-self.labelw-1, curses.A_BOLD)
+        else:
+            if self.errors:
+                self.spewwin.addnstr(0, 0, "error:", self.speww, curses.A_BOLD)
+                self.spewwin.addnstr(0, self.labelw, self.errors[-1],
+                                 self.speww-self.labelw-1, curses.A_BOLD)
+            self.spewwin.addnstr(2, 0, "  #     IP                 Upload           Download     Completed  Speed", self.speww, curses.A_BOLD)
+
+
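+            # Scroll the peer listing one row at most once per
+            # SPEW_SCROLL_RATE seconds when it does not fit in the window,
+            # wrapping back to the top after the last entry.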
+            if self.spew_scroll_time + SPEW_SCROLL_RATE < clock():
+                self.spew_scroll_time = clock()
+                if len(spew) > self.spewh-5 or self.spew_scroll_pos > 0:
+                    self.spew_scroll_pos += 1
+            if self.spew_scroll_pos > len(spew):
+                self.spew_scroll_pos = 0
+
+            for i in range(len(spew)):
+                spew[i]['lineno'] = i+1
+            spew.append({'lineno': None})
+            spew = spew[self.spew_scroll_pos:] + spew[:self.spew_scroll_pos]                
+            
+            for i in range(min(self.spewh - 5, len(spew))):
+                if not spew[i]['lineno']:
+                    continue
+                self.spewwin.addnstr(i+3, 0, '%3d' % spew[i]['lineno'], 3)
+                self.spewwin.addnstr(i+3, 4, spew[i]['ip']+spew[i]['direction'], 16)
+                if spew[i]['uprate'] > 100:
+                    self.spewwin.addnstr(i+3, 20, '%6.0f KB/s' % (float(spew[i]['uprate']) / 1000), 11)
+                self.spewwin.addnstr(i+3, 32, '-----', 5)
+                if spew[i]['uinterested'] == 1:
+                    self.spewwin.addnstr(i+3, 33, 'I', 1)
+                if spew[i]['uchoked'] == 1:
+                    self.spewwin.addnstr(i+3, 35, 'C', 1)
+                if spew[i]['downrate'] > 100:
+                    self.spewwin.addnstr(i+3, 38, '%6.0f KB/s' % (float(spew[i]['downrate']) / 1000), 11)
+                self.spewwin.addnstr(i+3, 50, '-------', 7)
+                if spew[i]['dinterested'] == 1:
+                    self.spewwin.addnstr(i+3, 51, 'I', 1)
+                if spew[i]['dchoked'] == 1:
+                    self.spewwin.addnstr(i+3, 53, 'C', 1)
+                if spew[i]['snubbed'] == 1:
+                    self.spewwin.addnstr(i+3, 55, 'S', 1)
+                self.spewwin.addnstr(i+3, 58, '%5.1f%%' % (float(int(spew[i]['completed']*1000))/10), 6)
+                if spew[i]['speed'] is not None:
+                    self.spewwin.addnstr(i+3, 64, '%5.0f KB/s' % (float(spew[i]['speed'])/1000), 10)
+
+            if statistics is not None:
+                self.spewwin.addnstr(self.spewh-1, 0,
+                        'downloading %d pieces, have %d fragments, %d of %d pieces completed'
+                        % ( statistics.storage_active, statistics.storage_dirty,
+                            statistics.storage_numcomplete,
+                            statistics.storage_totalpieces ), self.speww-1 )
+
+        curses.panel.update_panels()
+        curses.doupdate()
+        dpflag.set()
+
+    def chooseFile(self, default, size, saveas, dir):
+        self.file = default
+        self.fileSize = fmtsize(size)
+        if saveas == '':
+            saveas = default
+        self.downloadTo = abspath(saveas)
+        return saveas
+
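+# Main loop run under curses_wrapper: load the saved 'downloadcurses'
+# configuration, bind a listening port (optionally via UPnP), fetch the
+# torrent metainfo and drive a BT1Download until it finishes or is stopped.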
+def run(scrwin, errlist, params):
+    doneflag = Event()
+    d = CursesDisplayer(scrwin, errlist, doneflag)
+    try:
+        while 1:
+            configdir = ConfigDir('downloadcurses')
+            defaultsToIgnore = ['responsefile', 'url', 'priority']
+            configdir.setDefaults(defaults,defaultsToIgnore)
+            configdefaults = configdir.loadConfig()
+            defaults.append(('save_options',0,
+             "whether to save the current options as the new default configuration " +
+             "(only for btdownloadcurses.py)"))
+            try:
+                config = parse_params(params, configdefaults)
+            except ValueError, e:
+                d.error('error: ' + str(e) + '\nrun with no args for parameter explanations')
+                break
+            if not config:
+                d.error(get_usage(defaults, d.fieldw, configdefaults))
+                break
+            if config['save_options']:
+                configdir.saveConfig(config)
+            configdir.deleteOldCacheData(config['expire_cache_data'])
+
+            myid = createPeerID()
+            seed(myid)
+
+            rawserver = RawServer(doneflag, config['timeout_check_interval'],
+                                  config['timeout'], ipv6_enable = config['ipv6_enabled'],
+                                  failfunc = d.failed, errorfunc = d.error)
+
+            upnp_type = UPnP_test(config['upnp_nat_access'])
+            while True:
+                try:
+                    listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
+                                    config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
+                                    upnp = upnp_type, randomizer = config['random_port'])
+                    break
+                except socketerror, e:
+                    if upnp_type and e == UPnP_ERROR:
+                        d.error('WARNING: COULD NOT FORWARD VIA UPnP')
+                        upnp_type = 0
+                        continue
+                    d.error("Couldn't listen - " + str(e))
+                    d.failed()
+                    return
+
+            response = get_response(config['responsefile'], config['url'], d.error)
+            if not response:
+                break
+
+            infohash = sha(bencode(response['info'])).digest()
+            
+            dow = BT1Download(d.display, d.finished, d.error, d.error, doneflag,
+                            config, response, infohash, myid, rawserver, listen_port,
+                            configdir)
+            
+            if not dow.saveAs(d.chooseFile):
+                break
+
+            if not dow.initFiles(old_style = True):
+                break
+            if not dow.startEngine():
+                dow.shutdown()
+                break
+            dow.startRerequester()
+            dow.autoStats()
+
+            if not dow.am_I_finished():
+                d.display(activity = 'connecting to peers')
+            rawserver.listen_forever(dow.getPortHandler())
+            d.display(activity = 'shutting down')
+            dow.shutdown()
+            break
+
+    except KeyboardInterrupt:
+        # ^C to exit.
+        pass
+    try:
+        rawserver.shutdown()
+    except:
+        pass
+    if not d.done:
+        d.failed()
+
+
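+# Command-line entry point: print the version or usage text when requested,
+# otherwise run the curses interface and report any errors collected while
+# it was running.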
+if __name__ == '__main__':
+    if argv[1:] == ['--version']:
+        print version
+        exit(0)
+    if len(argv) <= 1:
+        print "Usage: btdownloadcurses.py <global options>\n"
+        print get_usage(defaults)
+        exit(1)
+
+    errlist = []
+    curses_wrapper(run, errlist, argv[1:])
+
+    if errlist:
+        print "These errors occurred during execution:"
+        for error in errlist:
+            print error

Propchange: debtorrent/branches/upstream/current/btdownloadcurses.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btdownloadgui.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btdownloadgui.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btdownloadgui.py (added)
+++ debtorrent/branches/upstream/current/btdownloadgui.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,2373 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen and Myers Carpenter
+# Modifications by various people
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from sys import argv, version, exit
+assert version >= '2', "Install Python 2.0 or greater"
+
+try:
+    from wxPython.wx import *
+except:
+    print 'wxPython is not installed or has not been installed properly.'
+    exit(1)
+from BitTornado.download_bt1 import BT1Download, defaults, parse_params, get_usage, get_response
+from BitTornado.RawServer import RawServer, UPnP_ERROR
+from random import seed
+from socket import error as socketerror
+from BitTornado.ConnChoice import *
+from BitTornado.ConfigReader import configReader
+from BitTornado.bencode import bencode, bdecode
+from BitTornado.natpunch import UPnP_test
+from threading import Event, Thread
+from os.path import *
+from os import getcwd
+from time import strftime, time, localtime, sleep
+from BitTornado.clock import clock
+from webbrowser import open_new
+from traceback import print_exc
+from StringIO import StringIO
+from sha import sha
+import re
+import sys, os
+from BitTornado import version, createPeerID, report_email
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+PROFILER = False
+WXPROFILER = False
+
+try:
+    wxFULL_REPAINT_ON_RESIZE
+except:
+    wxFULL_REPAINT_ON_RESIZE = 0        # fix for wx pre-2.5
+
+# Note to packagers: edit OLDICONPATH in BitTornado/ConfigDir.py
+
+def hours(n):
+    if n == 0:
+        return 'download complete'
+    try:
+        n = int(n)
+        assert n >= 0 and n < 5184000  # 60 days
+    except:
+        return '<unknown>'
+    m, s = divmod(n, 60)
+    h, m = divmod(m, 60)
+    if h > 0:
+        return '%d hour(s) %02d min %02d sec' % (h, m, s)
+    else:
+        return '%d min %02d sec' % (m, s)
+
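+# Human-readable size formatting (B/KiB/MiB/GiB/TiB) for the GUI labels.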
+def size_format(s):
+    if (s < 1024):
+        r = str(s) + 'B'
+    elif (s < 1048576):
+        r = str(int(s/1024)) + 'KiB'
+    elif (s < 1073741824L):
+        r = str(int(s/1048576)) + 'MiB'
+    elif (s < 1099511627776L):
+        r = str(int((s/1073741824.0)*100.0)/100.0) + 'GiB'
+    else:
+        r = str(int((s/1099511627776.0)*100.0)/100.0) + 'TiB'
+    return(r)
+
+def comma_format(s):
+    r = str(s)
+    for i in range(len(r)-3, 0, -3):
+        r = r[:i]+','+r[i:]
+    return(r)
+
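+# Byte-to-hex lookup table used by tohex() to display the torrent info hash.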
+hexchars = '0123456789abcdef'
+hexmap = []
+for i in xrange(256):
+    x = hexchars[(i&0xF0)/16]+hexchars[i&0x0F]
+    hexmap.append(x)
+
+def tohex(s):
+    r = []
+    for c in s:
+        r.append(hexmap[ord(c)])
+    return ''.join(r)
+
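+# Custom wx event used to marshal callbacks from the download thread onto
+# the GUI thread (see DownloadInfoFrame.invokeLater below).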
+wxEVT_INVOKE = wxNewEventType()
+
+def EVT_INVOKE(win, func):
+    win.Connect(-1, -1, wxEVT_INVOKE, func)
+
+class InvokeEvent(wxPyEvent):
+    def __init__(self, func = None, args = None, kwargs = None):
+        wxPyEvent.__init__(self)
+        self.SetEventType(wxEVT_INVOKE)
+        self.func = func
+        self.args = args
+        self.kwargs = kwargs
+
+
+
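+# Main wxPython window: builds the status display, rate and connection
+# controls, and the About/Details/Credits dialogs, and receives updates from
+# the download code through invokeLater/EVT_INVOKE.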
+class DownloadInfoFrame:
+    def __init__(self, flag, configfile):
+        self._errorwindow = None
+        try:
+            self.FONT = configfile.config['gui_font']
+            self.default_font = wxFont(self.FONT, wxDEFAULT, wxNORMAL, wxNORMAL, False)
+            frame = wxFrame(None, -1, 'BitTorrent ' + version + ' download',
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+            self.flag = flag
+            self.configfile = configfile
+            self.configfileargs = configfile.config
+            self.uiflag = Event()
+            self.fin = False
+            self.aboutBox = None
+            self.detailBox = None
+            self.advBox = None
+            self.creditsBox = None
+            self.statusIconHelpBox = None
+            self.reannouncelast = 0
+            self.spinlock = 0
+            self.scrollock = 0
+            self.lastError = 0
+            self.spewwait = clock()
+            self.config = None
+            self.updateSpinnerFlag = 0
+            self.updateSliderFlag = 0
+            self.statusIconValue = ' '
+            self.iconized = 0
+            self.taskbaricon = False
+            self.checking = None
+            self.activity = 'Starting up...'
+            self.firstupdate = True
+            self.shuttingdown = False
+            self.ispaused = False
+            self.bgalloc_periods = 0
+            self.gui_fractiondone = None
+            self.fileList = None
+            self.lastexternalannounce = ''
+            self.refresh_details = False
+            self.lastuploadsettings = 0
+            self.old_download = 0
+            self.old_upload = 0
+            self.old_ratesettings = None
+            self.current_ratesetting = None
+            self.gaugemode = None
+            self.autorate = False
+            
+            self.filename = None
+            self.dow = None
+            if sys.platform == 'win32':
+                self.invokeLaterEvent = InvokeEvent()
+                self.invokeLaterList = []
+
+            wxInitAllImageHandlers()
+            self.basepath = self.configfile.getIconDir()
+            self.icon = wxIcon(os.path.join(self.basepath,'icon_bt.ico'), wxBITMAP_TYPE_ICO)
+            self.finicon = wxIcon(os.path.join(self.basepath,'icon_done.ico'), wxBITMAP_TYPE_ICO)
+            self.statusIconFiles={
+                'startup':os.path.join(self.basepath,'white.ico'),
+                'disconnected':os.path.join(self.basepath,'black.ico'),
+                'noconnections':os.path.join(self.basepath,'red.ico'),
+                'nocompletes':os.path.join(self.basepath,'blue.ico'),
+                'noincoming':os.path.join(self.basepath,'yellow.ico'),
+                'allgood':os.path.join(self.basepath,'green.ico'),
+                }
+            self.statusIcons={}
+            self.filestatusIcons = wxImageList(16, 16)
+            self.filestatusIcons.Add(wxBitmap(os.path.join(self.basepath,'black1.ico'),wxBITMAP_TYPE_ICO))
+            self.filestatusIcons.Add(wxBitmap(os.path.join(self.basepath,'yellow1.ico'), wxBITMAP_TYPE_ICO))
+            self.filestatusIcons.Add(wxBitmap(os.path.join(self.basepath,'green1.ico'), wxBITMAP_TYPE_ICO))
+
+            self.allocbuttonBitmap = wxBitmap(os.path.join(self.basepath,'alloc.gif'), wxBITMAP_TYPE_GIF)
+
+            self.starttime = clock()
+
+            self.frame = frame
+            try:
+                self.frame.SetIcon(self.icon)
+            except:
+                pass
+
+            panel = wxPanel(frame, -1)
+            self.bgcolor = panel.GetBackgroundColour()
+
+            def StaticText(text, font = self.FONT-1, underline = False, color = None, panel = panel):
+                x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+                x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+                if color is not None:
+                    x.SetForegroundColour(color)
+                return x
+
+            colSizer = wxFlexGridSizer(cols = 1, vgap = 3)
+
+            border = wxBoxSizer(wxHORIZONTAL)
+            border.Add(colSizer, 1, wxEXPAND | wxALL, 4)
+            panel.SetSizer(border)
+            panel.SetAutoLayout(True)
+
+            topboxsizer = wxFlexGridSizer(cols = 3, vgap = 0)
+            topboxsizer.AddGrowableCol (0)
+
+            fnsizer = wxFlexGridSizer(cols = 1, vgap = 0)
+            fnsizer.AddGrowableCol (0)
+            fnsizer.AddGrowableRow (1)
+
+            fileNameText = StaticText('', self.FONT+4)
+            fnsizer.Add(fileNameText, 1, wxALIGN_BOTTOM|wxEXPAND)
+            self.fileNameText = fileNameText
+
+            fnsizer2 = wxFlexGridSizer(cols = 8, vgap = 0)
+            fnsizer2.AddGrowableCol (0)
+
+            fileSizeText = StaticText('')
+            fnsizer2.Add(fileSizeText, 1, wxALIGN_BOTTOM|wxEXPAND)
+            self.fileSizeText = fileSizeText
+
+            fileDetails = StaticText('Details', self.FONT, True, 'Blue')
+            fnsizer2.Add(fileDetails, 0, wxALIGN_BOTTOM)                                     
+
+            fnsizer2.Add(StaticText('  '))
+
+            advText = StaticText('Advanced', self.FONT, True, 'Blue')
+            fnsizer2.Add(advText, 0, wxALIGN_BOTTOM)
+            fnsizer2.Add(StaticText('  '))
+
+            prefsText = StaticText('Prefs', self.FONT, True, 'Blue')
+            fnsizer2.Add(prefsText, 0, wxALIGN_BOTTOM)
+            fnsizer2.Add(StaticText('  '))
+
+            aboutText = StaticText('About', self.FONT, True, 'Blue')
+            fnsizer2.Add(aboutText, 0, wxALIGN_BOTTOM)
+
+            fnsizer2.Add(StaticText('  '))
+            fnsizer.Add(fnsizer2,0,wxEXPAND)
+            topboxsizer.Add(fnsizer,0,wxEXPAND)
+            topboxsizer.Add(StaticText('  '))
+
+            self.statusIcon = wxEmptyBitmap(32,32)
+            statidata = wxMemoryDC()
+            statidata.SelectObject(self.statusIcon)
+            statidata.SetPen(wxTRANSPARENT_PEN)
+            statidata.SetBrush(wxBrush(self.bgcolor,wxSOLID))
+            statidata.DrawRectangle(0,0,32,32)
+            self.statusIconPtr = wxStaticBitmap(panel, -1, self.statusIcon)
+            topboxsizer.Add(self.statusIconPtr)
+
+            self.fnsizer = fnsizer
+            self.fnsizer2 = fnsizer2
+            self.topboxsizer = topboxsizer
+            colSizer.Add(topboxsizer, 0, wxEXPAND)
+
+            self.gauge = wxGauge(panel, -1, range = 1000, style = wxGA_SMOOTH)
+            colSizer.Add(self.gauge, 0, wxEXPAND)
+
+            timeSizer = wxFlexGridSizer(cols = 2)
+            timeSizer.Add(StaticText('Time elapsed / estimated : '))
+            self.timeText = StaticText(self.activity+'                    ')
+            timeSizer.Add(self.timeText)
+            timeSizer.AddGrowableCol(1)
+            colSizer.Add(timeSizer)
+
+            destSizer = wxFlexGridSizer(cols = 2, hgap = 8)
+            self.fileDestLabel = StaticText('Download to:')
+            destSizer.Add(self.fileDestLabel)
+            self.fileDestText = StaticText('')
+            destSizer.Add(self.fileDestText, flag = wxEXPAND)
+            destSizer.AddGrowableCol(1)
+            colSizer.Add(destSizer, flag = wxEXPAND)
+            self.destSizer = destSizer
+
+            statSizer = wxFlexGridSizer(cols = 3, hgap = 8)
+
+            self.ratesSizer = wxFlexGridSizer(cols = 2)
+            self.infoSizer = wxFlexGridSizer(cols = 2)
+
+            self.ratesSizer.Add(StaticText('   Download rate: '))
+            self.downRateText = StaticText('0 kB/s       ')
+            self.ratesSizer.Add(self.downRateText, flag = wxEXPAND)
+
+            self.downTextLabel = StaticText('Downloaded: ')
+            self.infoSizer.Add(self.downTextLabel)
+            self.downText = StaticText('0.00 MiB        ')
+            self.infoSizer.Add(self.downText, flag = wxEXPAND)
+
+            self.ratesSizer.Add(StaticText('   Upload rate: '))
+            self.upRateText = StaticText('0 kB/s       ')
+            self.ratesSizer.Add(self.upRateText, flag = wxEXPAND)
+
+            self.upTextLabel = StaticText('Uploaded: ')
+            self.infoSizer.Add(self.upTextLabel)
+            self.upText = StaticText('0.00 MiB        ')
+            self.infoSizer.Add(self.upText, flag = wxEXPAND)
+
+            shareSizer = wxFlexGridSizer(cols = 2, hgap = 8)
+            shareSizer.Add(StaticText('Share rating:'))
+            self.shareRatingText = StaticText('')
+            shareSizer.AddGrowableCol(1)
+            shareSizer.Add(self.shareRatingText, flag = wxEXPAND)
+
+            statSizer.Add(self.ratesSizer)
+            statSizer.Add(self.infoSizer)
+            statSizer.Add(shareSizer, flag = wxALIGN_CENTER_VERTICAL)
+            colSizer.Add (statSizer)
+
+            torrentSizer = wxFlexGridSizer(cols = 1)
+            self.peerStatusText = StaticText('')
+            torrentSizer.Add(self.peerStatusText, 0, wxEXPAND)
+            self.seedStatusText = StaticText('')
+            torrentSizer.Add(self.seedStatusText, 0, wxEXPAND)
+            torrentSizer.AddGrowableCol(0)
+            colSizer.Add(torrentSizer, 0, wxEXPAND)
+            self.torrentSizer = torrentSizer
+
+            self.errorTextSizer = wxFlexGridSizer(cols = 1)
+            self.errorText = StaticText('', self.FONT, False, 'Red')
+            self.errorTextSizer.Add(self.errorText, 0, wxEXPAND)
+            colSizer.Add(self.errorTextSizer, 0, wxEXPAND)
+
+            cancelSizer=wxGridSizer(cols = 2, hgap = 40)
+            self.pauseButton = wxButton(panel, -1, 'Pause')
+            cancelSizer.Add(self.pauseButton, 0, wxALIGN_CENTER)
+
+            self.cancelButton = wxButton(panel, -1, 'Cancel')
+            cancelSizer.Add(self.cancelButton, 0, wxALIGN_CENTER)
+            colSizer.Add(cancelSizer, 0, wxALIGN_CENTER)
+
+            # Setting options
+
+            slideSizer = wxFlexGridSizer(cols = 7, hgap = 0, vgap = 5)
+
+            # dropdown
+
+            self.connChoiceLabel = StaticText('Settings for ')
+            slideSizer.Add (self.connChoiceLabel, 0, wxALIGN_LEFT|wxALIGN_CENTER_VERTICAL)
+            self.connChoice = wxChoice (panel, -1, (-1, -1), (self.FONT*12, -1),
+                                        choices = connChoiceList)
+            self.connChoice.SetFont(self.default_font)
+            self.connChoice.SetSelection(0)
+            slideSizer.Add (self.connChoice, 0, wxALIGN_CENTER)
+            self.rateSpinnerLabel = StaticText(' Upload rate (kB/s) ')
+            slideSizer.Add (self.rateSpinnerLabel, 0, wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
+
+            # max upload rate
+
+            self.rateSpinner = wxSpinCtrl (panel, -1, "", (-1,-1), (50, -1))
+            self.rateSpinner.SetFont(self.default_font)
+            self.rateSpinner.SetRange(0,5000)
+            self.rateSpinner.SetValue(0)
+            slideSizer.Add (self.rateSpinner, 0, wxALIGN_CENTER|wxALIGN_CENTER_VERTICAL)
+
+            self.rateLowerText = StaticText('  %5d' % (0))
+            self.rateUpperText = StaticText('%5d' % (5000))
+            self.rateslider = wxSlider(panel, -1, 0, 0, 5000, (-1, -1), (80, -1))
+
+            slideSizer.Add(self.rateLowerText, 0, wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
+            slideSizer.Add(self.rateslider,    0, wxALIGN_CENTER|wxALIGN_CENTER_VERTICAL)
+            slideSizer.Add(self.rateUpperText, 0, wxALIGN_LEFT|wxALIGN_CENTER_VERTICAL)
+
+            slideSizer.Add(StaticText(''), 0, wxALIGN_LEFT)
+
+            self.bgallocText = StaticText('', self.FONT+2, False, 'Red')
+            slideSizer.Add(self.bgallocText, 0, wxALIGN_LEFT)
+
+            # max uploads
+
+            self.connSpinnerLabel = StaticText(' Max uploads ')
+            slideSizer.Add (self.connSpinnerLabel, 0, wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
+            self.connSpinner = wxSpinCtrl (panel, -1, "", (-1,-1), (50, -1))
+            self.connSpinner.SetFont(self.default_font)
+            self.connSpinner.SetRange(4,100)
+            self.connSpinner.SetValue(4)
+            slideSizer.Add (self.connSpinner, 0, wxALIGN_CENTER|wxALIGN_CENTER_VERTICAL)
+
+            self.connLowerText = StaticText('  %5d' % (4))
+            self.connUpperText = StaticText('%5d' % (100))
+            self.connslider = wxSlider(panel, -1, 4, 4, 100, (-1, -1), (80, -1))
+
+            slideSizer.Add(self.connLowerText, 0, wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
+            slideSizer.Add(self.connslider,    0, wxALIGN_CENTER|wxALIGN_CENTER_VERTICAL)
+            slideSizer.Add(self.connUpperText, 0, wxALIGN_LEFT|wxALIGN_CENTER_VERTICAL)
+
+            colSizer.Add(slideSizer, 1, wxALL|wxALIGN_CENTER|wxEXPAND, 0)
+
+            self.unlimitedLabel = StaticText('0 kB/s means unlimited. Tip: your download rate is proportional to your upload rate', self.FONT-2)
+            colSizer.Add(self.unlimitedLabel, 0, wxALIGN_CENTER)
+
+            self.priorityIDs = [wxNewId(),wxNewId(),wxNewId(),wxNewId()]
+            self.prioritycolors = [ wxColour(160,160,160),
+                                    wxColour(255,64,0),
+                                    wxColour(0,0,0),
+                                    wxColour(64,64,255) ]
+
+
+            EVT_LEFT_DOWN(aboutText, self.about)
+            EVT_LEFT_DOWN(fileDetails, self.details)
+            EVT_LEFT_DOWN(self.statusIconPtr,self.statusIconHelp)
+            EVT_LEFT_DOWN(advText, self.advanced)
+            EVT_LEFT_DOWN(prefsText, self.openConfigMenu)
+            EVT_CLOSE(frame, self.done)
+            EVT_BUTTON(frame, self.pauseButton.GetId(), self.pause)
+            EVT_BUTTON(frame, self.cancelButton.GetId(), self.done)
+            EVT_INVOKE(frame, self.onInvoke)
+            EVT_SCROLL(self.rateslider, self.onRateScroll)
+            EVT_SCROLL(self.connslider, self.onConnScroll)
+            EVT_CHOICE(self.connChoice, -1, self.onConnChoice)
+            EVT_SPINCTRL(self.connSpinner, -1, self.onConnSpinner)
+            EVT_SPINCTRL(self.rateSpinner, -1, self.onRateSpinner)
+            if (sys.platform == 'win32'):
+                self.frame.tbicon = wxTaskBarIcon()
+                EVT_ICONIZE(self.frame, self.onIconify)
+                EVT_TASKBAR_LEFT_DCLICK(self.frame.tbicon, self.onTaskBarActivate)
+                EVT_TASKBAR_RIGHT_UP(self.frame.tbicon, self.onTaskBarMenu)
+                EVT_MENU(self.frame.tbicon, self.TBMENU_RESTORE, self.onTaskBarActivate)
+                EVT_MENU(self.frame.tbicon, self.TBMENU_CLOSE, self.done)
+            colSizer.AddGrowableCol (0)
+            colSizer.AddGrowableRow (6)
+            self.frame.Show()
+            border.Fit(panel)
+            self.frame.Fit()
+            self.panel = panel
+            self.border = border
+            self.addwidth = aboutText.GetBestSize().GetWidth() + fileDetails.GetBestSize().GetWidth() + (self.FONT*16)
+            self.fnsizer = fnsizer
+            self.colSizer = colSizer
+            minsize = self.colSizer.GetSize()
+            minsize.SetWidth (minsize.GetWidth())
+            minsize.SetHeight (minsize.GetHeight())
+            self.colSizer.SetMinSize (minsize)
+            self.colSizer.Fit(self.frame)
+            colSizer.Fit(frame)
+        except:
+            self.exception()
+
+    if sys.platform == 'win32':     # windows-only optimization
+        def onInvoke(self, event):
+            while self.invokeLaterList:
+                func,args,kwargs = self.invokeLaterList[0]
+                if self.uiflag.isSet():
+                    return
+                try:
+                    apply(func,args,kwargs)
+                except:
+                    self.exception()
+                del self.invokeLaterList[0]
+
+        def invokeLater(self, func, args = [], kwargs = {}):
+            if not self.uiflag.isSet():
+                self.invokeLaterList.append((func,args,kwargs))
+                if len(self.invokeLaterList) == 1:
+                    wxPostEvent(self.frame, self.invokeLaterEvent)
+    else:
+        def onInvoke(self, event):
+            if not self.uiflag.isSet():
+                try:
+                    apply(event.func, event.args, event.kwargs)
+                except:
+                    self.exception()
+
+        def invokeLater(self, func, args = [], kwargs = {}):
+            if not self.uiflag.isSet():
+                wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
+
+
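+    # Lazily load and cache the coloured status icons, either as wxIcon or
+    # as wxBitmap depending on what the caller needs.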
+    def getStatusIcon(self, name, bitmap=False):
+        if self.statusIcons.has_key(name):
+            i = self.statusIcons[name]
+            if type(i)  == type(self.icon) and not bitmap:
+                return i
+        if bitmap:
+            i = wxBitmap(self.statusIconFiles[name], wxBITMAP_TYPE_ICO)
+        else:
+            i = wxIcon(self.statusIconFiles[name], wxBITMAP_TYPE_ICO)
+        self.statusIcons[name] = i
+        return i
+
+
+    def setStatusIcon(self, name):
+        if name == self.statusIconValue:
+            return
+        self.statusIconValue = name
+        statidata = wxMemoryDC()
+        statidata.SelectObject(self.statusIcon)
+        statidata.BeginDrawing()
+        try:
+            statidata.DrawIcon(self.getStatusIcon(name),0,0)
+        except:
+            statidata.DrawBitmap(self.getStatusIcon(name,True),0,0,True)
+        statidata.EndDrawing()
+        statidata.SelectObject(wxNullBitmap)
+        self.statusIconPtr.Refresh()
+
+
+    def createStatusIcon(self, name):
+        iconbuffer = wxEmptyBitmap(32,32)
+        bbdata = wxMemoryDC()
+        bbdata.SelectObject(iconbuffer)
+        bbdata.SetPen(wxTRANSPARENT_PEN)
+        bbdata.SetBrush(wxBrush(self.bgcolor,wxSOLID))
+        bbdata.DrawRectangle(0,0,32,32)
+        try:
+            bbdata.DrawIcon(self.getStatusIcon(name),0,0)
+        except:
+            bbdata.DrawBitmap(self.getStatusIcon(name,True),0,0,True)
+        return iconbuffer
+
+
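+    # Recolour the progress gauge for the current mode: checking (<0),
+    # downloading (0) or seeding (>0), using the colours from the config.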
+    def setgaugemode(self, selection):
+        if selection is None:
+            selection = self.gaugemode
+        elif selection == self.gaugemode:
+            return
+        else:
+            self.gaugemode = selection
+        if selection < 0:
+            self.gauge.SetForegroundColour(self.configfile.getcheckingcolor())
+            self.gauge.SetBackgroundColour(wxSystemSettings_GetColour(wxSYS_COLOUR_MENU))
+        elif selection == 0:
+            self.gauge.SetForegroundColour(self.configfile.getdownloadcolor())
+            self.gauge.SetBackgroundColour(wxSystemSettings_GetColour(wxSYS_COLOUR_MENU))
+        else:
+            self.gauge.SetForegroundColour(self.configfile.getseedingcolor())
+            self.gauge.SetBackgroundColour(self.configfile.getdownloadcolor())
+
+
+    def onIconify(self, evt):
+        try:
+            if self.configfileargs['win32_taskbar_icon']:
+                if self.fin:
+                    self.frame.tbicon.SetIcon(self.finicon, "BitTorrent")
+                else:
+                    self.frame.tbicon.SetIcon(self.icon, "BitTorrent")
+                self.frame.Hide()
+                self.taskbaricon = True
+            else:
+                return
+        except:
+            self.exception()
+
+
+    def onTaskBarActivate(self, evt):
+        try:
+            if self.frame.IsIconized():
+                self.frame.Iconize(False)
+            if not self.frame.IsShown():
+                self.frame.Show(True)
+                self.frame.Raise()
+            self.frame.tbicon.RemoveIcon()
+            self.taskbaricon = False
+        except wxPyDeadObjectError:
+            pass
+        except:
+            self.exception()
+
+    TBMENU_RESTORE = 1000
+    TBMENU_CLOSE   = 1001
+
+    def onTaskBarMenu(self, evt):
+        menu = wxMenu()
+        menu.Append(self.TBMENU_RESTORE, "Restore BitTorrent")
+        menu.Append(self.TBMENU_CLOSE,   "Close")
+        self.frame.tbicon.PopupMenu(menu)
+        menu.Destroy()
+
+
+    def _try_get_config(self):
+        if self.config is None:
+            try:
+                self.config = self.dow.getConfig()
+            except:
+                pass
+        return self.config != None
+
+    def onRateScroll(self, event):
+        try:
+            if self.autorate:
+                return
+            if not self._try_get_config():
+                return
+            if (self.scrollock == 0):
+                self.scrollock = 1
+                self.updateSpinnerFlag = 1
+                self.dow.setUploadRate(self.rateslider.GetValue()
+                            * connChoices[self.connChoice.GetSelection()]['rate'].get('div',1))
+                self.scrollock = 0
+        except:
+            self.exception()
+
+    def onConnScroll(self, event):
+        try:
+            if self.autorate:
+                return
+            if not self._try_get_config():
+                return
+            self.connSpinner.SetValue (self.connslider.GetValue ())
+            self.dow.setConns(self.connslider.GetValue())
+        except:
+            self.exception()
+
+    def onRateSpinner(self, event = None):
+        try:
+            if self.autorate:
+                return
+            if not self._try_get_config():
+                return
+            if (self.spinlock == 0):
+                self.spinlock = 1
+                spinnerValue = self.rateSpinner.GetValue()
+                div = connChoices[self.connChoice.GetSelection()]['rate'].get('div',1)
+                if div > 1:
+                    if spinnerValue > (self.config['max_upload_rate']):
+                        round_up = div - 1
+                    else:
+                        round_up = 0
+                    newValue = int((spinnerValue + round_up) / div) * div
+                    if newValue != spinnerValue:
+                        self.rateSpinner.SetValue(newValue)
+                else:
+                    newValue = spinnerValue
+                self.dow.setUploadRate(newValue)
+                self.updateSliderFlag = 1
+                self.spinlock = 0
+        except:
+            self.exception()
+
+    def onDownRateSpinner(self, event=None):
+        try:
+            if not self._try_get_config():
+                return
+            spinnerValue = self.downrateSpinner.GetValue()
+            self.dow.setDownloadRate(self.downrateSpinner.GetValue())
+        except:
+            self.exception()
+
+    def onConnSpinner(self, event = None):
+        try:
+            if self.autorate:
+                return
+            if not self._try_get_config():
+                return
+            self.connslider.SetValue (self.connSpinner.GetValue())
+            self.dow.setConns(self.connslider.GetValue())
+        except:
+            self.exception()
+
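+    # Apply the selected connection profile: reset the rate and max-uploads
+    # spinner/slider ranges and defaults, and switch automatic rate mode or
+    # super-seeding on when the chosen profile asks for it.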
+    def onConnChoice(self, event, cons=None, rate=None):
+        try:
+            if not self._try_get_config():
+                return
+            num = self.connChoice.GetSelection()
+            choice = connChoices[num]
+            if choice.has_key('super-seed'):  # selecting super-seed is now a toggle
+                self.dow.set_super_seed()     # one way change, don't go back
+                self.connChoice.SetSelection(self.lastuploadsettings)
+                return
+            self.lastuploadsettings = num
+            self.current_ratesetting = self.connChoice.GetStringSelection()
+            if rate is None:
+                rate = choice['rate']['def']
+            self.rateSpinner.SetRange (choice['rate']['min'],
+                                   choice['rate']['max'])
+            self.rateSpinner.SetValue(rate)
+            self.rateslider.SetRange(
+                choice['rate']['min']/choice['rate'].get('div',1),
+                choice['rate']['max']/choice['rate'].get('div',1))
+            self.rateslider.SetValue (rate/choice['rate'].get('div',1))
+            self.rateLowerText.SetLabel ('  %d' % (choice['rate']['min']))
+            self.rateUpperText.SetLabel ('%d' % (choice['rate']['max']))
+            if cons is None:
+                cons = choice['conn']['def']
+            self.connSpinner.SetRange (choice['conn']['min'],
+                                       choice['conn']['max'])
+            self.connSpinner.SetValue (cons)
+            self.connslider.SetRange (choice['conn']['min'],
+                                      choice['conn']['max'])
+            self.connslider.SetValue (cons)
+            self.connLowerText.SetLabel ('  %d' % (choice['conn']['min']))
+            self.connUpperText.SetLabel ('%d' % (choice['conn']['max']))
+            self.onConnScroll (0)
+            self.onRateScroll (0)
+            self.dow.setInitiate(choice.get('initiate', 40))
+            if choice.has_key('automatic'):
+                if not self.autorate:
+                    self.autorate = True
+                    self.rateSpinner.Enable(False)
+                    self.connSpinner.Enable(False)
+                    self.dow.setUploadRate(-1)
+            else:
+                if self.autorate:
+                    self.autorate = False
+                    self.rateSpinner.Enable(True)
+                    self.connSpinner.Enable(True)
+                    self.onRateSpinner()
+                    self.onConnSpinner()
+        except:
+            self.exception()
+
+
+    def about(self, event):
+        try:
+            if (self.aboutBox is not None):
+                try:
+                    self.aboutBox.Close ()
+                except wxPyDeadObjectError, e:
+                    self.aboutBox = None
+
+            self.aboutBox = wxFrame(None, -1, 'About BitTorrent', size = (1,1),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+            try:
+                self.aboutBox.SetIcon(self.icon)
+            except:
+                pass
+
+            panel = wxPanel(self.aboutBox, -1)
+
+            def StaticText(text, font = self.FONT, underline = False, color = None, panel = panel):
+                x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+                x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+                if color is not None:
+                    x.SetForegroundColour(color)
+                return x
+
+            colSizer = wxFlexGridSizer(cols = 1, vgap = 3)
+
+            titleSizer = wxBoxSizer(wxHORIZONTAL)
+            aboutTitle = StaticText('BitTorrent ' + version + '  ', self.FONT+4)
+            titleSizer.Add (aboutTitle)
+            linkDonate = StaticText('Donate to Bram', self.FONT, True, 'Blue')
+            titleSizer.Add (linkDonate, 1, wxALIGN_BOTTOM&wxEXPAND)
+            colSizer.Add(titleSizer, 0, wxEXPAND)
+
+            colSizer.Add(StaticText('created by Bram Cohen, Copyright 2001-2003,'))
+            colSizer.Add(StaticText('experimental version maintained by John Hoffman 2003'))
+            colSizer.Add(StaticText('modified from experimental version by Eike Frost 2003'))
+            credits = StaticText('full credits\n', self.FONT, True, 'Blue')
+            colSizer.Add(credits);
+
+            si = ( 'exact Version String: ' + version + '\n' +
+                   'Python version: ' + sys.version + '\n' +
+                   'wxPython version: ' + wxVERSION_STRING + '\n' )
+            try:
+                si += 'Psyco version: ' + hex(psyco.__version__)[2:] + '\n'
+            except:
+                pass
+            colSizer.Add(StaticText(si))
+
+            babble1 = StaticText(
+             'This is an experimental, unofficial build of BitTorrent.\n' +
+             'It is Free Software under an MIT-Style license.')
+            babble2 = StaticText('BitTorrent Homepage (link)', self.FONT, True, 'Blue')
+            babble3 = StaticText("TheSHAD0W's Client Homepage (link)", self.FONT, True, 'Blue')
+            babble4 = StaticText("Eike Frost's Client Homepage (link)", self.FONT, True, 'Blue')
+            babble6 = StaticText('License Terms (link)', self.FONT, True, 'Blue')
+            colSizer.Add (babble1)
+            colSizer.Add (babble2)
+            colSizer.Add (babble3)
+            colSizer.Add (babble4)
+            colSizer.Add (babble6)
+
+            okButton = wxButton(panel, -1, 'Ok')
+            colSizer.Add(okButton, 0, wxALIGN_RIGHT)
+            colSizer.AddGrowableCol(0)
+
+            border = wxBoxSizer(wxHORIZONTAL)
+            border.Add(colSizer, 1, wxEXPAND | wxALL, 4)
+            panel.SetSizer(border)
+            panel.SetAutoLayout(True)
+
+            def donatelink(self):
+                Thread(target = open_new('https://www.paypal.com/cgi-bin/webscr?cmd=_xclick&business=bram@bitconjurer.org&item_name=BitTorrent&amount=5.00&submit=donate')).start()
+            EVT_LEFT_DOWN(linkDonate, donatelink)
+            def aboutlink(self):
+                Thread(target = open_new('http://bitconjurer.org/BitTorrent/')).start()
+            EVT_LEFT_DOWN(babble2, aboutlink)
+            def shadlink(self):
+                Thread(target = open_new('http://www.bittornado.com/')).start()
+            EVT_LEFT_DOWN(babble3, shadlink)
+            def explink(self):
+                Thread(target = open_new('http://ei.kefro.st/projects/btclient/')).start()
+            EVT_LEFT_DOWN(babble4, explink)
+            def licenselink(self):
+                Thread(target = open_new('http://ei.kefro.st/projects/btclient/LICENSE.TXT')).start()
+            EVT_LEFT_DOWN(babble6, licenselink)
+            EVT_LEFT_DOWN(credits, self.credits)
+
+            def closeAbout(e, self = self):
+                if self.aboutBox:
+                    self.aboutBox.Close()
+            EVT_BUTTON(self.aboutBox, okButton.GetId(), closeAbout)
+            def kill(e, self = self):
+                try:
+                    self.aboutBox.RemoveIcon()
+                except:
+                    pass
+                self.aboutBox.Destroy()
+                self.aboutBox = None
+            EVT_CLOSE(self.aboutBox, kill)
+
+            self.aboutBox.Show()
+            border.Fit(panel)
+            self.aboutBox.Fit()
+        except:
+            self.exception()
+
+
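+    # Open the "Torrent Details" window: metainfo summary, per-file list
+    # with right-click priority selection, and the tracker/announce list.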
+    def details(self, event):
+        try:
+            if not self.dow or not self.filename:
+                return
+            metainfo = self.dow.getResponse()
+            if metainfo is None:
+                return
+            if metainfo.has_key('announce'):
+                announce = metainfo['announce']
+            else:
+                announce = None
+            if metainfo.has_key('announce-list'):
+                announce_list = metainfo['announce-list']
+            else:
+                announce_list = None
+            info = metainfo['info']
+            info_hash = self.dow.infohash
+            piece_length = info['piece length']
+            fileselector = self.dow.fileselector
+
+            if (self.detailBox is not None):
+                try:
+                    self.detailBox.Close()
+                except wxPyDeadObjectError, e:
+                    self.detailBox = None
+
+            self.detailBox = wxFrame(None, -1, 'Torrent Details ', size = wxSize(405,230),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+            try:
+                self.detailBox.SetIcon(self.icon)
+            except:
+                pass
+
+            panel = wxPanel(self.detailBox, -1, size = wxSize (400,220))
+
+            def StaticText(text, font = self.FONT-1, underline = False, color = None, panel = panel):
+                x = wxStaticText(panel, -1, text, style = wxALIGN_CENTER_VERTICAL)
+                x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+                if color is not None:
+                    x.SetForegroundColour(color)
+                return x
+
+            colSizer = wxFlexGridSizer(cols = 1, vgap = 3)
+            colSizer.AddGrowableCol(0)
+
+            titleSizer = wxBoxSizer(wxHORIZONTAL)
+            aboutTitle = StaticText('Details about ' + self.filename, self.FONT+4)
+
+            titleSizer.Add (aboutTitle)
+            colSizer.Add (titleSizer)
+
+            detailSizer = wxFlexGridSizer(cols = 2, vgap = 6)
+
+            if info.has_key('length'):
+                fileListID = None
+                detailSizer.Add(StaticText('file name :'))
+                detailSizer.Add(StaticText(info['name']))
+                if info.has_key('md5sum'):
+                    detailSizer.Add(StaticText('MD5 hash :'))
+                    detailSizer.Add(StaticText(info['md5sum']))
+                file_length = info['length']
+                name = "file size"
+            else:
+                detail1Sizer = wxFlexGridSizer(cols = 1, vgap = 6)
+                detail1Sizer.Add(StaticText('directory name : ' + info['name']))
+                colSizer.Add (detail1Sizer)
+                bgallocButton = wxBitmapButton(panel, -1, self.allocbuttonBitmap, size = (52,20))
+                def bgalloc(self, frame = self):
+                    if frame.dow.storagewrapper is not None:
+                        frame.dow.storagewrapper.bgalloc()
+                EVT_BUTTON(self.detailBox, bgallocButton.GetId(), bgalloc)
+
+                bgallocbuttonSizer = wxFlexGridSizer(cols = 4, hgap = 4, vgap = 0)
+                bgallocbuttonSizer.Add(StaticText('(right-click to set priority)',self.FONT-1),0,wxALIGN_BOTTOM)
+                bgallocbuttonSizer.Add(StaticText('(finish allocation)'), -1, wxALIGN_CENTER_VERTICAL)
+                bgallocbuttonSizer.Add(bgallocButton, -1, wxALIGN_CENTER)
+                bgallocbuttonSizer.AddGrowableCol(0)
+                colSizer.Add(bgallocbuttonSizer, -1, wxEXPAND)
+
+                file_length = 0
+
+                fileListID = wxNewId()
+                fileList = wxListCtrl(panel, fileListID,
+                                      wxPoint(-1,-1), (325,100), wxLC_REPORT)
+                self.fileList = fileList
+                fileList.SetImageList(self.filestatusIcons, wxIMAGE_LIST_SMALL)
+
+                fileList.SetAutoLayout (True)
+                fileList.InsertColumn(0, "file")
+                fileList.InsertColumn(1, "", format=wxLIST_FORMAT_RIGHT, width=55)
+                fileList.InsertColumn(2, "")
+
+                for i in range(len(info['files'])):
+                    x = wxListItem()
+                    fileList.InsertItem(x)
+
+                x = 0
+                for file in info['files']:
+                    path = ' '
+                    for item in file['path']:
+                        if (path != ''):
+                            path = path + "/"
+                        path = path + item
+                    path += ' (' + str(file['length']) + ')'
+                    fileList.SetStringItem(x, 0, path)
+                    if file.has_key('md5sum'):
+                        fileList.SetStringItem(x, 2, '    [' + str(file['md5sum']) + ']')
+                    if fileselector:
+                        p = fileselector[x]
+                        item = self.fileList.GetItem(x)
+                        item.SetTextColour(self.prioritycolors[p+1])
+                        fileList.SetItem(item)
+                    x += 1
+                    file_length += file['length']
+                fileList.SetColumnWidth(0,wxLIST_AUTOSIZE)
+                fileList.SetColumnWidth(2,wxLIST_AUTOSIZE)
+
+                name = 'archive size'
+                colSizer.Add(fileList, 1, wxEXPAND)
+                colSizer.AddGrowableRow(3)
+
+            detailSizer.Add(StaticText('info_hash :'),0,wxALIGN_CENTER_VERTICAL)
+            detailSizer.Add(wxTextCtrl(panel, -1, tohex(info_hash), size = (325, -1), style = wxTE_READONLY))
+            num_pieces = int((file_length+piece_length-1)/piece_length)
+            detailSizer.Add(StaticText(name + ' : '))
+            detailSizer.Add(StaticText('%s (%s bytes)' % (size_format(file_length), comma_format(file_length))))
+            detailSizer.Add(StaticText('pieces : '))
+            if num_pieces > 1:
+                detailSizer.Add(StaticText('%i (%s bytes each)' % (num_pieces, comma_format(piece_length))))
+            else:
+                detailSizer.Add(StaticText('1'))
+
+            if announce_list is None:
+                detailSizer.Add(StaticText('announce url : '),0,wxALIGN_CENTER_VERTICAL)
+                detailSizer.Add(wxTextCtrl(panel, -1, announce, size = (325, -1), style = wxTE_READONLY))
+            else:
+                detailSizer.Add(StaticText(''))
+                trackerList = wxListCtrl(panel, -1, wxPoint(-1,-1), (325,75), wxLC_REPORT)
+                trackerList.SetAutoLayout (True)
+                trackerList.InsertColumn(0, "")
+                trackerList.InsertColumn(1, "announce urls")
+
+                for tier in range(len(announce_list)):
+                    for t in range(len(announce_list[tier])):
+                        i = wxListItem()
+                        trackerList.InsertItem(i)
+                if announce is not None:
+                    for l in [1,2]:
+                        i = wxListItem()
+                        trackerList.InsertItem(i)
+
+                x = 0
+                for tier in range(len(announce_list)):
+                    for t in range(len(announce_list[tier])):
+                        if t == 0:
+                            trackerList.SetStringItem(x, 0, 'tier '+str(tier)+':')
+                        trackerList.SetStringItem(x, 1, announce_list[tier][t])
+                        x += 1
+                if announce is not None:
+                    trackerList.SetStringItem(x+1, 0, 'single:')
+                    trackerList.SetStringItem(x+1, 1, announce)
+                trackerList.SetColumnWidth(0,wxLIST_AUTOSIZE)
+                trackerList.SetColumnWidth(1,wxLIST_AUTOSIZE)
+                detailSizer.Add(trackerList)
+
+            if announce is None and announce_list is not None:
+                announce = announce_list[0][0]
+            if announce is not None:
+                detailSizer.Add(StaticText('likely tracker :'))
+                p = re.compile( '(.*/)[^/]+')
+                turl = p.sub (r'\1', announce)
+                trackerUrl = StaticText(turl, self.FONT, True, 'Blue')
+                detailSizer.Add(trackerUrl)
+            if metainfo.has_key('comment'):
+                detailSizer.Add(StaticText('comment :'))
+                detailSizer.Add(StaticText(metainfo['comment']))
+            if metainfo.has_key('creation date'):
+                detailSizer.Add(StaticText('creation date :'))
+                try:
+                    detailSizer.Add(StaticText(
+                        strftime('%x %X',localtime(metainfo['creation date']))))
+                except:
+                    try:
+                        detailSizer.Add(StaticText(metainfo['creation date']))
+                    except:
+                        detailSizer.Add(StaticText('<cannot read date>'))
+
+            detailSizer.AddGrowableCol(1)
+            colSizer.Add (detailSizer, 1, wxEXPAND)
+
+            okButton = wxButton(panel, -1, 'Ok')
+            colSizer.Add(okButton, 0, wxALIGN_RIGHT)
+            colSizer.AddGrowableCol(0)
+
+            if not self.configfileargs['gui_stretchwindow']:
+                aboutTitle.SetSize((400,-1))
+            else:
+                panel.SetAutoLayout(True)
+
+            border = wxBoxSizer(wxHORIZONTAL)
+            border.Add(colSizer, 1, wxEXPAND | wxALL, 4)
+            panel.SetSizer(border)
+            panel.SetAutoLayout(True)
+
+            if fileselector and fileListID:
+                def onRightClick(evt, self = self):
+                    s = []
+                    i = -1
+                    while True:
+                        i = self.fileList.GetNextItem(i,state=wxLIST_STATE_SELECTED)
+                        if i == -1:
+                            break
+                        s.append(i)
+                    if not s:   # just in case
+                        return
+                    oldstate = self.dow.fileselector[s[0]]
+                    kind=wxITEM_RADIO
+                    for i in s[1:]:
+                        if self.dow.fileselector[i] != oldstate:
+                            oldstate = None
+                            kind = wxITEM_NORMAL
+                            break
+                    menu = wxMenu()
+                    menu.Append(self.priorityIDs[1], "download first", kind=kind)
+                    menu.Append(self.priorityIDs[2], "download normally", kind=kind)
+                    menu.Append(self.priorityIDs[3], "download later", kind=kind)
+                    menu.Append(self.priorityIDs[0], "download never (deletes)", kind=kind)
+                    if oldstate is not None:
+                        menu.Check(self.priorityIDs[oldstate+1], True)
+
+                    def onSelection(evt, self = self, s = s):
+                        p = evt.GetId()
+                        priorities = self.dow.fileselector.get_priorities()
+                        for i in xrange(len(self.priorityIDs)):
+                            if p == self.priorityIDs[i]:
+                                for ss in s:
+                                    priorities[ss] = i-1
+                                    item = self.fileList.GetItem(ss)
+                                    item.SetTextColour(self.prioritycolors[i])
+                                    self.fileList.SetItem(item)
+                                self.dow.fileselector.set_priorities(priorities)
+                                self.fileList.Refresh()
+                                self.refresh_details = True
+                                break
+                        
+                    for id in self.priorityIDs:
+                        EVT_MENU(self.detailBox, id, onSelection)
+
+                    self.detailBox.PopupMenu(menu, evt.GetPoint())
+                        
+                EVT_LIST_ITEM_RIGHT_CLICK(self.detailBox, fileListID, onRightClick)
+
+            def closeDetail(evt, self = self):
+                if self.detailBox:
+                    self.detailBox.Close()
+            EVT_BUTTON(self.detailBox, okButton.GetId(), closeDetail)
+            def kill(evt, self = self):
+                try:
+                    self.detailBox.RemoveIcon()
+                except:
+                    pass
+                self.detailBox.Destroy()
+                self.detailBox = None
+                self.fileList = None
+                self.dow.filedatflag.clear()
+            EVT_CLOSE(self.detailBox, kill)
+
+            def trackerurl(self, turl = turl):
+                try:
+                    Thread(target = open_new(turl)).start()
+                except:
+                    pass
+            EVT_LEFT_DOWN(trackerUrl, trackerurl)
+
+            self.detailBox.Show ()
+            border.Fit(panel)
+            self.detailBox.Fit()
+
+            self.refresh_details = True
+            self.dow.filedatflag.set()
+        except:
+            self.exception()
+
+
+    def credits(self, event):
+        try:
+            if (self.creditsBox is not None):
+                try:
+                    self.creditsBox.Close()
+                except wxPyDeadObjectError, e:
+                    self.creditsBox = None
+
+            self.creditsBox = wxFrame(None, -1, 'Credits', size = (1,1),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+            try:
+                self.creditsBox.SetIcon(self.icon)
+            except:
+                pass
+
+            panel = wxPanel(self.creditsBox, -1)        
+
+            def StaticText(text, font = self.FONT, underline = False, color = None, panel = panel):
+                x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+                x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+                if color is not None:
+                    x.SetForegroundColour(color)
+                return x
+
+            colSizer = wxFlexGridSizer(cols = 1, vgap = 3)
+
+            titleSizer = wxBoxSizer(wxHORIZONTAL)
+            aboutTitle = StaticText('Credits', self.FONT+4)
+            titleSizer.Add (aboutTitle)
+            colSizer.Add (titleSizer)
+            colSizer.Add (StaticText(
+              'The following people have all helped with this\n' +
+              'version of BitTorrent in some way (in no particular order) -\n'));
+            creditSizer = wxFlexGridSizer(cols = 3)
+            creditSizer.Add(StaticText(
+              'Bill Bumgarner\n' +
+              'David Creswick\n' +
+              'Andrew Loewenstern\n' +
+              'Ross Cohen\n' +
+              'Jeremy Avnet\n' +
+              'Greg Broiles\n' +
+              'Barry Cohen\n' +
+              'Bram Cohen\n' +
+              'sayke\n' +
+              'Steve Jenson\n' +
+              'Myers Carpenter\n' +
+              'Francis Crick\n' +
+              'Petru Paler\n' +
+              'Jeff Darcy\n' +
+              'John Gilmore\n' +
+              'Xavier Bassery\n' +
+              'Pav Lucistnik'))
+            creditSizer.Add(StaticText('  '))
+            creditSizer.Add(StaticText(
+              'Yann Vernier\n' +
+              'Pat Mahoney\n' +
+              'Boris Zbarsky\n' +
+              'Eric Tiedemann\n' +
+              'Henry "Pi" James\n' +
+              'Loring Holden\n' +
+              'Robert Stone\n' +
+              'Michael Janssen\n' +
+              'Eike Frost\n' +
+              'Andrew Todd\n' +
+              'otaku\n' +
+              'Edward Keyes\n' +
+              'John Hoffman\n' +
+              'Uoti Urpala\n' +
+              'Jon Wolf\n' +
+              'Christoph Hohmann\n' +
+              'Micah Anderson'))
+            colSizer.Add (creditSizer, flag = wxALIGN_CENTER_HORIZONTAL)
+            okButton = wxButton(panel, -1, 'Ok')
+            colSizer.Add(okButton, 0, wxALIGN_RIGHT)
+            colSizer.AddGrowableCol(0)
+
+            border = wxBoxSizer(wxHORIZONTAL)
+            border.Add(colSizer, 1, wxEXPAND | wxALL, 4)
+            panel.SetSizer(border)
+            panel.SetAutoLayout(True)
+
+            def closeCredits(e, self = self):
+                if self.creditsBox:
+                    self.creditsBox.Close()
+            EVT_BUTTON(self.creditsBox, okButton.GetId(), closeCredits)
+            def kill(e, self = self):
+                try:
+                    self.creditsBox.RemoveIcon()
+                except:
+                    pass
+                self.creditsBox.Destroy()
+                self.creditsBox = None
+            EVT_CLOSE(self.creditsBox, kill)
+
+            self.creditsBox.Show()
+            border.Fit(panel)
+            self.creditsBox.Fit()
+        except:
+            self.exception()
+
+
+    def statusIconHelp(self, event):
+        try:
+            if (self.statusIconHelpBox is not None):
+                try:
+                    self.statusIconHelpBox.Close()
+                except wxPyDeadObjectError, e:
+                    self.statusIconHelpBox = None
+
+            self.statusIconHelpBox = wxFrame(None, -1, 'Help with the BitTorrent Status Light', size = (1,1),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+            try:
+                self.statusIconHelpBox.SetIcon(self.icon)
+            except:
+                pass
+
+            panel = wxPanel(self.statusIconHelpBox, -1)
+
+            def StaticText(text, font = self.FONT, underline = False, color = None, panel = panel):
+                x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+                x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+                if color is not None:
+                    x.SetForegroundColour(color)
+                return x
+
+            fullsizer = wxFlexGridSizer(cols = 1, vgap = 13)
+            colsizer = wxFlexGridSizer(cols = 2, hgap = 13, vgap = 13)
+
+            disconnectedicon=self.createStatusIcon('disconnected')
+            colsizer.Add(wxStaticBitmap(panel, -1, disconnectedicon))
+            colsizer.Add(StaticText(
+                'Waiting to connect to the tracker.\n' +
+                'If the status light stays black for a long time the tracker\n' +
+                'you are trying to connect to may not be working.  Unless you\n' +
+                'are receiving a message telling you otherwise, please wait,\n' +
+                'and BitTorrent will automatically try to reconnect for you.'), 1, wxALIGN_CENTER_VERTICAL)
+
+            noconnectionsicon=self.createStatusIcon('noconnections')
+            colsizer.Add(wxStaticBitmap(panel, -1, noconnectionsicon))
+            colsizer.Add(StaticText(
+                'You have no connections with other clients.\n' +
+                'Please be patient.  If after several minutes the status\n' +
+                'light remains red, this torrent may be old and abandoned.'), 1, wxALIGN_CENTER_VERTICAL)
+
+            noincomingicon=self.createStatusIcon('noincoming')
+            colsizer.Add(wxStaticBitmap(panel, -1, noincomingicon))
+            colsizer.Add(StaticText(
+                'You have not received any incoming connections from others.\n' +
+                'It may only be because no one has tried.  If you never see\n' +
+                'the status light turn green, it may indicate your system\n' +
+                'is behind a firewall or proxy server.  Please look into\n' +
+                'routing BitTorrent through your firewall in order to receive\n' +
+                'the best possible download rate.'), 1, wxALIGN_CENTER_VERTICAL)
+
+            nocompletesicon=self.createStatusIcon('nocompletes')
+            colsizer.Add(wxStaticBitmap(panel, -1, nocompletesicon))
+            colsizer.Add(StaticText(
+                'There are no complete copies among the clients you are\n' +
+                'connected to.  Don\'t panic, other clients in the torrent\n' +
+                "you can't see may have the missing data.\n" +
+                'If the status light remains blue, you may have problems\n' +
+                'completing your download.'), 1, wxALIGN_CENTER_VERTICAL)
+
+            allgoodicon=self.createStatusIcon('allgood')
+            colsizer.Add(wxStaticBitmap(panel, -1, allgoodicon))
+            colsizer.Add(StaticText(
+                'The torrent is operating properly.'), 1, wxALIGN_CENTER_VERTICAL)
+
+            fullsizer.Add(colsizer, 0, wxALIGN_CENTER)
+            colsizer2 = wxFlexGridSizer(cols = 1, hgap = 13)
+
+            colsizer2.Add(StaticText(
+                'Please note that the status light is not omniscient, and that it may\n' +
+                'be wrong in many instances.  A torrent with a blue light may complete\n' +
+                "normally, and an occasional yellow light doesn't mean your computer\n" +
+                'has suddenly become firewalled.'), 1, wxALIGN_CENTER_VERTICAL)
+
+            colspacer = StaticText('  ')
+            colsizer2.Add(colspacer)
+
+            okButton = wxButton(panel, -1, 'Ok')
+            colsizer2.Add(okButton, 0, wxALIGN_CENTER)
+            fullsizer.Add(colsizer2, 0, wxALIGN_CENTER)
+
+            border = wxBoxSizer(wxHORIZONTAL)
+            border.Add(fullsizer, 1, wxEXPAND | wxALL, 4)
+
+            panel.SetSizer(border)
+            panel.SetAutoLayout(True)
+
+
+            def closeHelp(self, frame = self):
+                frame.statusIconHelpBox.Close()
+            EVT_BUTTON(self.statusIconHelpBox, okButton.GetId(), closeHelp)
+
+            self.statusIconHelpBox.Show ()
+            border.Fit(panel)
+            self.statusIconHelpBox.Fit()
+        except:
+            self.exception()
+
+
+    def openConfigMenu(self, event):
+        try:
+            self.configfile.configMenu(self)
+        except:
+            self.exception()
+
+
+    def advanced(self, event):
+        try:
+            if not self.dow or not self.filename:
+                return
+            if (self.advBox is not None):
+                try:
+                    self.advBox.Close ()
+                except wxPyDeadObjectError, e:
+                    self.advBox = None
+
+            self.advBox = wxFrame(None, -1, 'BitTorrent Advanced', size = wxSize(200,200),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+            try:
+                self.advBox.SetIcon(self.icon)
+            except:
+                pass
+
+            panel = wxPanel(self.advBox, -1, size = wxSize (200,200))
+
+            def StaticText(text, font = self.FONT-1, underline = False, color = None, panel = panel):
+                x = wxStaticText(panel, -1, text, style = wxALIGN_LEFT)
+                x.SetFont(wxFont(font, wxDEFAULT, wxNORMAL, wxNORMAL, underline))
+                if color is not None:
+                    x.SetForegroundColour(color)
+                return x
+
+            colSizer = wxFlexGridSizer (cols = 1, vgap = 1)
+            colSizer.Add (StaticText('Advanced Info for ' + self.filename, self.FONT+4))
+
+            try:    # get system font width
+                fw = wxSystemSettings_GetFont(wxSYS_DEFAULT_GUI_FONT).GetPointSize()+1
+            except:
+                fw = wxSystemSettings_GetFont(wxSYS_SYSTEM_FONT).GetPointSize()+1
+
+            spewList = wxListCtrl(panel, -1, wxPoint(-1,-1), (fw*66,350), wxLC_REPORT|wxLC_HRULES|wxLC_VRULES)
+            self.spewList = spewList
+            spewList.SetAutoLayout (True)
+
+            colSizer.Add(spewList, -1, wxEXPAND)
+
+            colSizer.Add(StaticText(''))
+            self.storagestats1 = StaticText('')
+            self.storagestats2 = StaticText('')
+            colSizer.Add(self.storagestats1, -1, wxEXPAND)
+            colSizer.Add(self.storagestats2, -1, wxEXPAND)
+            spinnerSizer = wxFlexGridSizer(cols=4,vgap=0,hgap=0)
+            cstats = '          Listening on '
+            if self.connection_stats['interfaces']:
+                cstats += ', '.join(self.connection_stats['interfaces']) + ' on '
+            cstats += 'port ' + str(self.connection_stats['port'])
+            if self.connection_stats['upnp']:
+                cstats += ', UPnP port forwarded'
+            spinnerSizer.Add(StaticText(cstats), -1, wxEXPAND)
+            spinnerSizer.AddGrowableCol(0)
+            spinnerSizer.Add(StaticText('Max download rate (kB/s) '),0,wxALIGN_CENTER_VERTICAL)
+            self.downrateSpinner = wxSpinCtrl (panel, -1, "", (-1,-1), (50, -1))
+            self.downrateSpinner.SetFont(self.default_font)
+            self.downrateSpinner.SetRange(0,5000)
+            self.downrateSpinner.SetValue(self.config['max_download_rate'])
+            spinnerSizer.Add (self.downrateSpinner, 0)
+            EVT_SPINCTRL(self.downrateSpinner, -1, self.onDownRateSpinner)
+            spinnerSizer.Add(StaticText(' (0 = unlimited)  '),0,wxALIGN_CENTER_VERTICAL)
+            colSizer.Add(spinnerSizer,0,wxEXPAND)
+
+            colSizer.Add(StaticText(''))
+
+            buttonSizer = wxFlexGridSizer (cols = 5, hgap = 20)
+
+            reannounceButton = wxButton(panel, -1, 'Manual Announce')
+            buttonSizer.Add (reannounceButton)
+
+            extannounceButton = wxButton(panel, -1, 'External Announce')
+            buttonSizer.Add (extannounceButton)
+
+            bgallocButton = wxButton(panel, -1, 'Finish Allocation')
+            buttonSizer.Add (bgallocButton)
+
+            buttonSizer.Add(StaticText(''))
+
+            okButton = wxButton(panel, -1, 'Ok')
+            buttonSizer.Add (okButton)
+
+            colSizer.Add (buttonSizer, 0, wxALIGN_CENTER)
+            colSizer.AddGrowableCol(0)
+            colSizer.AddGrowableRow(1)
+
+            panel.SetSizer(colSizer)
+            panel.SetAutoLayout(True)
+
+            spewList.InsertColumn(0, "Optimistic Unchoke", format=wxLIST_FORMAT_CENTER, width=fw*2)
+            spewList.InsertColumn(1, "Peer ID", width=0)
+            spewList.InsertColumn(2, "IP", width=fw*11)
+            spewList.InsertColumn(3, "Local/Remote", format=wxLIST_FORMAT_CENTER, width=fw*3)
+            spewList.InsertColumn(4, "Up", format=wxLIST_FORMAT_RIGHT, width=fw*6)
+            spewList.InsertColumn(5, "Interested", format=wxLIST_FORMAT_CENTER, width=fw*2)
+            spewList.InsertColumn(6, "Choking", format=wxLIST_FORMAT_CENTER, width=fw*2)
+            spewList.InsertColumn(7, "Down", format=wxLIST_FORMAT_RIGHT, width=fw*6)
+            spewList.InsertColumn(8, "Interesting", format=wxLIST_FORMAT_CENTER, width=fw*2)
+            spewList.InsertColumn(9, "Choked", format=wxLIST_FORMAT_CENTER, width=fw*2)
+            spewList.InsertColumn(10, "Snubbed", format=wxLIST_FORMAT_CENTER, width=fw*2)
+            spewList.InsertColumn(11, "Downloaded", format=wxLIST_FORMAT_RIGHT, width=fw*7)
+            spewList.InsertColumn(12, "Uploaded", format=wxLIST_FORMAT_RIGHT, width=fw*7)
+            spewList.InsertColumn(13, "Completed", format=wxLIST_FORMAT_RIGHT, width=fw*6)
+            spewList.InsertColumn(14, "Peer Download Speed", format=wxLIST_FORMAT_RIGHT, width=fw*6)
+
+            def reannounce(self, frame = self):
+                if (clock() - frame.reannouncelast > 60):
+                    frame.reannouncelast = clock()
+                    frame.dow.reannounce()
+            EVT_BUTTON(self.advBox, reannounceButton.GetId(), reannounce)
+
+            self.advextannouncebox = None
+            def reannounce_external(self, frame = self):
+                if (frame.advextannouncebox is not None):
+                    try:
+                        frame.advextannouncebox.Close ()
+                    except wxPyDeadObjectError, e:
+                        frame.advextannouncebox = None
+
+                frame.advextannouncebox = wxFrame(None, -1, 'External Announce', size = (1,1),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+                try:
+                    frame.advextannouncebox.SetIcon(frame.icon)
+                except:
+                    pass
+
+                panel = wxPanel(frame.advextannouncebox, -1)
+
+                fullsizer = wxFlexGridSizer(cols = 1, vgap = 13)
+                msg = wxStaticText(panel, -1, "Enter tracker announce URL:")
+                msg.SetFont(frame.default_font)
+                fullsizer.Add(msg)
+
+                frame.advexturl = wxTextCtrl(parent = panel, id = -1, value = '',
+                                    size = (255, 20), style = wxTE_PROCESS_TAB)
+                frame.advexturl.SetFont(frame.default_font)
+                frame.advexturl.SetValue(frame.lastexternalannounce)
+                fullsizer.Add(frame.advexturl)
+
+                buttonSizer = wxFlexGridSizer (cols = 2, hgap = 10)
+
+                okButton = wxButton(panel, -1, 'OK')
+                buttonSizer.Add (okButton)
+
+                cancelButton = wxButton(panel, -1, 'Cancel')
+                buttonSizer.Add (cancelButton)
+
+                fullsizer.Add (buttonSizer, 0, wxALIGN_CENTER)
+
+                border = wxBoxSizer(wxHORIZONTAL)
+                border.Add(fullsizer, 1, wxEXPAND | wxALL, 4)
+
+                panel.SetSizer(border)
+                panel.SetAutoLayout(True)
+
+                def ok(self, frame = frame):
+                    special = frame.advexturl.GetValue()
+                    if special:
+                        frame.lastexternalannounce = special
+                        if (clock() - frame.reannouncelast > 60):
+                            frame.reannouncelast = clock()
+                            frame.dow.reannounce(special)
+                    frame.advextannouncebox.Close()
+                EVT_BUTTON(frame.advextannouncebox, okButton.GetId(), ok)
+
+                def cancel(self, frame = frame):
+                    frame.advextannouncebox.Close()
+                EVT_BUTTON(frame.advextannouncebox, cancelButton.GetId(), cancel)
+
+                frame.advextannouncebox.Show ()
+                fullsizer.Fit(panel)
+                frame.advextannouncebox.Fit()
+
+            EVT_BUTTON(self.advBox, extannounceButton.GetId(), reannounce_external)
+
+            def bgalloc(self, frame = self):
+                if frame.dow.storagewrapper is not None:
+                    frame.dow.storagewrapper.bgalloc()
+            EVT_BUTTON(self.advBox, bgallocButton.GetId(), bgalloc)
+
+            def closeAdv(evt, self = self):
+                self.advBox.Close()
+            def killAdv(evt, self = self):
+                try:
+                    self.advBox.RemoveIcon()
+                except:
+                    pass
+                self.onDownRateSpinner()
+                self.dow.spewflag.clear()
+                self.advBox.Destroy()
+                self.advBox = None
+                if (self.advextannouncebox is not None):
+                    try:
+                        self.advextannouncebox.Close()
+                    except wxPyDeadObjectError, e:
+                        pass
+                    self.advextannouncebox = None
+            EVT_BUTTON(self.advBox, okButton.GetId(), closeAdv)
+            EVT_CLOSE(self.advBox, killAdv)
+
+            self.advBox.Show ()
+            colSizer.Fit(panel)
+            self.advBox.Fit()
+            if self.dow:
+                self.dow.spewflag.set()
+        except:
+            self.exception()
+
+
+    def displayUsage(self, text):
+        self.invokeLater(self.onDisplayUsage, [text])
+
+    def onDisplayUsage(self, text):        
+        try:
+            self.done(None)
+            w = wxFrame(None, -1, 'BITTORRENT USAGE',
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+            panel = wxPanel(w, -1)
+            sizer = wxFlexGridSizer(cols = 1)
+            sizer.Add(wxTextCtrl(panel, -1, text,
+                        size = (500,300), style = wxTE_READONLY|wxTE_MULTILINE))
+            okButton = wxButton(panel, -1, 'Ok')
+
+            def closeUsage(self, frame = self):
+                frame.usageBox.Close()
+            EVT_BUTTON(w, okButton.GetId(), closeUsage)
+            def kill(self, frame = self):
+                frame.usageBox.Destroy()
+                frame.usageBox = None
+            EVT_CLOSE(w, kill)
+
+            sizer.Add(okButton, 0, wxALIGN_RIGHT)
+            border = wxBoxSizer(wxHORIZONTAL)
+            border.Add(sizer, 1, wxEXPAND | wxALL, 4)
+
+            panel.SetSizer(border)
+            panel.SetAutoLayout(True)
+
+            border.Fit(panel)
+            w.Fit()
+            w.Show()
+            self.usageBox = w
+        except:
+            self.exception()
+
+
+    def updateStatus(self, dpflag = Event(), fractionDone = None,
+            timeEst = None, downRate = None, upRate = None,
+            activity = None, statistics = None, spew = None, sizeDone = None,
+            **kws):
+        if activity is not None:
+            self.activity = activity
+        self.gui_fractiondone = fractionDone
+        self.invokeLater(self.onUpdateStatus,
+                 [dpflag, timeEst, downRate, upRate, statistics, spew, sizeDone])
+
+    def onUpdateStatus(self, dpflag, timeEst, downRate, upRate,
+                             statistics, spew, sizeDone):
+        if self.firstupdate:
+            if not self.old_ratesettings:
+                self.old_ratesettings = {}
+            self.connChoice.SetStringSelection(
+                self.old_ratesettings.get('rate setting',
+                                  self.configfileargs['gui_ratesettingsdefault']))
+            self.onConnChoice(0,
+                              self.old_ratesettings.get('uploads'),
+                              self.old_ratesettings.get('max upload rate'))
+            if self.old_ratesettings.has_key('max download rate'):
+                self.dow.setDownloadRate(self.old_ratesettings['max download rate'])
+                if self.advBox:
+                    self.downrateSpinner.SetValue(self.old_ratesettings['max download rate'])
+            self.firstupdate = False
+            if self.advBox:
+                self.dow.spewflag.set()
+        if self.ispaused or statistics is None:
+            self.setStatusIcon('startup')
+        elif statistics.numPeers + statistics.numSeeds + statistics.numOldSeeds == 0:
+            if statistics.last_failed:
+                self.setStatusIcon('disconnected')
+            else:
+                self.setStatusIcon('noconnections')
+        elif ( not statistics.external_connection_made
+            and not self.configfileargs['gui_forcegreenonfirewall'] ):
+            self.setStatusIcon('noincoming')
+        elif ( (statistics.numSeeds + statistics.numOldSeeds == 0)
+               and ( (self.fin and statistics.numCopies < 1)
+                    or (not self.fin and statistics.numCopies2 < 1) ) ):
+            self.setStatusIcon('nocompletes')
+        elif timeEst == 0 and sizeDone < self.torrentsize:
+            self.setStatusIcon('nocompletes')
+        else:
+            self.setStatusIcon('allgood')
+        if statistics is None:
+            self.setgaugemode(-1)
+        elif self.gui_fractiondone == None or self.gui_fractiondone == 1.0:
+            self.setgaugemode(1)
+        else:
+            self.setgaugemode(0)
+
+        if self.updateSliderFlag == 1:
+            self.updateSliderFlag = 0
+            newValue = (self.rateSpinner.GetValue()
+                         / connChoices[self.connChoice.GetSelection()]['rate'].get('div',1))
+            if self.rateslider.GetValue() != newValue:
+                self.rateslider.SetValue(newValue)
+        if self.updateSpinnerFlag == 1:
+            self.updateSpinnerFlag = 0
+            cc = connChoices[self.connChoice.GetSelection()]
+            if cc.has_key('rate'):
+                newValue = (self.rateslider.GetValue() * cc['rate'].get('div',1))
+                if self.rateSpinner.GetValue() != newValue:
+                    self.rateSpinner.SetValue(newValue)
+
+        if self.fin:
+            if statistics is None or statistics.numOldSeeds > 0 or statistics.numCopies > 1:
+                self.gauge.SetValue(1000)
+            else:
+                self.gauge.SetValue(int(1000*statistics.numCopies))
+        elif self.gui_fractiondone is not None:
+            gaugelevel = int(self.gui_fractiondone * 1000)
+            self.gauge.SetValue(gaugelevel)
+            if statistics is not None and statistics.downTotal is not None:
+                if self.configfileargs['gui_displaymiscstats']:
+                    self.frame.SetTitle('%.1f%% (%.2f MiB) %s - BitTorrent %s' % (float(gaugelevel)/10, float(sizeDone) / (1<<20), self.filename, version))
+                else:
+                    self.frame.SetTitle('%.1f%% %s - BitTorrent %s' % (float(gaugelevel)/10, self.filename, version))
+            else:
+                self.frame.SetTitle('%.0f%% %s - BitTorrent %s' % (float(gaugelevel)/10, self.filename, version))
+        if self.ispaused:
+            self.timeText.SetLabel(hours(clock() - self.starttime) + ' /')
+        elif timeEst is None:
+            self.timeText.SetLabel(hours(clock() - self.starttime) + ' / ' + self.activity)
+        else:
+            self.timeText.SetLabel(hours(clock() - self.starttime) + ' / ' + hours(timeEst))
+        if not self.ispaused:
+            if downRate is not None:
+                self.downRateText.SetLabel('%.0f kB/s' % (float(downRate) / 1000))
+            if upRate is not None:
+                self.upRateText.SetLabel('%.0f kB/s' % (float(upRate) / 1000))
+        if self.taskbaricon:
+            icontext='BitTorrent '
+            if self.gui_fractiondone is not None and not self.fin:
+                if statistics is not None and statistics.downTotal is not None:
+                    icontext=icontext+' %.1f%% (%.2f MiB)' % (self.gui_fractiondone*100, float(sizeDone) / (1<<20))
+                else:
+                    icontext=icontext+' %.0f%%' % (self.gui_fractiondone*100)
+            if upRate is not None:
+                icontext=icontext+' u:%.0f kB/s' % (float(upRate) / 1000)
+            if downRate is not None:
+                icontext=icontext+' d:%.0f kB/s' % (float(downRate) / 1000)
+            icontext+=' %s' % self.filename
+            try:
+                if self.gui_fractiondone == None or self.gui_fractiondone == 1.0:
+                    self.frame.tbicon.SetIcon(self.finicon,icontext)
+                else:
+                    self.frame.tbicon.SetIcon(self.icon,icontext)
+            except:
+                pass
+        if statistics is not None:
+            if self.autorate:
+                self.rateSpinner.SetValue(statistics.upRate)
+                self.connSpinner.SetValue(statistics.upSlots)
+
+            downtotal = statistics.downTotal + self.old_download
+            uptotal = statistics.upTotal + self.old_upload
+            if self.configfileargs['gui_displaymiscstats']:
+                self.downText.SetLabel('%.2f MiB' % (float(downtotal) / (1 << 20)))
+                self.upText.SetLabel('%.2f MiB' % (float(uptotal) / (1 << 20)))
+            if downtotal > 0:
+                sharerating = float(uptotal)/downtotal
+                if sharerating == 0:
+                    shareSmiley = ''
+                    color = 'Black'
+                elif sharerating < 0.5:
+                    shareSmiley = ':-('
+                    color = 'Red'
+                elif sharerating < 1.0:
+                    shareSmiley = ':-|'
+                    color = 'Orange'
+                else:
+                    shareSmiley = ':-)'
+                    color = 'Forest Green'
+            elif uptotal == 0:
+                sharerating = None
+                shareSmiley = ''
+                color = 'Black'
+            else:
+                sharerating = None
+                shareSmiley = '00 :-D'
+                color = 'Forest Green'
+            if sharerating is None:
+                self.shareRatingText.SetLabel(shareSmiley)
+            else:
+                self.shareRatingText.SetLabel('%.3f %s' % (sharerating, shareSmiley))
+            self.shareRatingText.SetForegroundColour(color)
+
+            if self.configfileargs['gui_displaystats']:
+                if not self.fin:
+                    self.seedStatusText.SetLabel('connected to %d seeds; also seeing %.3f distributed copies' % (statistics.numSeeds,0.001*int(1000*statistics.numCopies2)))
+                else:
+                    self.seedStatusText.SetLabel('%d seeds seen recently; also seeing %.3f distributed copies' % (statistics.numOldSeeds,0.001*int(1000*statistics.numCopies)))
+                self.peerStatusText.SetLabel('connected to %d peers with an average of %.1f%% completed (total speed %.0f kB/s)' % (statistics.numPeers,statistics.percentDone,float(statistics.torrentRate) / (1000)))
+        if ((clock() - self.lastError) > 300):
+            self.errorText.SetLabel('')
+
+        if ( self.configfileargs['gui_displaymiscstats']
+            and statistics is not None and statistics.backgroundallocating ):
+            self.bgalloc_periods += 1
+            if self.bgalloc_periods > 3:
+                self.bgalloc_periods = 0
+            self.bgallocText.SetLabel('ALLOCATING'+(' .'*self.bgalloc_periods))
+        elif self.dow.superseedflag.isSet():
+            self.bgallocText.SetLabel('SUPER-SEED')
+        else:
+            self.bgallocText.SetLabel('')
+
+
+        if spew is not None and (clock()-self.spewwait>1):
+            if (self.advBox is not None):
+                self.spewwait = clock()
+                spewList = self.spewList
+                spewlen = len(spew)+2
+                if statistics is not None:
+                    kickbanlen = len(statistics.peers_kicked)+len(statistics.peers_banned)
+                    if kickbanlen:
+                        spewlen += kickbanlen+1
+                else:
+                    kickbanlen = 0
+                for x in range(spewlen-spewList.GetItemCount()):
+                    i = wxListItem()
+                    spewList.InsertItem(i)
+                for x in range(spewlen,spewList.GetItemCount()):
+                    spewList.DeleteItem(len(spew)+1)
+
+                tot_uprate = 0.0
+                tot_downrate = 0.0
+                for x in range(len(spew)):
+                    if (spew[x]['optimistic'] == 1):
+                        a = '*'
+                    else:
+                        a = ' '
+                    spewList.SetStringItem(x, 0, a)
+                    spewList.SetStringItem(x, 1, spew[x]['id'])
+                    spewList.SetStringItem(x, 2, spew[x]['ip'])
+                    spewList.SetStringItem(x, 3, spew[x]['direction'])
+                    if spew[x]['uprate'] > 100:
+                        spewList.SetStringItem(x, 4, '%.0f kB/s' % (float(spew[x]['uprate']) / 1000))
+                    else:
+                        spewList.SetStringItem(x, 4, ' ')
+                    tot_uprate += spew[x]['uprate']
+                    if (spew[x]['uinterested'] == 1):
+                        a = '*'
+                    else:
+                        a = ' '
+                    spewList.SetStringItem(x, 5, a)
+                    if (spew[x]['uchoked'] == 1):
+                        a = '*'
+                    else:
+                        a = ' '
+                    spewList.SetStringItem(x, 6, a)
+
+                    if spew[x]['downrate'] > 100:
+                        spewList.SetStringItem(x, 7, '%.0f kB/s' % (float(spew[x]['downrate']) / 1000))
+                    else:
+                        spewList.SetStringItem(x, 7, ' ')
+                    tot_downrate += spew[x]['downrate']
+
+                    if (spew[x]['dinterested'] == 1):
+                        a = '*'
+                    else:
+                        a = ' '
+                    spewList.SetStringItem(x, 8, a)
+                    if (spew[x]['dchoked'] == 1):
+                        a = '*'
+                    else:
+                        a = ' '
+                    spewList.SetStringItem(x, 9, a)
+                    if (spew[x]['snubbed'] == 1):
+                        a = '*'
+                    else:
+                        a = ' '
+                    spewList.SetStringItem(x, 10, a)
+                    spewList.SetStringItem(x, 11, '%.2f MiB' % (float(spew[x]['dtotal']) / (1 << 20)))
+                    if spew[x]['utotal'] is not None:
+                        a = '%.2f MiB' % (float(spew[x]['utotal']) / (1 << 20))
+                    else:
+                        a = ''
+                    spewList.SetStringItem(x, 12, a)
+                    spewList.SetStringItem(x, 13, '%.1f%%' % (float(int(spew[x]['completed']*1000))/10))
+                    if spew[x]['speed'] is not None:
+                        a = '%.0f kB/s' % (float(spew[x]['speed']) / 1000)
+                    else:
+                        a = ''
+                    spewList.SetStringItem(x, 14, a)
+
+                x = len(spew)
+                for i in range(15):
+                    spewList.SetStringItem(x, i, '')
+
+                x += 1
+                spewList.SetStringItem(x, 2, '         TOTALS:')
+                spewList.SetStringItem(x, 4, '%.0f kB/s' % (float(tot_uprate) / 1000))
+                spewList.SetStringItem(x, 7, '%.0f kB/s' % (float(tot_downrate) / 1000))
+                if statistics is not None:
+                    spewList.SetStringItem(x, 11, '%.2f MiB' % (float(statistics.downTotal) / (1 << 20)))
+                    spewList.SetStringItem(x, 12, '%.2f MiB' % (float(statistics.upTotal) / (1 << 20)))
+                else:
+                    spewList.SetStringItem(x, 11, '')
+                    spewList.SetStringItem(x, 12, '')
+                for i in [0,1,3,5,6,8,9,10,13,14]:
+                    spewList.SetStringItem(x, i, '')
+
+                if kickbanlen:
+                    x += 1
+                    for i in range(14):
+                        spewList.SetStringItem(x, i, '')
+
+                    for peer in statistics.peers_kicked:
+                        x += 1
+                        spewList.SetStringItem(x, 2, peer[0])
+                        spewList.SetStringItem(x, 1, peer[1])
+                        spewList.SetStringItem(x, 4, 'KICKED')
+                        for i in [0,3,5,6,7,8,9,10,11,12,13,14]:
+                            spewList.SetStringItem(x, i, '')
+
+                    for peer in statistics.peers_banned:
+                        x += 1
+                        spewList.SetStringItem(x, 2, peer[0])
+                        spewList.SetStringItem(x, 1, peer[1])
+                        spewList.SetStringItem(x, 4, 'BANNED')
+                        for i in [0,3,5,6,7,8,9,10,11,12,13,14]:
+                            spewList.SetStringItem(x, i, '')
+
+                if statistics is not None:
+                    l1 = (
+                        '          currently downloading %d pieces (%d just started), %d pieces partially retrieved'
+                                        % ( statistics.storage_active,
+                                            statistics.storage_new,
+                                            statistics.storage_dirty ) )
+                    if statistics.storage_isendgame:
+                        l1 += ', endgame mode'
+                    self.storagestats2.SetLabel(l1)
+                    self.storagestats1.SetLabel(
+                        '          %d of %d pieces complete (%d just downloaded), %d failed hash check, %sKiB redundant data discarded'
+                                        % ( statistics.storage_numcomplete,
+                                            statistics.storage_totalpieces,
+                                            statistics.storage_justdownloaded,
+                                            statistics.storage_numflunked,
+                                            comma_format(int(statistics.discarded/1024)) ) )
+
+        if ( self.fileList is not None and statistics is not None
+                and (statistics.filelistupdated.isSet() or self.refresh_details) ):
+            for i in range(len(statistics.filecomplete)):
+                if self.dow.fileselector[i] == -1:
+                    self.fileList.SetItemImage(i,0,0)
+                    self.fileList.SetStringItem(i,1,'')
+                elif statistics.fileinplace[i]:
+                    self.fileList.SetItemImage(i,2,2)
+                    self.fileList.SetStringItem(i,1,"done")
+                elif statistics.filecomplete[i]:
+                    self.fileList.SetItemImage(i,1,1)
+                    self.fileList.SetStringItem(i,1,"100%")
+                else:
+                    self.fileList.SetItemImage(i,0,0)
+                    frac = statistics.fileamtdone[i]
+                    if frac:
+                        self.fileList.SetStringItem(i,1,'%d%%' % (frac*100))
+                    else:
+                        self.fileList.SetStringItem(i,1,'')
+
+            statistics.filelistupdated.clear()
+            self.refresh_details = False
+
+        if self.configfile.configReset():     # whoopee!  Set everything invisible! :-)
+
+            self.dow.config['security'] = self.configfileargs['security']
+
+            statsdisplayflag = self.configfileargs['gui_displaymiscstats']
+            self.downTextLabel.Show(statsdisplayflag)
+            self.upTextLabel.Show(statsdisplayflag)
+            self.fileDestLabel.Show(statsdisplayflag)
+            self.fileDestText.Show(statsdisplayflag)
+            self.colSizer.Layout()
+
+            self.downText.SetLabel('')          # blank these to flush them
+            self.upText.SetLabel('')
+            self.seedStatusText.SetLabel('')
+            self.peerStatusText.SetLabel('')
+
+            ratesettingsmode = self.configfileargs['gui_ratesettingsmode']
+            ratesettingsflag1 = True    #\ settings
+            ratesettingsflag2 = False   #/ for 'basic'
+            if ratesettingsmode == 'none':
+                ratesettingsflag1 = False
+            elif ratesettingsmode == 'full':
+                ratesettingsflag2 = True
+            self.connChoiceLabel.Show(ratesettingsflag1)
+            self.connChoice.Show(ratesettingsflag1)
+            self.rateSpinnerLabel.Show(ratesettingsflag2)
+            self.rateSpinner.Show(ratesettingsflag2)
+            self.rateLowerText.Show(ratesettingsflag2)
+            self.rateUpperText.Show(ratesettingsflag2)
+            self.rateslider.Show(ratesettingsflag2)
+            self.connSpinnerLabel.Show(ratesettingsflag2)
+            self.connSpinner.Show(ratesettingsflag2)
+            self.connLowerText.Show(ratesettingsflag2)
+            self.connUpperText.Show(ratesettingsflag2)
+            self.connslider.Show(ratesettingsflag2)
+            self.unlimitedLabel.Show(ratesettingsflag2)
+
+            self.setgaugemode(None)
+
+        self.frame.Layout()
+        self.frame.Refresh()
+
+        self.gui_fractiondone = None
+        dpflag.set()
+
+
+    def finished(self):
+        self.fin = True
+        self.invokeLater(self.onFinishEvent)
+
+    def failed(self):
+        self.fin = True
+        self.invokeLater(self.onFailEvent)
+
+    def error(self, errormsg):
+        self.invokeLater(self.onErrorEvent, [errormsg])
+
+    def onFinishEvent(self):
+        self.activity = hours(clock() - self.starttime) + ' / ' +'Download Succeeded!'
+        self.cancelButton.SetLabel('Finish')
+        self.gauge.SetValue(0)
+        self.frame.SetTitle('%s - Upload - BitTorrent %s' % (self.filename, version))
+        try:
+            self.frame.SetIcon(self.finicon)
+        except:
+            pass
+        if self.taskbaricon:
+            self.frame.tbicon.SetIcon(self.finicon, "BitTorrent - Finished")
+        self.downRateText.SetLabel('')
+
+    def onFailEvent(self):
+        if not self.shuttingdown:
+            self.timeText.SetLabel(hours(clock() - self.starttime) + ' / ' +'Failed!')
+            self.activity = 'Failed!'
+            self.cancelButton.SetLabel('Close')
+            self.gauge.SetValue(0)
+            self.downRateText.SetLabel('')
+            self.setStatusIcon('startup')
+
+    def onErrorEvent(self, errormsg):
+        if errormsg[:2] == '  ':    # indent at least 2 spaces means a warning message
+            self.errorText.SetLabel(errormsg)
+            self.lastError = clock()
+        else:
+            self.errorText.SetLabel(strftime('ERROR (%x %X) -\n') + errormsg)
+            self.lastError = clock()
+
+
+    def chooseFile(self, default, size, saveas, dir):
+        f = Event()
+        bucket = [None]
+        self.invokeLater(self.onChooseFile, [default, bucket, f, size, dir, saveas])
+        f.wait()
+        return bucket[0]
+
+    def onChooseFile(self, default, bucket, f, size, dir, saveas):
+        if saveas == '':
+            if self.configfileargs['gui_default_savedir'] != '':
+                start_dir = self.configfileargs['gui_default_savedir']
+            else:
+                start_dir = self.configfileargs['last_saved']
+            if not isdir(start_dir):    # if it's not set properly
+                start_dir = '/'    # yes, this hack does work in Windows
+            if dir:
+                start_dir1 = start_dir
+                if isdir(join(start_dir,default)):
+                    start_dir = join(start_dir,default)
+                dl = wxDirDialog(self.frame,
+                        'Choose a directory to save to, pick a partial download to resume',
+                        defaultPath = start_dir, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
+            else:
+                dl = wxFileDialog(self.frame,
+                        'Choose file to save as, pick a partial download to resume', 
+                        defaultDir = start_dir, defaultFile = default, wildcard = '*',
+                        style = wxSAVE)
+
+            if dl.ShowModal() != wxID_OK:
+                f.set()
+                self.done(None)
+                return
+
+            d = dl.GetPath()
+            if d == start_dir:
+                d = start_dir1
+            bucket[0] = d
+            d1,d2 = split(d)
+            if d2 == default:
+                d = d1
+            self.configfile.WriteLastSaved(d)
+
+        else:
+            bucket[0] = saveas
+            default = basename(saveas)
+
+        self.onChooseFileDone(default, size)
+        f.set()
+
+    def ChooseFileDone(self, name, size):
+        self.invokeLater(self.onChooseFileDone, [name, size])
+
+    def onChooseFileDone(self, name, size):
+        self.torrentsize = size
+        lname = basename(name)
+        self.filename = lname
+        self.fileNameText.SetLabel('%s' % (lname))
+        self.fileSizeText.SetLabel('(%.2f MiB)' % (float(size) / (1 << 20)))
+        self.timeText.SetLabel(hours(clock() - self.starttime) + ' / ' + self.activity)
+        self.fileDestText.SetLabel(name)
+        self.frame.SetTitle(lname + ' - BitTorrent ' + version)
+
+        minsize = self.fileNameText.GetBestSize()
+        if (not self.configfileargs['gui_stretchwindow'] or
+                            minsize.GetWidth() < self.addwidth):
+            minsize.SetWidth(self.addwidth)
+        self.fnsizer.SetMinSize (minsize)
+        minsize.SetHeight(self.fileSizeText.GetBestSize().GetHeight())
+        self.fnsizer2.SetMinSize (minsize)
+        minsize.SetWidth(minsize.GetWidth()+(self.FONT*8))
+        minsize.SetHeight(self.fileNameText.GetBestSize().GetHeight()+self.fileSizeText.GetBestSize().GetHeight())
+        minsize.SetHeight(2*self.errorText.GetBestSize().GetHeight())
+        self.errorTextSizer.SetMinSize(minsize)
+        self.topboxsizer.SetMinSize(minsize)
+
+        # Kludge to make details and about catch the event
+        self.frame.SetSize ((self.frame.GetSizeTuple()[0]+1, self.frame.GetSizeTuple()[1]+1))
+        self.frame.SetSize ((self.frame.GetSizeTuple()[0]-1, self.frame.GetSizeTuple()[1]-1))
+        self.colSizer.Fit(self.frame)
+        self.frame.Layout()
+        self.frame.Refresh()
+
+    def newpath(self, path):
+        self.invokeLater(self.onNewpath, [path])
+
+    def onNewpath(self, path):
+        self.fileDestText.SetLabel(path)
+
+    def pause(self, event):
+        self.invokeLater(self.onPause)
+
+    def onPause(self):
+        if not self.dow:
+            return
+        if self.ispaused:
+            self.ispaused = False
+            self.pauseButton.SetLabel('Pause')
+            self.dow.Unpause()
+        else:
+            if self.dow.Pause():
+                self.ispaused = True
+                self.pauseButton.SetLabel('Resume')
+                self.downRateText.SetLabel(' ')
+                self.upRateText.SetLabel(' ')
+                self.setStatusIcon('startup')
+
+    def done(self, event):
+        self.uiflag.set()
+        self.flag.set()
+        self.shuttingdown = True
+
+        try:
+            self.frame.tbicon.RemoveIcon()
+        except:
+            pass
+        try:
+            self.frame.tbicon.Destroy()
+        except:
+            pass
+        try:
+            self.detailBox.Close()
+        except:
+            self.detailBox = None
+        try:
+            self.aboutBox.Close()
+        except:
+            self.aboutBox = None
+        try:
+            self.creditsBox.Close()
+        except:
+            self.creditsBox = None
+        try:
+            self.advBox.Close()
+        except:
+            self.advBox = None
+        try:
+            self.statusIconHelpBox.Close()
+        except:
+            self.statusIconHelpBox = None
+        try:
+            self.frame.RemoveIcon()
+        except:
+            pass
+
+        self.frame.Destroy()
+
+
+    def exception(self):
+        data = StringIO()
+        print_exc(file = data)
+        print data.getvalue()   # report exception here too
+        self.on_errorwindow(data.getvalue())
+
+    def errorwindow(self, err):
+        self.invokeLater(self.on_errorwindow,[err])
+
+    def on_errorwindow(self, err):
+        if self._errorwindow is None:
+            w = wxFrame(None, -1, 'BITTORRENT ERROR', size = (1,1),
+                            style = wxDEFAULT_FRAME_STYLE|wxFULL_REPAINT_ON_RESIZE)
+            panel = wxPanel(w, -1)
+
+            sizer = wxFlexGridSizer(cols = 1)
+            t = ( 'BitTorrent ' + version + '\n' +
+                  'OS: ' + sys.platform + '\n' +
+                  'Python version: ' + sys.version + '\n' +
+                  'wxWindows version: ' + wxVERSION_STRING + '\n' )
+            try:
+                t += 'Psyco version: ' + hex(psyco.__version__)[2:] + '\n'
+            except:
+                pass
+            try:
+                t += 'Allocation method: ' + self.config['alloc_type']
+                if self.dow.storagewrapper.bgalloc_active:
+                    t += '*'
+                t += '\n'
+            except:
+                pass
+            sizer.Add(wxTextCtrl(panel, -1, t + '\n' + err,
+                                size = (500,300), style = wxTE_READONLY|wxTE_MULTILINE))
+
+            sizer.Add(wxStaticText(panel, -1,
+                    '\nHelp us iron out the bugs in the engine!'))
+            linkMail = wxStaticText(panel, -1,
+                'Please report this error to '+report_email)
+            linkMail.SetFont(wxFont(self.FONT, wxDEFAULT, wxNORMAL, wxNORMAL, True))
+            linkMail.SetForegroundColour('Blue')
+            sizer.Add(linkMail)
+
+            def maillink(self):
+                Thread(target = open_new, args = ["mailto:" + report_email
+                                         + "?subject=autobugreport"]).start()
+            EVT_LEFT_DOWN(linkMail, maillink)
+
+            border = wxBoxSizer(wxHORIZONTAL)
+            border.Add(sizer, 1, wxEXPAND | wxALL, 4)
+
+            panel.SetSizer(border)
+            panel.SetAutoLayout(True)
+
+            w.Show()
+            border.Fit(panel)
+            w.Fit()
+            self._errorwindow = w
+
+
+class btWxApp(wxApp):
+    def __init__(self, x, params):
+        self.params = params
+        wxApp.__init__(self, x)
+
+    def OnInit(self):
+        doneflag = Event()
+        self.configfile = configReader()
+        d = DownloadInfoFrame(doneflag, self.configfile)
+        self.SetTopWindow(d.frame)
+        if len(self.params) == 0:
+            b = wxFileDialog (d.frame, 'Choose .torrent file to use',
+                        defaultDir = '', defaultFile = '', wildcard = '*.torrent',
+                        style = wxOPEN)
+
+            if b.ShowModal() == wxID_OK:
+                self.params.append (b.GetPath())
+
+        thread = Thread(target = next, args = [self.params, d, doneflag, self.configfile])
+        thread.setDaemon(False)
+        thread.start()
+        return 1
+
+def run(params):
+    if WXPROFILER:
+        import profile, pstats
+        p = profile.Profile()
+        p.runcall(_run, params)
+        log = open('profile_data_wx.'+strftime('%y%m%d%H%M%S')+'.txt','a')
+        normalstdout = sys.stdout
+        sys.stdout = log
+#        pstats.Stats(p).strip_dirs().sort_stats('cumulative').print_stats()
+        pstats.Stats(p).strip_dirs().sort_stats('time').print_stats()
+        sys.stdout = normalstdout
+    else:
+        _run(params)
+        
+def _run(params):
+    app = btWxApp(0, params)
+    app.MainLoop()
+
+def next(params, d, doneflag, configfile):
+    if PROFILER:
+        import profile, pstats
+        p = profile.Profile()
+        p.runcall(_next, params, d, doneflag, configfile)
+        log = open('profile_data.'+strftime('%y%m%d%H%M%S')+'.txt','a')
+        normalstdout = sys.stdout
+        sys.stdout = log
+#        pstats.Stats(p).strip_dirs().sort_stats('cumulative').print_stats()
+        pstats.Stats(p).strip_dirs().sort_stats('time').print_stats()
+        sys.stdout = normalstdout
+    else:
+        _next(params, d, doneflag, configfile)
+
+def _next(params, d, doneflag, configfile):
+    err = False
+    try:
+        while 1:
+            try:            
+                config = parse_params(params, configfile.config)
+            except ValueError, e:
+                d.error('error: ' + str(e) + '\nrun with no args for parameter explanations')
+                break
+            if not config:
+                d.displayUsage(get_usage(presets = configfile.config))
+                break
+
+            myid = createPeerID()
+            seed(myid)
+            
+            rawserver = RawServer(doneflag, config['timeout_check_interval'],
+                                  config['timeout'], ipv6_enable = config['ipv6_enabled'],
+                                  failfunc = d.error, errorfunc = d.errorwindow)
+
+            upnp_type = UPnP_test(config['upnp_nat_access'])
+            while True:
+                try:
+                    listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
+                                    config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
+                                    upnp = upnp_type, randomizer = config['random_port'])
+                    break
+                except socketerror, e:
+                    if upnp_type and e == UPnP_ERROR:
+                        d.error('WARNING: COULD NOT FORWARD VIA UPnP')
+                        upnp_type = 0
+                        continue
+                    d.error("Couldn't listen - " + str(e))
+                    d.failed()
+                    return
+            d.connection_stats = rawserver.get_stats()
+
+            response = get_response(config['responsefile'], config['url'], d.error)
+            if not response:
+                break
+
+            infohash = sha(bencode(response['info'])).digest()
+            
+            torrentdata = configfile.getTorrentData(infohash)
+            if torrentdata:
+                oldsave = torrentdata.get('saved as')
+                d.old_ratesettings = torrentdata.get('rate settings')
+                s = torrentdata.get('stats')
+                if s:
+                    d.old_upload = s['uploaded']
+                    d.old_download = s['downloaded']
+            else:
+                oldsave = None
+
+            dow = BT1Download(d.updateStatus, d.finished, d.error, d.errorwindow, doneflag,
+                            config, response, infohash, myid, rawserver, listen_port,
+                            configfile.getConfigDir())
+            d.dow = dow
+
+            if config['gui_saveas_ask'] == 1:
+                oldsave = None
+            if oldsave:
+                if not dow.checkSaveLocation(oldsave):
+                    oldsave = None
+            if oldsave:
+                def choosefile(default, size, saveas, dir, oldsave = oldsave):
+                    d.ChooseFileDone(oldsave, size)
+                    return oldsave
+            elif config['gui_saveas_ask'] == 0:
+                def choosefile(default, size, saveas, dir,
+                               spot = config['gui_default_savedir']):
+                    spot = os.path.join(spot,default)
+                    d.ChooseFileDone(spot, size)
+                    return spot
+            else:
+                choosefile = d.chooseFile
+            savedas = dow.saveAs(choosefile, d.newpath)
+            if not savedas: 
+                break
+
+            if not dow.initFiles(old_style = True):
+                break
+            if not dow.startEngine():
+                dow.shutdown()
+                break
+            dow.startRerequester()
+            dow.autoStats()
+
+            if not dow.am_I_finished():
+                d.updateStatus(activity = 'connecting to peers')
+            rawserver.listen_forever(dow.getPortHandler())
+
+            ratesettings = {
+                    'rate setting': d.current_ratesetting,
+                    'max download rate': config['max_download_rate']
+                }
+            if d.current_ratesetting != 'automatic':
+                ratesettings['uploads'] = config['min_uploads']
+                ratesettings['max upload rate'] = config['max_upload_rate']
+            up, dn = dow.get_transfer_stats()
+            stats = {
+                    'uploaded': up + d.old_upload,
+                    'downloaded': dn + d.old_download
+                }
+            torrentdata = {
+                    'saved as': savedas,
+                    'rate settings': ratesettings,
+                    'stats': stats
+                }
+            dow.shutdown(torrentdata)
+            break
+    except:
+        err = True
+        data = StringIO()
+        print_exc(file = data)
+        print data.getvalue()   # report exception here too
+        d.errorwindow(data.getvalue())
+    try:
+        rawserver.shutdown()
+    except:
+        pass
+    if not d.fin:
+        d.failed()
+    if err:
+        sleep(3600*24*30)   # this will make the app stick in the task manager,
+                            # but so be it
+
+
+if __name__ == '__main__':
+    if argv[1:] == ['--version']:
+        print version
+        exit(0)
+    run(argv[1:])

Propchange: debtorrent/branches/upstream/current/btdownloadgui.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btdownloadheadless.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btdownloadheadless.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btdownloadheadless.py (added)
+++ debtorrent/branches/upstream/current/btdownloadheadless.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,244 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+    
+from BitTornado.download_bt1 import BT1Download, defaults, parse_params, get_usage, get_response
+from BitTornado.RawServer import RawServer, UPnP_ERROR
+from random import seed
+from socket import error as socketerror
+from BitTornado.bencode import bencode
+from BitTornado.natpunch import UPnP_test
+from threading import Event
+from os.path import abspath
+from sys import argv, stdout
+import sys
+from sha import sha
+from time import strftime
+from BitTornado.clock import clock
+from BitTornado import createPeerID, version
+from BitTornado.ConfigDir import ConfigDir
+
+assert sys.version >= '2', "Install Python 2.0 or greater"
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+PROFILER = False
+
+def hours(n):
+    if n == 0:
+        return 'complete!'
+    try:
+        n = int(n)
+        assert n >= 0 and n < 5184000  # 60 days
+    except:
+        return '<unknown>'
+    m, s = divmod(n, 60)
+    h, m = divmod(m, 60)
+    if h > 0:
+        return '%d hour %02d min %02d sec' % (h, m, s)
+    else:
+        return '%d min %02d sec' % (m, s)
+
+class HeadlessDisplayer:
+    def __init__(self):
+        self.done = False
+        self.file = ''
+        self.percentDone = ''
+        self.timeEst = ''
+        self.downloadTo = ''
+        self.downRate = ''
+        self.upRate = ''
+        self.shareRating = ''
+        self.seedStatus = ''
+        self.peerStatus = ''
+        self.errors = []
+        self.last_update_time = -1
+
+    def finished(self):
+        self.done = True
+        self.percentDone = '100'
+        self.timeEst = 'Download Succeeded!'
+        self.downRate = ''
+        self.display()
+
+    def failed(self):
+        self.done = True
+        self.percentDone = '0'
+        self.timeEst = 'Download Failed!'
+        self.downRate = ''
+        self.display()
+
+    def error(self, errormsg):
+        self.errors.append(errormsg)
+        self.display()
+
+    def display(self, dpflag = Event(), fractionDone = None, timeEst = None, 
+            downRate = None, upRate = None, activity = None,
+            statistics = None,  **kws):
+        if self.last_update_time + 0.1 > clock() and fractionDone not in (0.0, 1.0) and activity is not None:
+            return
+        self.last_update_time = clock()        
+        if fractionDone is not None:
+            self.percentDone = str(float(int(fractionDone * 1000)) / 10)
+        if timeEst is not None:
+            self.timeEst = hours(timeEst)
+        if activity is not None and not self.done:
+            self.timeEst = activity
+        if downRate is not None:
+            self.downRate = '%.1f kB/s' % (float(downRate) / (1 << 10))
+        if upRate is not None:
+            self.upRate = '%.1f kB/s' % (float(upRate) / (1 << 10))
+        if statistics is not None:
+           if (statistics.shareRating < 0) or (statistics.shareRating > 100):
+               self.shareRating = 'oo  (%.1f MB up / %.1f MB down)' % (float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
+           else:
+               self.shareRating = '%.3f  (%.1f MB up / %.1f MB down)' % (statistics.shareRating, float(statistics.upTotal) / (1<<20), float(statistics.downTotal) / (1<<20))
+           if not self.done:
+              self.seedStatus = '%d seen now, plus %.3f distributed copies' % (statistics.numSeeds,0.001*int(1000*statistics.numCopies))
+           else:
+              self.seedStatus = '%d seen recently, plus %.3f distributed copies' % (statistics.numOldSeeds,0.001*int(1000*statistics.numCopies))
+           self.peerStatus = '%d seen now, %.1f%% done at %.1f kB/s' % (statistics.numPeers,statistics.percentDone,float(statistics.torrentRate) / (1 << 10))
+        print '\n\n\n\n'
+        for err in self.errors:
+            print 'ERROR:\n' + err + '\n'
+        print 'saving:        ', self.file
+        print 'percent done:  ', self.percentDone
+        print 'time left:     ', self.timeEst
+        print 'download to:   ', self.downloadTo
+        print 'download rate: ', self.downRate
+        print 'upload rate:   ', self.upRate
+        print 'share rating:  ', self.shareRating
+        print 'seed status:   ', self.seedStatus
+        print 'peer status:   ', self.peerStatus
+        stdout.flush()
+        dpflag.set()        
+
+    def chooseFile(self, default, size, saveas, dir):
+        self.file = '%s (%.1f MB)' % (default, float(size) / (1 << 20))
+        if saveas != '':
+            default = saveas
+        self.downloadTo = abspath(default)
+        return default
+
+    def newpath(self, path):
+        self.downloadTo = path
+
+def run(params):
+    try:
+        import curses
+        curses.initscr()
+        cols = curses.COLS
+        curses.endwin()
+    except:
+        cols = 80
+
+    h = HeadlessDisplayer()
+    while 1:
+        configdir = ConfigDir('downloadheadless')
+        defaultsToIgnore = ['responsefile', 'url', 'priority']
+        configdir.setDefaults(defaults,defaultsToIgnore)
+        configdefaults = configdir.loadConfig()
+        defaults.append(('save_options',0,
+         "whether to save the current options as the new default configuration " +
+         "(only for btdownloadheadless.py)"))
+        try:
+            config = parse_params(params, configdefaults)
+        except ValueError, e:
+            print 'error: ' + str(e) + '\nrun with no args for parameter explanations'
+            break
+        if not config:
+            print get_usage(defaults, 80, configdefaults)
+            break
+        if config['save_options']:
+            configdir.saveConfig(config)
+        configdir.deleteOldCacheData(config['expire_cache_data'])
+
+        myid = createPeerID()
+        seed(myid)
+        
+        doneflag = Event()
+        def disp_exception(text):
+            print text
+        rawserver = RawServer(doneflag, config['timeout_check_interval'],
+                              config['timeout'], ipv6_enable = config['ipv6_enabled'],
+                              failfunc = h.failed, errorfunc = disp_exception)
+        upnp_type = UPnP_test(config['upnp_nat_access'])
+        while True:
+            try:
+                listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
+                                config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
+                                upnp = upnp_type, randomizer = config['random_port'])
+                break
+            except socketerror, e:
+                if upnp_type and e == UPnP_ERROR:
+                    print 'WARNING: COULD NOT FORWARD VIA UPnP'
+                    upnp_type = 0
+                    continue
+                print "error: Couldn't listen - " + str(e)
+                h.failed()
+                return
+
+        response = get_response(config['responsefile'], config['url'], h.error)
+        if not response:
+            break
+
+        infohash = sha(bencode(response['info'])).digest()
+
+        dow = BT1Download(h.display, h.finished, h.error, disp_exception, doneflag,
+                        config, response, infohash, myid, rawserver, listen_port,
+                        configdir)
+        
+        if not dow.saveAs(h.chooseFile, h.newpath):
+            break
+
+        if not dow.initFiles(old_style = True):
+            break
+        if not dow.startEngine():
+            dow.shutdown()
+            break
+        dow.startRerequester()
+        dow.autoStats()
+
+        if not dow.am_I_finished():
+            h.display(activity = 'connecting to peers')
+        rawserver.listen_forever(dow.getPortHandler())
+        h.display(activity = 'shutting down')
+        dow.shutdown()
+        break
+    try:
+        rawserver.shutdown()
+    except:
+        pass
+    if not h.done:
+        h.failed()
+
+if __name__ == '__main__':
+    if argv[1:] == ['--version']:
+        print version
+        sys.exit(0)
+
+    if PROFILER:
+        import profile, pstats
+        p = profile.Profile()
+        p.runcall(run, argv[1:])
+        log = open('profile_data.'+strftime('%y%m%d%H%M%S')+'.txt','a')
+        normalstdout = sys.stdout
+        sys.stdout = log
+#        pstats.Stats(p).strip_dirs().sort_stats('cumulative').print_stats()
+        pstats.Stats(p).strip_dirs().sort_stats('time').print_stats()
+        sys.stdout = normalstdout
+    else:
+        run(argv[1:])

Propchange: debtorrent/branches/upstream/current/btdownloadheadless.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btlaunchmany.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btlaunchmany.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btlaunchmany.py (added)
+++ debtorrent/branches/upstream/current/btlaunchmany.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,111 @@
+#!/usr/bin/env python
+
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from BitTornado.launchmanycore import LaunchMany
+from BitTornado.download_bt1 import defaults, get_usage
+from BitTornado.parseargs import parseargs
+from threading import Event
+from sys import argv, exit
+import sys, os
+from BitTornado import version, report_email
+from BitTornado.ConfigDir import ConfigDir
+
+assert sys.version >= '2', "Install Python 2.0 or greater"
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+def hours(n):
+    if n == 0:
+        return 'complete!'
+    try:
+        n = int(n)
+        assert n >= 0 and n < 5184000  # 60 days
+    except:
+        return '<unknown>'
+    m, s = divmod(n, 60)
+    h, m = divmod(m, 60)
+    if h > 0:
+        return '%d hour %02d min %02d sec' % (h, m, s)
+    else:
+        return '%d min %02d sec' % (m, s)
+
+
+Exceptions = []
+
+class HeadlessDisplayer:
+    def display(self, data):
+        print ''
+        if not data:
+            self.message('no torrents')
+        for x in data:
+            ( name, status, progress, peers, seeds, seedsmsg, dist,
+              uprate, dnrate, upamt, dnamt, size, t, msg ) = x
+            print '"%s": "%s" (%s) - %sP%s%s%.3fD u%0.1fK/s-d%0.1fK/s u%dK-d%dK "%s"' % (
+                        name, status, progress, peers, seeds, seedsmsg, dist,
+                        uprate/1000, dnrate/1000, upamt/1024, dnamt/1024, msg)
+        return False
+            
+    def message(self, s):
+        print "### "+s
+
+    def exception(self, s):
+        Exceptions.append(s)
+        self.message('SYSTEM ERROR - EXCEPTION GENERATED')
+
+
+if __name__ == '__main__':
+    if argv[1:] == ['--version']:
+        print version
+        exit(0)
+    defaults.extend( [
+        ( 'parse_dir_interval', 60,
+          "how often to rescan the torrent directory, in seconds" ),
+        ( 'saveas_style', 1,
+          "How to name torrent downloads (1 = rename to torrent name, " +
+          "2 = save under name in torrent, 3 = save in directory under torrent name)" ),
+        ( 'display_path', 1,
+          "whether to display the full path or the torrent contents for each torrent" ),
+    ] )
+    try:
+        configdir = ConfigDir('launchmany')
+        defaultsToIgnore = ['responsefile', 'url', 'priority']
+        configdir.setDefaults(defaults,defaultsToIgnore)
+        configdefaults = configdir.loadConfig()
+        defaults.append(('save_options',0,
+         "whether to save the current options as the new default configuration " +
+         "(only for btlaunchmany.py)"))
+        if len(argv) < 2:
+            print "Usage: btlaunchmany.py <directory> <global options>\n"
+            print "<directory> - directory to look for .torrent files (semi-recursive)"
+            print get_usage(defaults, 80, configdefaults)
+            exit(1)
+        config, args = parseargs(argv[1:], defaults, 1, 1, configdefaults)
+        if config['save_options']:
+            configdir.saveConfig(config)
+        configdir.deleteOldCacheData(config['expire_cache_data'])
+        if not os.path.isdir(args[0]):
+            raise ValueError("Warning: "+args[0]+" is not a directory")
+        config['torrent_dir'] = args[0]
+    except ValueError, e:
+        print 'error: ' + str(e) + '\nrun with no args for parameter explanations'
+        exit(1)
+
+    LaunchMany(config, HeadlessDisplayer())
+    if Exceptions:
+        print '\nEXCEPTION:'
+        print Exceptions[0]
+        print 'please report this to '+report_email

Propchange: debtorrent/branches/upstream/current/btlaunchmany.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btlaunchmanycurses.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btlaunchmanycurses.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btlaunchmanycurses.py (added)
+++ debtorrent/branches/upstream/current/btlaunchmanycurses.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,320 @@
+#!/usr/bin/env python
+
+# Written by John Hoffman
+# see LICENSE.txt for license information
+
+DOWNLOAD_SCROLL_RATE = 1
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from BitTornado.launchmanycore import LaunchMany
+from BitTornado.download_bt1 import defaults, get_usage
+from BitTornado.parseargs import parseargs
+from threading import Event
+from sys import argv, exit
+from time import time, localtime, strftime
+import sys, os
+from BitTornado import version, report_email
+from BitTornado.ConfigDir import ConfigDir
+
+try:
+    import curses
+    import curses.panel
+    from curses.wrapper import wrapper as curses_wrapper
+    from signal import signal, SIGWINCH 
+except:
+    print 'Textmode GUI initialization failed, cannot proceed.'
+    print
+    print 'This download interface requires the standard Python module ' \
+       '"curses", which is unfortunately not available for the native ' \
+       'Windows port of Python. It is however available for the Cygwin ' \
+       'port of Python, running on all Win32 systems (www.cygwin.com).'
+    print
+    print 'You may still use "btdownloadheadless.py" to download.'
+    sys.exit(1)
+
+assert sys.version >= '2', "Install Python 2.0 or greater"
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+Exceptions = []
+
+
+def fmttime(n):
+    if n <= 0:
+        return None
+    try:
+        n = int(n)
+        assert n < 5184000  # 60 days
+    except:
+        return 'connecting to peers'
+    m, s = divmod(n, 60)
+    h, m = divmod(m, 60)
+    return 'ETA in %d:%02d:%02d' % (h, m, s)
+
+def fmtsize(n):
+    n = long(n)
+    unit = [' B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
+    i = 0
+    if (n > 999):
+        i = 1
+        while i + 1 < len(unit) and (n >> 10) >= 999:
+            i += 1
+            n >>= 10
+        n = float(n) / (1 << 10)
+    if i > 0:
+        size = '%.1f' % n + '%s' % unit[i]
+    else:
+        size = '%.0f' % n + '%s' % unit[i]
+    return size
+
+def ljust(s, size):
+    s = s[:size]
+    return s + (' '*(size-len(s)))
+
+def rjust(s, size):
+    s = s[:size]
+    return (' '*(size-len(s)))+s
+
+
+class CursesDisplayer:
+    def __init__(self, scrwin):
+        self.messages = []
+        self.scroll_pos = 0
+        self.scroll_time = 0
+        
+        self.scrwin = scrwin
+        signal(SIGWINCH, self.winch_handler)
+        self.changeflag = Event()
+        self._remake_window()
+
+    def winch_handler(self, signum, stackframe):
+        self.changeflag.set()
+        curses.endwin()
+        self.scrwin.refresh()
+        self.scrwin = curses.newwin(0, 0, 0, 0)
+        self._remake_window()
+        self._display_messages()
+
+    def _remake_window(self):
+        self.scrh, self.scrw = self.scrwin.getmaxyx()
+        self.scrpan = curses.panel.new_panel(self.scrwin)
+        self.mainwinh = int(2*(self.scrh)/3)
+        self.mainwinw = self.scrw - 4  # - 2 (bars) - 2 (spaces)
+        self.mainwiny = 2         # + 1 (bar) + 1 (titles)
+        self.mainwinx = 2         # + 1 (bar) + 1 (space)
+        # + 1 to all windows so we can write at mainwinw
+
+        self.mainwin = curses.newwin(self.mainwinh, self.mainwinw+1,
+                                     self.mainwiny, self.mainwinx)
+        self.mainpan = curses.panel.new_panel(self.mainwin)
+        self.mainwin.scrollok(0)
+        self.mainwin.nodelay(1)
+
+        self.headerwin = curses.newwin(1, self.mainwinw+1,
+                                       1, self.mainwinx)
+        self.headerpan = curses.panel.new_panel(self.headerwin)
+        self.headerwin.scrollok(0)
+
+        self.totalwin = curses.newwin(1, self.mainwinw+1,
+                                      self.mainwinh+1, self.mainwinx)
+        self.totalpan = curses.panel.new_panel(self.totalwin)
+        self.totalwin.scrollok(0)
+
+        self.statuswinh = self.scrh-4-self.mainwinh
+        self.statuswin = curses.newwin(self.statuswinh, self.mainwinw+1,
+                                       self.mainwinh+3, self.mainwinx)
+        self.statuspan = curses.panel.new_panel(self.statuswin)
+        self.statuswin.scrollok(0)
+
+        try:
+            self.scrwin.border(ord('|'),ord('|'),ord('-'),ord('-'),ord(' '),ord(' '),ord(' '),ord(' '))
+        except:
+            pass
+        self.headerwin.addnstr(0, 2, '#', self.mainwinw - 25, curses.A_BOLD)
+        self.headerwin.addnstr(0, 4, 'Filename', self.mainwinw - 25, curses.A_BOLD)
+        self.headerwin.addnstr(0, self.mainwinw - 24, 'Size', 4, curses.A_BOLD)
+        self.headerwin.addnstr(0, self.mainwinw - 18, 'Download', 8, curses.A_BOLD)
+        self.headerwin.addnstr(0, self.mainwinw -  6, 'Upload', 6, curses.A_BOLD)
+        self.totalwin.addnstr(0, self.mainwinw - 27, 'Totals:', 7, curses.A_BOLD)
+
+        self._display_messages()
+        
+        curses.panel.update_panels()
+        curses.doupdate()
+        self.changeflag.clear()
+
+
+    def _display_line(self, s, bold = False):
+        if self.disp_end:
+            return True
+        line = self.disp_line
+        self.disp_line += 1
+        if line < 0:
+            return False
+        if bold:
+            self.mainwin.addnstr(line, 0, s, self.mainwinw, curses.A_BOLD)
+        else:
+            self.mainwin.addnstr(line, 0, s, self.mainwinw)
+        if self.disp_line >= self.mainwinh:
+            self.disp_end = True
+        return self.disp_end
+
+    def _display_data(self, data):    
+        if 3*len(data) <= self.mainwinh:
+            self.scroll_pos = 0
+            self.scrolling = False
+        elif self.scroll_time + DOWNLOAD_SCROLL_RATE < time():
+            self.scroll_time = time()
+            self.scroll_pos += 1
+            self.scrolling = True
+            if self.scroll_pos >= 3*len(data)+2:
+                self.scroll_pos = 0
+
+        i = int(self.scroll_pos/3)
+        self.disp_line = (3*i)-self.scroll_pos
+        self.disp_end = False
+
+        while not self.disp_end:
+            ii = i % len(data)
+            if i and not ii:
+                if not self.scrolling:
+                    break
+                self._display_line('')
+                if self._display_line(''):
+                    break
+            ( name, status, progress, peers, seeds, seedsmsg, dist,
+              uprate, dnrate, upamt, dnamt, size, t, msg ) = data[ii]
+            t = fmttime(t)
+            if t:
+                status = t
+            name = ljust(name,self.mainwinw-32)
+            size = rjust(fmtsize(size),8)
+            uprate = rjust('%s/s' % fmtsize(uprate),10)
+            dnrate = rjust('%s/s' % fmtsize(dnrate),10)
+            line = "%3d %s%s%s%s" % (ii+1, name, size, dnrate, uprate)
+            self._display_line(line, True)
+            if peers + seeds:
+                datastr = '    (%s) %s - %s up %s dn - %s peers %s seeds %.3f dist copies' % (
+                                progress, status,
+                                fmtsize(upamt), fmtsize(dnamt),
+                                peers, seeds, dist )
+            else:
+                datastr = '    (%s) %s - %s up %s dn' % (
+                                progress, status,
+                                fmtsize(upamt), fmtsize(dnamt) )
+            self._display_line(datastr)
+            self._display_line('    '+ljust(msg,self.mainwinw-4))
+            i += 1
+
+    def display(self, data):
+        if self.changeflag.isSet():
+            return
+
+        inchar = self.mainwin.getch()
+        if inchar == 12: # ^L
+            self._remake_window()
+
+        self.mainwin.erase()
+        if data:
+            self._display_data(data)
+        else:
+            self.mainwin.addnstr( 1, int(self.mainwinw/2)-5,
+                                  'no torrents', 12, curses.A_BOLD )
+        totalup = 0
+        totaldn = 0
+        for ( name, status, progress, peers, seeds, seedsmsg, dist,
+              uprate, dnrate, upamt, dnamt, size, t, msg ) in data:
+            totalup += uprate
+            totaldn += dnrate
+        
+        totalup = '%s/s' % fmtsize(totalup)
+        totaldn = '%s/s' % fmtsize(totaldn)
+        
+        self.totalwin.erase()
+        self.totalwin.addnstr(0, self.mainwinw-27, 'Totals:', 7, curses.A_BOLD)
+        self.totalwin.addnstr(0, self.mainwinw-20 + (10-len(totaldn)),
+                              totaldn, 10, curses.A_BOLD)
+        self.totalwin.addnstr(0, self.mainwinw-10 + (10-len(totalup)),
+                              totalup, 10, curses.A_BOLD)
+
+        curses.panel.update_panels()
+        curses.doupdate()
+
+        return inchar in (ord('q'),ord('Q'))
+
+    def message(self, s):
+        self.messages.append(strftime('%x %X - ',localtime(time()))+s)
+        self._display_messages()
+
+    def _display_messages(self):
+        self.statuswin.erase()
+        winpos = 0
+        for s in self.messages[-self.statuswinh:]:
+            self.statuswin.addnstr(winpos, 0, s, self.mainwinw)
+            winpos += 1
+        curses.panel.update_panels()
+        curses.doupdate()
+
+    def exception(self, s):
+        Exceptions.append(s)
+        self.message('SYSTEM ERROR - EXCEPTION GENERATED')
+
+
+
+def LaunchManyWrapper(scrwin, config):
+    LaunchMany(config, CursesDisplayer(scrwin))
+
+
+if __name__ == '__main__':
+    if argv[1:] == ['--version']:
+        print version
+        exit(0)
+    defaults.extend( [
+        ( 'parse_dir_interval', 60,
+          "how often to rescan the torrent directory, in seconds" ),
+        ( 'saveas_style', 2,
+          "How to name torrent downloads (1 = rename to torrent name, " +
+          "2 = save under name in torrent, 3 = save in directory under torrent name)" ),
+        ( 'display_path', 0,
+          "whether to display the full path or the torrent contents for each torrent" ),
+    ] )
+    try:
+        configdir = ConfigDir('launchmanycurses')
+        defaultsToIgnore = ['responsefile', 'url', 'priority']
+        configdir.setDefaults(defaults,defaultsToIgnore)
+        configdefaults = configdir.loadConfig()
+        defaults.append(('save_options',0,
+         "whether to save the current options as the new default configuration " +
+         "(only for btlaunchmanycurses.py)"))
+        if len(argv) < 2:
+            print "Usage: btlaunchmanycurses.py <directory> <global options>\n"
+            print "<directory> - directory to look for .torrent files (semi-recursive)"
+            print get_usage(defaults, 80, configdefaults)
+            exit(1)
+        config, args = parseargs(argv[1:], defaults, 1, 1, configdefaults)
+        if config['save_options']:
+            configdir.saveConfig(config)
+        configdir.deleteOldCacheData(config['expire_cache_data'])
+        if not os.path.isdir(args[0]):
+            raise ValueError("Warning: "+args[0]+" is not a directory")
+        config['torrent_dir'] = args[0]
+    except ValueError, e:
+        print 'error: ' + str(e) + '\nrun with no args for parameter explanations'
+        exit(1)
+
+    curses_wrapper(LaunchManyWrapper, config)
+    if Exceptions:
+        print '\nEXCEPTION:'
+        print Exceptions[0]
+        print 'please report this to '+report_email

Propchange: debtorrent/branches/upstream/current/btlaunchmanycurses.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btmakemetafile.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btmakemetafile.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btmakemetafile.py (added)
+++ debtorrent/branches/upstream/current/btmakemetafile.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,41 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# multitracker extensions by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from sys import argv, version, exit
+from os.path import split
+assert version >= '2', "Install Python 2.0 or greater"
+from BitTornado.BT1.makemetafile import make_meta_file, defaults, print_announcelist_details
+from BitTornado.parseargs import parseargs, formatDefinitions
+
+
+def prog(amount):
+    print '%.1f%% complete\r' % (amount * 100),
+
+if len(argv) < 3:
+    a,b = split(argv[0])
+    print 'Usage: ' + b + ' <trackerurl> <file> [file...] [params...]'
+    print
+    print formatDefinitions(defaults, 80)
+    print_announcelist_details()
+    print ('')
+    exit(2)
+
+try:
+    config, args = parseargs(argv[1:], defaults, 2, None)
+    for file in args[1:]:
+        make_meta_file(file, args[0], config, progress = prog)
+except ValueError, e:
+    print 'error: ' + str(e)
+    print 'run with no args for parameter explanations'

Propchange: debtorrent/branches/upstream/current/btmakemetafile.py
------------------------------------------------------------------------------
    svn:executable = 
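
A minimal sketch (not part of this commit) of the same parseargs/make_meta_file
calls that btmakemetafile.py makes above, driven directly from Python.  The
tracker URL and 'example.iso' below are hypothetical placeholders.

    # Sketch only: mirrors btmakemetafile.py's usage above.
    from BitTornado.BT1.makemetafile import make_meta_file, defaults
    from BitTornado.parseargs import parseargs

    config, args = parseargs(['http://my.tracker:6969/announce', 'example.iso'],
                             defaults, 2, None)
    make_meta_file(args[1], args[0], config,
                   progress = lambda amount: None)  # discard progress callbacks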

Added: debtorrent/branches/upstream/current/btmaketorrentgui.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btmaketorrentgui.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btmaketorrentgui.py (added)
+++ debtorrent/branches/upstream/current/btmaketorrentgui.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,353 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# modified for multitracker by John Hoffman
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+from sys import argv, version
+
+from BitTornado.BT1.makemetafile import make_meta_file, completedir
+from threading import Event, Thread
+from BitTornado.bencode import bdecode
+import sys
+from os import getcwd
+from os.path import join, isdir
+try:
+    from wxPython.wx import *
+except:
+    print 'wxPython is either not installed or has not been installed properly.'
+    sys.exit(1)
+
+try:
+    True
+except:
+    True = 1
+    False = 0
+
+wxEVT_INVOKE = wxNewEventType()
+
+def EVT_INVOKE(win, func):
+    win.Connect(-1, -1, wxEVT_INVOKE, func)
+
+class InvokeEvent(wxPyEvent):
+    def __init__(self, func, args, kwargs):
+        wxPyEvent.__init__(self)
+        self.SetEventType(wxEVT_INVOKE)
+        self.func = func
+        self.args = args
+        self.kwargs = kwargs
+
+class DownloadInfo:
+    def __init__(self):
+        frame = wxFrame(None, -1, 'BitTorrent Torrent File Maker', size = wxSize(550, 410))
+        self.frame = frame
+
+        panel = wxPanel(frame, -1)
+
+        gridSizer = wxFlexGridSizer(cols = 2, rows = 2, vgap = 0, hgap = 8)
+        
+        gridSizer.Add(wxStaticText(panel, -1, 'make torrent of:'))
+
+        b = wxBoxSizer(wxHORIZONTAL)
+        self.dirCtl = wxTextCtrl(panel, -1, '')
+        b.Add(self.dirCtl, 1, wxEXPAND)
+#        b.Add(10, 10, 0, wxEXPAND)
+        
+        button = wxButton(panel, -1, 'dir', size = (30,20))
+        EVT_BUTTON(frame, button.GetId(), self.selectdir)
+        b.Add(button, 0)
+
+        button2 = wxButton(panel, -1, 'file', size = (30,20))
+        EVT_BUTTON(frame, button2.GetId(), self.selectfile)
+        b.Add(button2, 0)
+
+        gridSizer.Add(b, 0, wxEXPAND)
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+
+        gridSizer.Add(wxStaticText(panel, -1, 'announce url:'))
+        self.annCtl = wxTextCtrl(panel, -1, 'http://my.tracker:6969/announce')
+        gridSizer.Add(self.annCtl, 0, wxEXPAND)
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+
+        a = wxFlexGridSizer(cols = 1)
+        a.Add(wxStaticText(panel, -1, 'announce list:'))
+        a.Add(wxStaticText(panel, -1, ''))
+        abutton = wxButton(panel, -1, 'copy\nannounces\nfrom\ntorrent', size = (50,70))
+        EVT_BUTTON(frame, abutton.GetId(), self.announcecopy)
+        a.Add(abutton, 0, wxEXPAND)
+        gridSizer.Add(a, 0, wxEXPAND)
+        
+        self.annListCtl = wxTextCtrl(panel, -1, '\n\n\n\n\n', wxPoint(-1,-1), (400,120),
+                                            wxTE_MULTILINE|wxHSCROLL|wxTE_DONTWRAP)
+        gridSizer.Add(self.annListCtl, -1, wxEXPAND)
+
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        exptext = wxStaticText(panel, -1,
+                "a list of announces separated by commas " +
+                "or whitespace and on several lines -\n" +
+                "trackers on the same line will be tried randomly," +
+                "and all the trackers on one line\n" +
+                "will be tried before the trackers on the next line.")
+        exptext.SetFont(wxFont(6, wxDEFAULT, wxNORMAL, wxNORMAL, False))
+        gridSizer.Add(exptext)
+
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+
+        gridSizer.Add(wxStaticText(panel, -1, 'piece size:'))
+        self.piece_length = wxChoice(panel, -1,
+                 choices = ['automatic', '2MiB', '1MiB', '512KiB', '256KiB', '128KiB', '64KiB', '32KiB'])
+        self.piece_length_list = [0,       21,     20,      19,       18,       17,      16,      15]
+        self.piece_length.SetSelection(0)
+        gridSizer.Add(self.piece_length)
+        
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+        gridSizer.Add(wxStaticText(panel, -1, ''))
+
+        gridSizer.Add(wxStaticText(panel, -1, 'comment:'))
+        self.commentCtl = wxTextCtrl(panel, -1, '')
+        gridSizer.Add(self.commentCtl, 0, wxEXPAND)
+
+        gridSizer.AddGrowableCol(1)
+ 
+        border = wxBoxSizer(wxVERTICAL)
+        border.Add(gridSizer, 0, wxEXPAND | wxNORTH | wxEAST | wxWEST, 25)
+        b2 = wxButton(panel, -1, 'make')
+#        border.Add(10, 10, 1, wxEXPAND)
+        border.Add(b2, 0, wxALIGN_CENTER | wxSOUTH, 20)
+        EVT_BUTTON(frame, b2.GetId(), self.complete)
+        panel.SetSizer(border)
+        panel.SetAutoLayout(True)
+
+#        panel.DragAcceptFiles(True)
+#        EVT_DROP_FILES(panel, self.selectdrop)
+
+    def selectdir(self, x):
+        dl = wxDirDialog(self.frame, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
+        if dl.ShowModal() == wxID_OK:
+            self.dirCtl.SetValue(dl.GetPath())
+
+    def selectfile(self, x):
+        dl = wxFileDialog (self.frame, 'Choose file or directory to use', '', '', '', wxOPEN)
+        if dl.ShowModal() == wxID_OK:
+            self.dirCtl.SetValue(dl.GetPath())
+
+    def selectdrop(self, x):
+        print x
+
+        list = x.m_files
+        self.dirCtl.SetValue(x[0])
+
+    def announcecopy(self, x):
+        dl = wxFileDialog (self.frame, 'Choose .torrent file to use', '', '', '*.torrent', wxOPEN)
+        if dl.ShowModal() == wxID_OK:
+            try:
+                h = open(dl.GetPath(), 'rb')
+                metainfo = bdecode(h.read())
+                h.close()
+                self.annCtl.SetValue(metainfo['announce'])
+                if metainfo.has_key('announce-list'):
+                    list = []
+                    for tier in metainfo['announce-list']:
+                        for tracker in tier:
+                            list += [tracker, ', ']
+                        del list[-1]
+                        list += ['\n']
+                    liststring = ''
+                    for i in list:
+                        liststring += i
+                    self.annListCtl.SetValue(liststring+'\n\n')
+                else:
+                    self.annListCtl.SetValue('')
+            except:
+                return
+
+    def getannouncelist(self):
+        list = []
+        for t in self.annListCtl.GetValue().split('\n'):
+            tier = []
+            t = t.replace(',',' ')
+            for tr in t.split(' '):
+                if tr != '':
+                    tier += [tr]
+            if len(tier)>0:
+                list.append(tier)
+        return list
+    
+    def complete(self, x):
+        if self.dirCtl.GetValue() == '':
+            dlg = wxMessageDialog(self.frame, message = 'You must select a\n file or directory', 
+                caption = 'Error', style = wxOK | wxICON_ERROR)
+            dlg.ShowModal()
+            dlg.Destroy()
+            return
+        params = {'piece_size_pow2': self.piece_length_list[self.piece_length.GetSelection()]}
+        annlist = self.getannouncelist()
+        if len(annlist)>0:
+            params['real_announce_list'] = annlist
+        comment = self.commentCtl.GetValue()
+        if comment != '':
+            params['comment'] = comment
+        try:
+            CompleteDir(self.dirCtl.GetValue(), self.annCtl.GetValue(), params)
+        except:
+            print_exc()
+
+
+from traceback import print_exc
+
+class CompleteDir:
+    def __init__(self, d, a, params):
+        self.d = d
+        self.a = a
+        self.params = params
+        self.flag = Event()
+        self.separatetorrents = False
+
+        if isdir(d):
+            self.choicemade = Event()
+            frame = wxFrame(None, -1, 'BitTorrent make torrent', size = (1,1))
+            self.frame = frame
+            panel = wxPanel(frame, -1)
+            gridSizer = wxFlexGridSizer(cols = 1, vgap = 8, hgap = 8)
+            gridSizer.AddGrowableRow(1)
+            gridSizer.Add(wxStaticText(panel, -1,
+                    'Do you want to make a separate .torrent'),0,wxALIGN_CENTER)
+            gridSizer.Add(wxStaticText(panel, -1,
+                    'for every item in this directory?'),0,wxALIGN_CENTER)
+            gridSizer.Add(wxStaticText(panel, -1, ''))
+
+            b = wxFlexGridSizer(cols = 3, hgap = 10)
+            yesbut = wxButton(panel, -1, 'Yes')
+            def saidyes(e, self = self):
+                self.frame.Destroy()
+                self.separatetorrents = True
+                self.begin()
+            EVT_BUTTON(frame, yesbut.GetId(), saidyes)
+            b.Add(yesbut, 0)
+
+            nobut = wxButton(panel, -1, 'No')
+            def saidno(e, self = self):
+                self.frame.Destroy()
+                self.begin()
+            EVT_BUTTON(frame, nobut.GetId(), saidno)
+            b.Add(nobut, 0)
+
+            cancelbut = wxButton(panel, -1, 'Cancel')
+            def canceled(e, self = self):
+                self.frame.Destroy()                
+            EVT_BUTTON(frame, cancelbut.GetId(), canceled)
+            b.Add(cancelbut, 0)
+            gridSizer.Add(b, 0, wxALIGN_CENTER)
+            border = wxBoxSizer(wxHORIZONTAL)
+            border.Add(gridSizer, 1, wxEXPAND | wxALL, 4)
+            
+            panel.SetSizer(border)
+            panel.SetAutoLayout(True)
+            frame.Show()
+            border.Fit(panel)
+            frame.Fit()
+            
+        else:
+            self.begin()
+
+    def begin(self):
+        if self.separatetorrents:
+            frame = wxFrame(None, -1, 'BitTorrent make directory', size = wxSize(550, 250))
+        else:
+            frame = wxFrame(None, -1, 'BitTorrent make torrent', size = wxSize(550, 250))
+        self.frame = frame
+
+        panel = wxPanel(frame, -1)
+        gridSizer = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
+
+        if self.separatetorrents:
+            self.currentLabel = wxStaticText(panel, -1, 'checking file sizes')
+        else:
+            self.currentLabel = wxStaticText(panel, -1, 'building ' + self.d + '.torrent')
+        gridSizer.Add(self.currentLabel, 0, wxEXPAND)
+        self.gauge = wxGauge(panel, -1, range = 1000, style = wxGA_SMOOTH)
+        gridSizer.Add(self.gauge, 0, wxEXPAND)
+        gridSizer.Add((10, 10), 1, wxEXPAND)
+        self.button = wxButton(panel, -1, 'cancel')
+        gridSizer.Add(self.button, 0, wxALIGN_CENTER)
+        gridSizer.AddGrowableRow(2)
+        gridSizer.AddGrowableCol(0)
+
+        g2 = wxFlexGridSizer(cols = 1, vgap = 15, hgap = 8)
+        g2.Add(gridSizer, 1, wxEXPAND | wxALL, 25)
+        g2.AddGrowableRow(0)
+        g2.AddGrowableCol(0)
+        panel.SetSizer(g2)
+        panel.SetAutoLayout(True)
+        EVT_BUTTON(frame, self.button.GetId(), self.done)
+        EVT_CLOSE(frame, self.done)
+        EVT_INVOKE(frame, self.onInvoke)
+        frame.Show(True)
+        Thread(target = self.complete).start()
+
+    def complete(self):
+        try:
+            if self.separatetorrents:
+                completedir(self.d, self.a, self.params, self.flag,
+                            self.valcallback, self.filecallback)
+            else:
+                make_meta_file(self.d, self.a, self.params, self.flag,
+                            self.valcallback, progress_percent = 1)
+            if not self.flag.isSet():
+                self.currentLabel.SetLabel('Done!')
+                self.gauge.SetValue(1000)
+                self.button.SetLabel('Close')
+                self.frame.Refresh()
+        except (OSError, IOError), e:
+            self.currentLabel.SetLabel('Error!')
+            self.button.SetLabel('Close')
+            dlg = wxMessageDialog(self.frame, message = 'Error - ' + str(e), 
+                caption = 'Error', style = wxOK | wxICON_ERROR)
+            dlg.ShowModal()
+            dlg.Destroy()
+
+    def valcallback(self, amount):
+        self.invokeLater(self.onval, [amount])
+
+    def onval(self, amount):
+        self.gauge.SetValue(int(amount * 1000))
+
+    def filecallback(self, f):
+        self.invokeLater(self.onfile, [f])
+
+    def onfile(self, f):
+        self.currentLabel.SetLabel('building ' + join(self.d, f) + '.torrent')
+
+    def onInvoke(self, event):
+        if not self.flag.isSet():
+            apply(event.func, event.args, event.kwargs)
+
+    def invokeLater(self, func, args = [], kwargs = {}):
+        if not self.flag.isSet():
+            wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
+
+    def done(self, event):
+        self.flag.set()
+        self.frame.Destroy()
+
+class btWxApp(wxApp):
+    def OnInit(self):
+        d = DownloadInfo()
+        d.frame.Show(True)
+        self.SetTopWindow(d.frame)
+        return True
+
+if __name__ == '__main__':
+    btWxApp().MainLoop()

Propchange: debtorrent/branches/upstream/current/btmaketorrentgui.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btreannounce.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btreannounce.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btreannounce.py (added)
+++ debtorrent/branches/upstream/current/btreannounce.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,79 @@
+#!/usr/bin/env python
+
+# Written by Henry 'Pi' James and Bram Cohen
+# multitracker extensions by John Hoffman
+# see LICENSE.txt for license information
+
+from sys import argv,exit
+from os.path import split
+from BitTornado.bencode import bencode, bdecode
+
+if len(argv) < 3:
+    a,b = split(argv[0])
+    print ('Usage: ' + b + ' <announce> [--announce_list <arg>] file1.torrent [file2.torrent...]')
+    print ('')
+    print ('  Where:')
+    print ('    announce = tracker URL')
+    print ('           Example: http://www.tracker.com:6699/announce')
+    print ('')
+    print ('    announce_list = optional list of redundant/backup tracker URLs, in the format:')
+    print ('           url[,url...][|url[,url...]...]')
+    print ('                where URLs separated by commas are all tried first')
+    print ('                before the next group of URLs separated by the pipe is checked.')
+    print ("                If none is given, it is assumed you don't want one in the metafile.")
+    print ('                If announce-list is given, clients which support it')
+    print ('                will ignore the <announce> value.')
+    print ('           Examples:')
+    print ('                http://tracker1.com|http://tracker2.com|http://tracker3.com')
+    print ('                     (tries trackers 1-3 in order)')
+    print ('                http://tracker1.com,http://tracker2.com,http://tracker3.com')
+    print ('                     (tries trackers 1-3 in a randomly selected order)')
+    print ('                http://tracker1.com|http://backup1.com,http://backup2.com')
+    print ('                     (tries tracker 1 first, then tries between the 2 backups randomly)')
+    print ('')
+    exit(2) # common exit code for syntax error
+
+announce = argv[1]
+announce_list = []
+if argv[2] == '--announce_list':
+    for tier in argv[3].split('|'):
+        sublist = []
+        for tracker in tier.split(','):
+            sublist += [tracker]
+        announce_list += [sublist]
+    if len(argv) < 5:
+        print ('error: no .torrent files given')
+        print ('')
+        exit(2)
+    argv = argv[2:]
+    
+
+for f in argv[2:]:
+    h = open(f, 'rb')
+    metainfo = bdecode(h.read())
+    h.close()
+    print 'old announce for %s: %s' % (f, metainfo['announce'])
+    metainfo['announce'] = announce
+    if metainfo.has_key('announce-list'):
+        list = []
+        for tier in metainfo['announce-list']:
+            for tracker in tier:
+                list+=[tracker,',']
+            del list[-1]
+            list+=['|']
+        del list[-1]
+        liststring = ''
+        for i in list:
+            liststring+=i
+        print 'old announce-list for %s: %s' % (f, liststring)
+    if len(announce_list) > 0:
+        metainfo['announce-list'] = announce_list
+    elif metainfo.has_key('announce-list'):
+        try:
+            del metainfo['announce-list']
+        except:
+            pass
+        
+    h = open(f, 'wb')
+    h.write(bencode(metainfo))
+    h.close()

Propchange: debtorrent/branches/upstream/current/btreannounce.py
------------------------------------------------------------------------------
    svn:executable = 
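
A minimal sketch (not part of this commit) of how the --announce_list argument
parsed above is split into tiers for metainfo['announce-list']; the tracker
URLs are placeholders.

    # Sketch only: same tier-splitting logic as btreannounce.py above.
    arg = 'http://tracker1.com|http://backup1.com,http://backup2.com'
    announce_list = []
    for tier in arg.split('|'):
        announce_list.append(tier.split(','))   # one sub-list per tier
    print announce_list
    # [['http://tracker1.com'], ['http://backup1.com', 'http://backup2.com']]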

Added: debtorrent/branches/upstream/current/btrename.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btrename.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btrename.py (added)
+++ debtorrent/branches/upstream/current/btrename.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,33 @@
+#!/usr/bin/env python
+
+# Written by Henry 'Pi' James
+# see LICENSE.txt for license information
+
+from sys import *
+from os.path import *
+from sha import *
+from BitTornado.bencode import *
+
+NAME, EXT = splitext(basename(argv[0]))
+VERSION = '20021119'
+
+print '%s %s - change the suggested filename in a .torrent file' % (NAME, VERSION)
+print
+
+if len(argv) != 3:
+  print '%s file.torrent new.filename.ext' % argv[0]
+  print
+  exit(2) # common exit code for syntax error
+
+metainfo_file = open(argv[1], 'rb')
+metainfo = bdecode(metainfo_file.read())
+metainfo_file.close()
+print 'old filename: %s' % metainfo['info']['name']
+metainfo['info']['name'] = argv[2]
+print 'new filename: %s' % metainfo['info']['name']
+metainfo_file = open(argv[1], 'wb')
+metainfo_file.write(bencode(metainfo))
+metainfo_file.close()
+print
+print 'done.'
+print

Propchange: debtorrent/branches/upstream/current/btrename.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/btsethttpseeds.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btsethttpseeds.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btsethttpseeds.py (added)
+++ debtorrent/branches/upstream/current/btsethttpseeds.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,48 @@
+#!/usr/bin/env python
+
+# Written by Henry 'Pi' James and Bram Cohen
+# multitracker extensions by John Hoffman
+# see LICENSE.txt for license information
+
+from sys import argv,exit
+from os.path import split
+from BitTornado.bencode import bencode, bdecode
+
+if len(argv) < 3:
+    a,b = split(argv[0])
+    print ('Usage: ' + b + ' <http-seeds> file1.torrent [file2.torrent...]')
+    print ('')
+    print ('  Where:')
+    print ('    http-seeds = list of seed URLs, in the format:')
+    print ('           url[|url...] or 0')
+    print ('                if the list is a zero, any http seeds will be stripped.')
+    print ('')
+    exit(2) # common exit code for syntax error
+
+seeds = argv[1]
+if seeds == '0':
+    seedlist = None
+else:
+    seedlist = seeds.split('|')
+
+for f in argv[2:]:
+    h = open(f, 'rb')
+    metainfo = bdecode(h.read())
+    h.close()
+    if metainfo.has_key('httpseeds'):
+        list = []
+        for url in metainfo['httpseeds']:
+            list += [url,'|']
+        del list[-1]
+        liststring = ''
+        for i in list:
+            liststring += i
+        print 'old http-seed list for %s: %s' % (f, liststring)
+        if not seedlist:
+            del metainfo['httpseeds']
+    if seedlist:
+        metainfo['httpseeds'] = seedlist
+
+    h = open(f, 'wb')
+    h.write(bencode(metainfo))
+    h.close()

Propchange: debtorrent/branches/upstream/current/btsethttpseeds.py
------------------------------------------------------------------------------
    svn:executable = 
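
A minimal sketch (not part of this commit) of how the <http-seeds> argument
handled above becomes metainfo['httpseeds']; the seed URLs are placeholders.

    # Sketch only: same parsing as btsethttpseeds.py above.
    seeds = 'http://seed1.example/file|http://seed2.example/file'
    if seeds == '0':
        seedlist = None          # a literal 0 strips any existing http seeds
    else:
        seedlist = seeds.split('|')
    print seedlist
    # ['http://seed1.example/file', 'http://seed2.example/file']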

Added: debtorrent/branches/upstream/current/btshowmetainfo.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/btshowmetainfo.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/btshowmetainfo.py (added)
+++ debtorrent/branches/upstream/current/btshowmetainfo.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,78 @@
+#!/usr/bin/env python
+
+# Written by Henry 'Pi' James and Loring Holden
+# modified for multitracker display by John Hoffman
+# see LICENSE.txt for license information
+
+from sys import *
+from os.path import *
+from sha import *
+from BitTornado.bencode import *
+
+NAME, EXT = splitext(basename(argv[0]))
+VERSION = '20030621'
+
+print '%s %s - decode BitTorrent metainfo files' % (NAME, VERSION)
+print
+
+if len(argv) == 1:
+    print '%s file1.torrent file2.torrent file3.torrent ...' % argv[0]
+    print
+    exit(2) # common exit code for syntax error
+
+for metainfo_name in argv[1:]:
+    metainfo_file = open(metainfo_name, 'rb')
+    metainfo = bdecode(metainfo_file.read())
+#    print metainfo
+    info = metainfo['info']
+    info_hash = sha(bencode(info))
+
+    print 'metainfo file.: %s' % basename(metainfo_name)
+    print 'info hash.....: %s' % info_hash.hexdigest()
+    piece_length = info['piece length']
+    if info.has_key('length'):
+        # let's assume we just have a file
+        print 'file name.....: %s' % info['name']
+        file_length = info['length']
+        name ='file size.....:'
+    else:
+        # let's assume we have a directory structure
+        print 'directory name: %s' % info['name']
+        print 'files.........: '
+        file_length = 0;
+        for file in info['files']:
+            path = ''
+            for item in file['path']:
+                if (path != ''):
+                   path = path + "/"
+                path = path + item
+            print '   %s (%d)' % (path, file['length'])
+            file_length += file['length']
+            name ='archive size..:'
+    piece_number, last_piece_length = divmod(file_length, piece_length)
+    print '%s %i (%i * %i + %i)' \
+          % (name,file_length, piece_number, piece_length, last_piece_length)
+    print 'announce url..: %s' % metainfo['announce']
+    if metainfo.has_key('announce-list'):
+        list = []
+        for tier in metainfo['announce-list']:
+            for tracker in tier:
+                list+=[tracker,',']
+            del list[-1]
+            list+=['|']
+        del list[-1]
+        liststring = ''
+        for i in list:
+            liststring+=i
+        print 'announce-list.: %s' % liststring
+    if metainfo.has_key('httpseeds'):
+        list = []
+        for seed in metainfo['httpseeds']:
+            list += [seed,'|']
+        del list[-1]
+        liststring = ''
+        for i in list:
+            liststring+=i
+        print 'http seeds....: %s' % liststring
+    if metainfo.has_key('comment'):
+        print 'comment.......: %s' % metainfo['comment']

Propchange: debtorrent/branches/upstream/current/btshowmetainfo.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/bttrack.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/bttrack.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/bttrack.py (added)
+++ debtorrent/branches/upstream/current/bttrack.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,34 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from BitTornado import PSYCO
+if PSYCO.psyco:
+    try:
+        import psyco
+        assert psyco.__version__ >= 0x010100f0
+        psyco.full()
+    except:
+        pass
+
+PROFILE = 0
+    
+from sys import argv
+from BitTornado.BT1.track import track
+
+if __name__ == '__main__':
+    if PROFILE:
+        import profile, pstats
+        from time import strftime
+        import sys
+        p = profile.Profile()
+        p.runcall(track, argv[1:])
+        log = open('profile_data.'+strftime('%y%m%d%H%M%S')+'.txt','a')
+        normalstdout = sys.stdout
+        sys.stdout = log
+#        pstats.Stats(p).strip_dirs().sort_stats('cumulative').print_stats()
+        pstats.Stats(p).strip_dirs().sort_stats('time').print_stats()
+        sys.stdout = normalstdout
+    else:
+        track(argv[1:])

Propchange: debtorrent/branches/upstream/current/bttrack.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/completedir.nsi
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/completedir.nsi?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/completedir.nsi (added)
+++ debtorrent/branches/upstream/current/completedir.nsi Sat Apr 14 18:47:18 2007
@@ -1,0 +1,24 @@
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+Outfile completedir.exe
+Name completedir
+SilentInstall silent
+InstallDir "$PROGRAMFILES\completedir\"
+Section "Install"
+  WriteUninstaller "$INSTDIR\uninstall.exe"
+  SetOutPath $INSTDIR
+  File btcompletedirgui.exe
+  File *.pyd
+  File *.dll
+  CreateShortCut "$STARTMENU\Programs\completedir.lnk" "$INSTDIR\btcompletedirgui.exe"
+  WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\CompleteDir" "DisplayName" "BitTorrent complete dir 1.0.1"
+  WriteRegStr HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\CompleteDir" "UninstallString" '"$INSTDIR\uninstall.exe"'
+  MessageBox MB_OK "Complete dir has been successfully installed! Run it under the Programs in the Start Menu."
+SectionEnd
+
+Section "Uninstall"
+  DeleteRegKey HKLM "Software\Microsoft\Windows\CurrentVersion\Uninstall\CompleteDir"
+  Delete "$STARTMENU\Programs\completedir.lnk"
+  RMDir /r "$INSTDIR"
+SectionEnd

Added: debtorrent/branches/upstream/current/docs/BUILD.windows.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/BUILD.windows.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/BUILD.windows.txt (added)
+++ debtorrent/branches/upstream/current/docs/BUILD.windows.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,38 @@
+install Python, version 2.0 or later -
+            http://python.org/
+
+install wxPython -
+            http://wxpython.org/
+
+install Python for Windows extensions -
+            http://sourceforge.net/projects/pywin32/
+
+install py2exe -
+            http://py2exe.org
+
+install the Python Cryptography Toolkit, if available -
+            http://www.amk.ca/python/code/crypto
+            http://www.voidspace.org.uk/python/modules.shtml#pycrypto
+
+install the nullsoft installer -
+            http://nsis.sourceforge.net
+
+copy icon_bt and icon_done from the icons directory to the root
+BitTornado directory.  Then, in a shell, go to the root BitTornado
+directory and run this command:
+
+python winsetup.py py2exe
+
+change to the newly created subdirectory dist, copy bittorrent.nsi
+and the icons into that directory, and run nsis on it.
+
+cd dist
+copy ..\bittorrent.nsi .
+copy ..\icon*.ico .
+c:\progra~1\nsis\makensis.exe bittorrent.nsi
+
+This will create an installer called bittornado-x.x.x.w32install.exe
+
+The installer is completely self-contained and will work on any 
+Windows machine, even without the above software having been 
+installed.

Added: debtorrent/branches/upstream/current/docs/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/docs/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,9 @@
+/BUILD.windows.txt/1.4/Sat Dec 23 04:51:04 2006//
+/FAQ.txt/1.2/Wed Oct  6 05:37:39 2004//
+/IMPORTANT-multitracker-readme.txt/1.1/Fri Sep  3 19:12:50 2004//
+/INSTALL.unix.txt/1.3/Sat Dec 23 04:52:05 2006//
+/README-Psyco.txt/1.1/Fri Sep  3 19:12:50 2004//
+/credits.txt/1.1/Fri Sep  3 19:12:50 2004//
+/multitracker-spec.txt/1.1/Fri Sep  3 19:12:50 2004//
+/webseed-spec.txt/1.1/Fri Sep  3 19:12:50 2004//
+D

Added: debtorrent/branches/upstream/current/docs/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/docs/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,8 @@
+/BUILD.windows.txt////*///
+/FAQ.txt////*///
+/IMPORTANT-multitracker-readme.txt////*///
+/INSTALL.unix.txt////*///
+/README-Psyco.txt////*///
+/credits.txt////*///
+/multitracker-spec.txt////*///
+/webseed-spec.txt////*///

Added: debtorrent/branches/upstream/current/docs/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/docs/CVS/Entries.Log
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/CVS/Entries.Log?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/CVS/Entries.Log (added)
+++ debtorrent/branches/upstream/current/docs/CVS/Entries.Log Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+A D/man////

Added: debtorrent/branches/upstream/current/docs/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/docs/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/docs/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/docs

Added: debtorrent/branches/upstream/current/docs/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/CVS/Root (added)
+++ debtorrent/branches/upstream/current/docs/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/docs/FAQ.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/FAQ.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/FAQ.txt (added)
+++ debtorrent/branches/upstream/current/docs/FAQ.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,51 @@
+Frequently Asked Questions about BitTorrent
+
+Q:  I use Mozilla/Opera, how can I use BT?
+
+A:  You need to edit the mimetype associations so that the "application/x-bittorrent"
+    mime type is opened with c:\progra~1\bittorrent\btdownloadprefetched.exe
+
+
+Q:  Does BitTorrent support resuming?
+
+A:  Yes, just save your download to the same location as the existing partial download.
+    BT will resume where it left off after checking the partial download.
+
+
+Q:  How do I know the download isn't corrupted?
+
+A:  BitTorrent does cryptographic hashing (SHA1) of all data.  When you see "Download
+    Succeeded" you can be sure that BT has already verified the integrity of the data.
+    The integrity and authenticity of a BT download is as good as the original request
+    to the tracker.
+
+
+Q:  I'm behind a firewall/NAT, can I use BT?
+
+A:  Yes, but you will get better performance if other peers can connect to you.  By
+    default, BitTorrent listens on port 6881, trying incrementally higher ports if
+    it is unable to bind, giving up after 6889 (the port range is configurable).
+    It's up to you to figure out how to poke a hole in your firewall/NAT.
+
+
+Q:  I published a file but whenever I try to download it hangs saying "connecting to
+    peers" and/or the download just never starts.
+
+A:  You need to leave a downloader running which already has the whole file.  The
+    publishing step merely registers the download information with the tracker.
+    Make sure other peers can connect to this downloader (not behind firewall or
+    NAT!)
+
+
+Q:  When is the Java implementation going to be ready?
+
+A:  Soon after the check clears.
+
+
+
+Q:  How do I limit the amount of bandwidth consumed by BT?
+
+A:  BT allows you to control how many peers may actively download from you at
+    once, using --max_uploads in the btdownloadheadless.py script.  Other than
+    that, you'll have to limit bandwidth some other way, perhaps
+    at the OS or router level.

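The firewall/NAT answer above describes the listening-port behaviour: start at 6881 and count up to 6889 until a bind succeeds.  A minimal Python sketch of that behaviour (illustrative only; the function name is made up and this is not the client's actual code):

import socket

def bind_listen_port(first=6881, last=6889):
    """Try ports first..last in order and return (socket, port) once a bind succeeds."""
    for port in range(first, last + 1):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.bind(('', port))
            sock.listen(5)
            return sock, port
        except socket.error:
            sock.close()
    raise IOError("no free port between %d and %d" % (first, last))
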
Added: debtorrent/branches/upstream/current/docs/IMPORTANT-multitracker-readme.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/IMPORTANT-multitracker-readme.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/IMPORTANT-multitracker-readme.txt (added)
+++ debtorrent/branches/upstream/current/docs/IMPORTANT-multitracker-readme.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,98 @@
+                 MULTITRACKER EXTENSION INFORMATION
+                 ==================================
+
+The multitracker support given in this client is not officially
+supported, and future support may be different.
+
+                   THIS DOCUMENT IS VERY IMPORTANT.
+              READ CAREFULLY OR YOUR CLIENTS WILL SUFFER.
+
+-----------------------------------------------------------------------
+
+This specification allows the client to connect to back-up trackers in
+the event of the failure of a main tracker.  It can also function to
+divide tracker traffic between multiple trackers.  Do *NOT* use this
+feature unless your trackers can share peer data with each other.
+Using the feature without shared peer data will result in the peers
+forming separate groups, or "clouds", between which they cannot share
+pieces, and some groups may become unseeded or may operate inefficiently.
+
+The source package includes highly experimental peerable tracker code;
+please see the contents of the multitracker folder for more
+information.
+
+As of this release, the utilities "btmakemetafile.py", "btreannounce.py"
+and "btcompletedir.py" have been modified to be able to add a multiple
+tracker list, "btshowmetainfo.py" has been modified to be able to show
+the list, and a new utility "btmaketorrentgui.py" has been added, that is
+able to manipulate the multiple tracker list.  "btcompletedirgui.py" has
+been superceded by this new utility.  Also included is "btcopyannounce.py",
+which can copy announce information from a "template" .torrent file.
+
+The "announce list" is separate from the torrent file's standard
+"announce" entry.  To be compatible with all clients, the torrent must
+contain a standard announce entry.  Clients that support this
+multitracker specification will ignore the standard announce and use
+only the list if it is present.
+
+To reannounce a torrent file to use multiple trackers, use the
+following format:
+
+"btreannounce.py http://maintrk.com:6969/announce --announce-list http://maintrk.com:6969/announce|http://bkup1.com:6969/announce|http://bkup2.com:6969/announce mytorrent.torrent"
+
+Note that the main tracker is mentioned twice, both as the standard
+announce and as the first element in the tracker list.  This is
+important; do not forget it.
+
+Also note that the URLs for the trackers are separated by the vertical
+bar character ("|").  Again, UNLESS YOUR TRACKERS TRADE PEER
+INFORMATION, DO NOT USE THE COMMAS.
+
+
+-----------------------------------------------------------------------
+
+
+
+btmakemetafile.py file trackerurl [params]
+
+--announce-list <arg>
+          a list of announce URLs - explained below (defaults to '')
+
+--piece_size_pow2 <arg>
+          which power of 2 to set the piece size to (defaults to 18)
+
+--comment <arg>
+          optional human-readable comment to put in .torrent (defaults
+          to '')
+
+--target <arg>
+          optional target file for the torrent (defaults to '')
+
+
+
+
+btreannounce.py <announce> [--announce-list <arg>] file1.torrent [file2.torrent...]
+
+  Where:
+    announce = tracker URL
+           Example: http://www.tracker.com:6699/announce
+
+    announce-list = optional list of redundant/backup tracker URLs, in
+                    the format:
+     url[,url...][|url[,url...]...]
+          where URLs separated by commas are all tried first
+          before the next group of URLs separated by the pipe is
+          checked.
+          If none is given, it is assumed you don't want one in
+          the metafile.
+          If announce-list is given, clients which support it
+          will ignore the <announce> value.
+           Examples:
+          http://tracker1.com|http://tracker2.com|http://tracker3.com
+               (tries trackers 1-3 in order)
+          http://tracker1.com,http://tracker2.com,http://tracker3.com
+               (tries trackers 1-3 in a randomly selected order)
+          http://tracker1.com|http://backup1.com,http://backup2.com
+               (tries tracker 1 first, then tries between the 2
+               backups randomly)
+

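The tier syntax documented above (groups separated by the pipe, alternatives within a group separated by commas) maps onto a simple list of lists.  A minimal sketch of that mapping, assuming the plain string form shown in the examples; this is not the code used by btreannounce.py:

def parse_announce_list(value):
    """Split 'url[,url...][|url[,url...]...]' into tiers: each pipe-separated
    group is tried in order, and the comma-separated URLs inside a group are
    meant to be tried in a randomly selected order."""
    return [tier.split(',') for tier in value.split('|') if tier]

# Example from the text above:
# parse_announce_list("http://tracker1.com|http://backup1.com,http://backup2.com")
# returns [['http://tracker1.com'], ['http://backup1.com', 'http://backup2.com']]
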
Added: debtorrent/branches/upstream/current/docs/INSTALL.unix.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/INSTALL.unix.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/INSTALL.unix.txt (added)
+++ debtorrent/branches/upstream/current/docs/INSTALL.unix.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,28 @@
+install Python, version 2.0 or later -
+            http://python.org/
+
+install wxPython -
+            http://wxpython.org/
+
+install the Python Cryptography Toolkit, if available -
+            http://www.amk.ca/python/code/crypto
+
+(under Debian, you can currently get the above by running
+"apt-get install libwxgtk2.2-python"
+from testing, and use Python 2.1)
+
+untar and run: python setup.py install
+
+put a line in /etc/mailcap similar to the following,
+replacing the path /usr/bin/btdownloadgui.py with the
+path where the script is actually installed.
+
+application/x-bittorrent; /usr/bin/btdownloadgui.py %s; test=test -n "$DISPLAY"
+
+You may have to restart your web browser for it to start 
+using BitTorrent.
+
+If you're using a web browser which doesn't respect 
+/etc/mailcap you can go into the mimetype configuration for 
+your web browser and manually associate application/x-bittorrent 
+with btdownloadgui.py (with the appropriate path, of course.)

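Once the /etc/mailcap line above is in place, the association can be checked with Python's standard mailcap module.  A small sketch, assuming a file named example.torrent (the filename is only a placeholder):

import mailcap

caps = mailcap.getcaps()  # reads /etc/mailcap and ~/.mailcap, among others
command, entry = mailcap.findmatch(caps, 'application/x-bittorrent',
                                   filename='example.torrent')
print(command or 'no handler registered for application/x-bittorrent')
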
Added: debtorrent/branches/upstream/current/docs/README-Psyco.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/README-Psyco.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/README-Psyco.txt (added)
+++ debtorrent/branches/upstream/current/docs/README-Psyco.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,4 @@
+Psyco is a compiler that works in conjunction with Python
+to speed up program execution, but it also increases memory
+usage.  Edit PSYCO.py to enable/disable Psyco in the
+client and utilities.

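The usual pattern for this kind of optional hook is a guarded import, so the flag in PSYCO.py can simply gate the block below.  This is a general sketch, not the actual contents of PSYCO.py:

try:
    import psyco      # optional specializing compiler
    psyco.full()      # let Psyco compile everything it can handle
except ImportError:
    pass              # Psyco not installed: run normally, just without the speedup
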
Added: debtorrent/branches/upstream/current/docs/credits.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/credits.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/credits.txt (added)
+++ debtorrent/branches/upstream/current/docs/credits.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,34 @@
+The following people have all helped with BitTorrent in some way -
+
+Bill Bumgarner
+David Creswick
+Andrew Loewenstern
+Ross Cohen
+Jeremy Avnet
+Greg Broiles
+Barry Cohen
+Bram Cohen
+sayke
+Steve Jenson
+Myers Carpenter
+Francis Crick
+Petru Paler
+Jeff Darcy
+John Gilmore
+Yann Vernier
+Pat Mahoney
+Boris Zbarsky
+Eric Tiedemann
+Henry 'Pi' James
+Loring Holden
+Robert Stone
+Michael Janssen
+Eike Frost
+Andrew Todd
+Jason Hoffman
+Xavier Bassery
+Uoti Urpala
+Jon Wolf
+Christoph Hohmann
+Pav Lucistnik
+Micah Anderson

Added: debtorrent/branches/upstream/current/docs/man/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/docs/man/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,18 @@
+/bittorrent-downloader.bittornado.1/1.1/Mon Apr 18 04:20:55 2005//
+/bittorrent-multi-downloader.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btcompletedir.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btcompletedirgui.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btcopyannounce.1/1.1/Fri Sep  3 19:07:39 2004//
+/btdownloadcurses.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btdownloadgui.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btdownloadheadless.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btlaunchmany.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btlaunchmanycurses.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btmakemetafile.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btmaketorrentgui.1/1.1/Mon Apr 18 04:20:56 2005//
+/btreannounce.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btrename.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/btsethttpseeds.1/1.1/Fri Sep  3 19:07:39 2004//
+/btshowmetainfo.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+/bttrack.bittornado.1/1.1/Mon Apr 18 04:20:56 2005//
+D

Added: debtorrent/branches/upstream/current/docs/man/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/docs/man/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,17 @@
+/bittorrent-downloader.bittornado.1////*///
+/bittorrent-multi-downloader.bittornado.1////*///
+/btcompletedir.bittornado.1////*///
+/btcompletedirgui.bittornado.1////*///
+/btcopyannounce.1////*///
+/btdownloadcurses.bittornado.1////*///
+/btdownloadgui.bittornado.1////*///
+/btdownloadheadless.bittornado.1////*///
+/btlaunchmany.bittornado.1////*///
+/btlaunchmanycurses.bittornado.1////*///
+/btmakemetafile.bittornado.1////*///
+/btmaketorrentgui.1////*///
+/btreannounce.bittornado.1////*///
+/btrename.bittornado.1////*///
+/btsethttpseeds.1////*///
+/btshowmetainfo.bittornado.1////*///
+/bttrack.bittornado.1////*///

Added: debtorrent/branches/upstream/current/docs/man/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/docs/man/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/docs/man/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/docs/man/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/docs/man

Added: debtorrent/branches/upstream/current/docs/man/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/CVS/Root (added)
+++ debtorrent/branches/upstream/current/docs/man/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/docs/man/bittorrent-downloader.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/bittorrent-downloader.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/bittorrent-downloader.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/bittorrent-downloader.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,127 @@
+.TH "BITTORRENT-DOWNLOADER" 1 "Sep 24 2003"
+.SH NAME
+bittorrent-downloader \- download files using a scatter-gather network
+.SH SYNOPSIS
+.nf
+.B btdownloadheadless [ option ... ] \fIURL\fB
+.B btdownloadheadless [ option ... ] \fIfilename\fB
+.B btdownloadcurses   [ option ... ] \fIURL\fB
+.B btdownloadcurses   [ option ... ] \fIfilename\fB
+.B btdownloadgui      [ option ... ] \fIURL\fB
+.B btdownloadgui      [ option ... ] \fIfilename\fB
+.fi
+.br
+.B 
+.SH DESCRIPTION
+This manual page documents briefly the \fBbtdownloadheadless\fP,
+\fBbtdownloadcurses\fP, and \fBbtdownloadgui\fP commands.
+This manual page was written for the Debian distribution
+because the original program does not have a manual page.
+.PP
+These are all programs that allow a user to download files using
+bittorrent, a peer-to-peer, scatter-gather network protocol.
+They all have the same options.
+.SH OPTIONS
+These programs follow the usual GNU command line syntax, with long
+options starting with two dashes (`--').
+A summary of options is included below.
+.TP
+.B \-\-responsefile \fIfilename\fP
+treat \fIfilename\fP as the file in which the server response was stored. If this
+option is used, no filename or URL should be present on the command line.
+.TP
+.B \-\-url \fIurl\fP
+retrieve the torrent info file from \fIurl\fP.  If this option is used, no
+filename or URL should be present on the command line.
+.TP
+.B \-i \fIip\fP | \-\-ip \fIip\fP
+report \fIip\fP as your IP to the tracker
+.TP
+.B \-\-bind \fIip\fP
+bind to \fIip\fP instead of the default
+.TP
+.B \-\-minport \fIportnum\fP
+set \fIportnum\fP as the minimum port to listen on, counts up if unavailable (default 10000)
+.TP
+.B \-\-maxport \fIportnum\fP
+set \fIportnum\fP as the maximum port to listen on (default 60000)
+.TP
+.B \-\-saveas \fIfilename\fP
+store the downloaded file to \fIfilename\fP, instead of querying user (gui) or 
+using the filename stored in the torrent info file
+.TP
+.B \-\-max_uploads \fInum\fP
+Only allow \fInum\fP uploads at once (default 4)
+.TP 
+.B \-\-max_upload_rate \fIkbytes\fP
+maximum rate to upload at in kilobytes, 0 means no limit (default 0)
+.TP
+.B \-\-keepalive_interval \fIsecs\fP
+pause \fIsecs\fP seconds between sending keepalives (default 120.0)
+.TP
+.B \-\-download_slice_size \fIbytes\fP
+query for \fIbytes\fP bytes per request (default 32768)
+.TP
+.B \-\-request_backlog \fInum\fP
+keep \fInum\fP requests in a single pipe at once (default 5)
+.TP
+.B \-\-max_message_length \fIbytes\fP
+set \fIbytes\fP to the maximum length prefix encoding you'll accept over the wire - larger values get the connection dropped (default 8388608)
+.TP
+.B \-\-timeout \fIsecs\fP
+wait \fIsecs\fP before closing sockets on which nothing has been received (default 300.0)
+.TP
+.B \-\-timeout_check_interval \fIsecs\fP
+check whether connections have timed out every \fIsecs\fP seconds (default 60.0)
+.TP
+.B \-\-max_slice_length \fIbytes\fP
+requests from peers larger than \fIbytes\fP bytes are ignored (default 131072)
+.TP
+.B \-\-max_rate_recalculate_interval \fIsecs\fP
+connections that pause longer than \fIsecs\fP seconds are given reduced rate (default 15.0)
+.TP
+.B \-\-max_rate_period \fIsecs\fP
+set \fIsecs\fP to the maximum amount of time to guess the current rate estimate represents (default 20.0)
+.TP
+.B \-\-upload_rate_fudge \fIsecs\fP
+set the time equivalent of writing to kernel-level TCP buffer to \fIsecs\fP (default 5.0)
+.TP
+.B \-\-display_interval \fIsecs\fP
+update displayed information every \fIsecs\fP seconds (default 0.1)
+.TP
+.B \-\-rerequest_interval \fIsecs\fP
+request more peers every \fIsecs\fP seconds (default 300)
+.TP
+.B \-\-min_peers \fInum\fP
+do not rerequest if we have \fInum\fP peers already (default 20)
+.TP
+.B \-\-http_timeout \fIsecs\fP
+wait \fIsecs\fP seconds before assuming a http connection has timed out (default 60)
+.TP
+.B \-\-snub_time \fIsecs\fP
+wait \fIsecs\fP seconds for data to come in over a connection before assuming it's semi-permanently choked (default 30.0)
+.TP
+.B \-\-spew \fI 1 | 0 \fP
+whether to display diagnostic info to stdout.  This option is not useful when
+using btdownloadcurses or btdownloadgui. (default 0)
+.TP
+.B \-\-max_initiate \fInum\fP
+stop initiating new connections when we have \fInum\fP peers (default 40)
+.TP
+.B \-\-check_hashes \fI 1 | 0 \fP
+whether to check hashes on disk (defaults to 1)
+.TP
+.B \-\-report_hash_failures \fI 1 | 0 \fP
+whether to report to the user that hash failures occur (non-fatal, common error) (default 0)
+.TP 
+.B \-\-rarest_first_priority_cutoff \fInum\fP
+the number of peers which need to have a piece before other partials take
+priority over rarest first (default 3)
+
+.SH SEE ALSO
+.BR bttrack (1),
+.BR btmakemetafile (1),
+.BR btlaunchmany (1).
+.br
+.SH AUTHOR
+This manual page was written by Michael Janssen <jamuraa at debian.org>, and modified by Micah Anderson <micah at debian.org>
+for the Debian GNU/Linux system (but may be used by others).

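The --rerequest_interval and --min_peers options documented above combine into a simple policy: ask the tracker for more peers periodically, but skip the request while enough peers are already connected.  A sketch of that policy using the defaults from this manual page (the function and argument names are illustrative, not the client's internals):

import time

def should_rerequest(num_peers, last_request, min_peers=20,
                     rerequest_interval=300, now=None):
    """Return True when it is time to ask the tracker for more peers."""
    now = time.time() if now is None else now
    if num_peers >= min_peers:       # already have enough peers, don't rerequest
        return False
    return now - last_request >= rerequest_interval
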
Added: debtorrent/branches/upstream/current/docs/man/bittorrent-multi-downloader.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/bittorrent-multi-downloader.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/bittorrent-multi-downloader.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/bittorrent-multi-downloader.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,31 @@
+.TH "BITTORRENT-MULTI-DOWNLOADER" 1 "Jan 18 2003"
+.SH NAME
+bittorrent-multi-downloader \- multiple file clients for bittorrent
+.SH SYNOPSIS
+.nf
+.B btlaunchmany \fIdirname\fP [ option ... ]
+.B btlaunchmanycurses \fIdirname\fP [ option ... ]
+.fi
+.SH DESCRIPTION
+This manual page documents briefly the \fBbtlaunchmany\fP and 
+\fBbtlaunchmanycurses\fP commands.
+This manual page was written for the Debian distribution
+because the original program does not have a manual page.
+.PP
+\fBbtlaunchmany\fP is a program that eases the use of 
+bittorrent in multiple-downloader situations.  The program checks
+a directory for torrent files.  When it finds any, it starts a 
+separate downloader thread for each file.
+.SH OPTIONS
+
+These programs have the exact same options as the normal 
+downloaders, which are documented in \fBbittorrent-downloader\fP(1).
+
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR btmakemetafile (1),
+.BR bttrack (1).
+.br
+.SH AUTHOR
+This manual page was written by Michael Janssen <jamuraa at debian.org>,
+for the Debian GNU/Linux system (but may be used by others).

Added: debtorrent/branches/upstream/current/docs/man/btcompletedir.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btcompletedir.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btcompletedir.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btcompletedir.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,53 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTCOMPLETEDIR "1" "May 2004" "btcompletedir" "User Commands"
+.SH NAME
+btcompletedir \- manual page for btcompletedir 
+.SH SYNOPSIS
+.B btcompletedir
+\fI<trackerurl> <dir> \fR[\fIdir\fR...] [\fIparams\fR...]
+.SH DESCRIPTION
+makes a .torrent file for every file or directory present in each dir.
+.PP
+\fB\-\-piece_size_pow2\fR \fInum\fP
+.IP
+which power of 2 to set the piece size to (0 = automatic) (defaults
+to 0)
+.PP
+\fB\-\-comment\fR \fIcomment\fP
+.IP
+optional human-readable \fIcomment\fP to put in .torrent (defaults to '')
+.PP
+\fB\-\-target\fR \fIfile\fP
+.IP
+optional target \fIfile\fP for the torrent (defaults to '')
+.PP
+\fB\-\-announce_list\fR \fIURLs\fP
+.IP
+a list of announce \fIURLs\fP (defaults to '').  This is an optional list
+of redundant/backup tracker \fIURLs\fP; the format is as follows:
+.IP
+url[,url...][|url[,url...]...]
+.IP
+where URLs separated by commas are all tried first
+before the next group of URLs separated by the pipe is checked.
+If none is given, it is assumed you don't want one in the metafile.
+If announce_list is given, clients which support it
+will ignore the <announce> value.
+.IP
+Examples:
+.IP
+http://tracker1.com|http://tracker2.com|http://tracker3.com
+.IP
+(tries trackers 1-3 in order)
+.IP
+http://tracker1.com,http://tracker2.com,http://tracker3.com
+.IP
+(tries trackers 1-3 in a randomly selected order)
+.IP
+http://tracker1.com|http://backup1.com,http://backup2.com
+.IP
+(tries tracker 1 first, then tries between the 2 backups randomly)
+.PP
+makes a .torrent file for every file or directory present in each dir.
+.PP
+.SH "SEE ALSO"
+http://bittornado.org

Added: debtorrent/branches/upstream/current/docs/man/btcompletedirgui.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btcompletedirgui.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btcompletedirgui.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btcompletedirgui.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,54 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTCOMPLETEDIRGUI "1" "May 2004" "btcompletedirgui" "User Commands"
+.SH NAME
+btcompletedirgui \- manual page for btcompletedirgui 
+.SH SYNOPSIS
+.B btcompletedirgui
+\fI<trackerurl> <dir> \fR[\fIdir\fR...] [\fIparams\fR...]
+.SH DESCRIPTION
+makes a .torrent file for every file or directory present in each \fIdir\fP.
+.PP
+\fB\-\-piece_size_pow2\fR \fInum\fP
+.IP
+which power of 2 to set the piece size to (0 = automatic) (defaults
+to 0)
+.PP
+\fB\-\-comment\fR \fIcomment\fP
+.IP
+optional human-readable \fIcomment\fP to put in .torrent (defaults to '')
+.PP
+\fB\-\-target\fR \fIfile\fP
+.IP
+optional target \fIfile\fP for the torrent (defaults to '')
+.PP
+\fB\-\-announce_list\fR \fIURLs\fP
+.IP
+a list of announce \fIURLs\fP (defaults to ''). An optional list of 
+redundant/backup tracker \fIURLs\fP, in the format:
+.IP
+url[,url...][|url[,url...]...]
+.IP
+where URLs separated by commas are all tried first
+before the next group of URLs separated by the pipe is checked.
+If none is given, it is assumed you don't want one in the metafile.
+If announce_list is given, clients which support it
+will ignore the <announce> value.
+.IP
+Examples:
+.IP
+http://tracker1.com|http://tracker2.com|http://tracker3.com
+.IP
+(tries trackers 1-3 in order)
+.IP
+http://tracker1.com,http://tracker2.com,http://tracker3.com
+.IP
+(tries trackers 1-3 in a randomly selected order)
+.IP
+http://tracker1.com|http://backup1.com,http://backup2.com
+.IP
+(tries tracker 1 first, then tries between the 2 backups randomly)
+.PP
+makes a .torrent file for every file or directory present in each dir.
+.PP
+.SH "SEE ALSO"
+http://bittornado.org

Added: debtorrent/branches/upstream/current/docs/man/btcopyannounce.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btcopyannounce.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btcopyannounce.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btcopyannounce.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,13 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTCOPYANNOUNCE "1" "May 2004" "btcopyannounce" "User Commands"
+.SH NAME
+Btcopyannounce \- manual page for btcopyannounce
+.SH SYNOPSIS
+.B btcopyannounce
+\fI<source.torrent> <file1.torrent> \fR[\fIfile2.torrent\fR...]
+.SH DESCRIPTION
+copies announce information from source to all specified torrents
+.PP
+copies announce information from source to all specified torrents
+.SH "SEE ALSO"
+http://bittornado.org

Added: debtorrent/branches/upstream/current/docs/man/btdownloadcurses.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btdownloadcurses.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btdownloadcurses.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btdownloadcurses.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,258 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTDOWNLOADCURSES "1" "August 2004" "btdownloadcurses (bittornado)" "User Commands"
+.SH NAME
+Btdownloadcurses \- curses bittornado download interface
+.SH SYNOPSIS
+.B btdownloadcurses
+\fI<global options>\fR
+.SH DESCRIPTION
+Curses interface to download torrents.
+.SH OPTIONS
+This program follows the usual GNU command-line syntax, with long options
+starting with two dashes ('--'). A summary of options is included below.
+
+.PP
+\fB\-\-max_uploads\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of uploads to allow at once. (defaults to 7)
+.PP
+\fB\-\-keepalive_interval\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to pause between sending keepalives (defaults to 120.0)
+.PP
+\fB\-\-download_slice_size\fR \fIbytes\fP
+.IP
+How many \fIbytes\fP to query for per request. (defaults to 16384)
+.PP
+\fB\-\-upload_unit_size\fR \fIbytes\fP
+.IP
+when limiting upload rate, how many \fIbytes\fP to send at a time (defaults to 1460)
+.PP
+\fB\-\-request_backlog\fR \fInumber\fP
+.IP
+maximum \fInumber\fP of requests to keep in a single pipe at once. (defaults to 10)
+.PP
+\fB\-\-max_message_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP prefix encoding you'll accept over the wire - larger values get the
+connection dropped. (defaults to 8388608)
+.PP
+\fB\-\-ip\fR \fIip\fP
+.IP
+\fIip\fP to report you have to the tracker. (defaults to '')
+.PP
+\fB\-\-minport\fR \fIportnum\fP
+.IP
+set \fIportnum\fP as the minimum port to listen on, counts up if unavailable (defaults to 10000)
+.PP
+\fB\-\-maxport\fR \fIportnum\fP
+.IP
+set \fIportnum\fP as the maximum port to listen on (defaults to 60000)
+.PP
+\fB\-\-random_port\fR \fI 0 | 1 \fP
+.IP
+whether to choose randomly inside the port range instead of counting up linearly
+(defaults to 1)
+.PP
+\fB\-\-responsefile\fR \fIfile\fP
+.IP
+\fIfile\fP the server response was stored in, alternative to url (defaults to '')
+.PP
+\fB\-\-url\fR \fIURL\fP
+.IP
+\fIURL\fP to get file from, alternative to responsefile (defaults to '')
+.PP
+\fB\-\-selector_enabled\fR \fI 0 | 1 \fP
+.IP
+whether to enable the file selector and fast resume function (defaults to 1)
+.PP
+\fB\-\-expire_cache_data\fR \fIdays\fP
+.IP
+the number of \fIdays\fP after which you wish to expire old cache data (0 = disabled) (defaults
+to 10)
+.PP
+\fB\-\-priority\fR \fI -1|0|1|2[,-1|0|1|2] \fP
+.IP
+a list of file priorities separated by commas, must be one per file, 0 = highest, 1 =
+normal, 2 = lowest, \fB\-1\fR = download disabled (defaults to ''). Order is based
+on the file/torrent order as shown by btshowmetainfo. For example, to download only
+the third of four files use: --priority -1,-1,2,-1
+.PP
+\fB\-\-saveas\fR \fIfilename\fP
+.IP
+local \fIfilename\fP to save the file as, null indicates query user (defaults to '')
+.PP
+\fB\-\-timeout\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP to wait before closing sockets on which nothing has been received (defaults to
+300.0)
+.PP
+\fB\-\-timeout_check_interval\fR \fIseconds\fP
+.IP
+time to wait in \fIseconds\fP between checking if any connections have timed out (defaults to 60.0)
+.PP
+\fB\-\-max_slice_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP slice to send to peers, larger requests are ignored (defaults to 131072)
+.PP
+\fB\-\-max_rate_period\fR \fIseconds\fP
+.IP
+maximum amount of time in \fIseconds\fP to guess the current rate estimate represents (defaults to 20.0)
+.PP
+\fB\-\-bind\fR \fIip[,hostname]\fP
+.IP
+comma-separated list of \fIips/hostnames\fP to bind to locally (defaults to '')
+.PP
+\fB\-\-ipv6_enabled\fR \fI 0 | 1 \fP
+.IP
+allow the client to connect to peers via IPv6 (defaults to 0)
+.PP
+\fB\-\-ipv6_binds_v4\fR \fI 0 | 1 \fP
+.IP
+set if an IPv6 server socket will also field IPv4 connections (defaults to 1)
+.PP
+\fB\-\-upnp_nat_access\fR \fI 0 | 1 | 2 \fP
+.IP
+attempt to autoconfigure a UPnP router to forward a server port (0 = disabled, 1 = mode 1
+[fast], 2 = mode 2 [slow]) (defaults to 1)
+.PP
+\fB\-\-upload_rate_fudge\fR \fIseconds\fP
+.IP
+time equivalent in \fIseconds\fP of writing to kernel-level TCP buffer, for rate adjustment (defaults to
+5.0)
+.PP
+\fB\-\-tcp_ack_fudge\fR \fIoverhead\fP
+.IP
+how much TCP ACK download \fIoverhead\fP to add to upload rate calculations (0 = disabled)
+(defaults to 0.03)
+.PP
+\fB\-\-display_interval\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP between updates of displayed information (defaults to 0.5)
+.PP
+\fB\-\-rerequest_interval\fR \fIseconds\fP
+.IP
+time to wait, in \fIseconds\fP, between requesting more peers (defaults to 300)
+.PP
+\fB\-\-min_peers\fR \fInumber\fP
+.IP
+minimum \fInumber\fP of peers to not do rerequesting (defaults to 20)
+.PP
+\fB\-\-http_timeout\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to wait before assuming that an http connection has timed out (defaults
+to 60)
+.PP
+\fB\-\-max_initiate\fR \fInumber\fP
+.IP
+\fInumber\fP of peers at which to stop initiating new connections (defaults to 40)
+.PP
+\fB\-\-check_hashes\fR \fI 0 | 1 \fP
+.IP
+whether to check hashes on disk (defaults to 1)
+.PP
+\fB\-\-max_upload_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to upload at (0 = no limit, \fB\-1\fR = automatic) (defaults to 0)
+.PP
+\fB\-\-max_download_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to download at (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-alloc_type\fR \fInormal | background | pre-allocate | sparse\fP
+.IP
+allocation type (may be normal, background, pre-allocate or sparse) (defaults to
+\&'normal')
+.PP
+\fB\-\-alloc_rate\fR \fIMiB/s\fP
+.IP
+rate (in \fIMiB/s\fP) to allocate space at using background allocation (defaults to 2.0)
+.PP
+\fB\-\-buffer_reads\fR \fI 0 | 1 \fP
+.IP
+whether to buffer disk reads (defaults to 1)
+.PP
+\fB\-\-write_buffer_size\fR \fIspace\fP
+.IP
+the maximum amount of \fIspace\fP to use for buffering disk writes (in megabytes, 0 = disabled)
+(defaults to 4)
+.PP
+\fB\-\-snub_time\fR \fIseconds\fP
+.IP
+\fIseconds\fP to wait for data to come in over a connection before assuming it's
+semi-permanently choked (defaults to 30.0)
+.PP
+\fB\-\-spew\fR \fI 0 | 1 \fP
+.IP
+whether to display diagnostic info to stdout (defaults to 0)
+.PP
+\fB\-\-rarest_first_cutoff\fR \fInumber\fP
+.IP
+\fInumber\fP of downloads at which to switch from random to rarest first (defaults to 2)
+.PP
+\fB\-\-rarest_first_priority_cutoff\fR \fInumber\fP
+.IP
+the \fInumber\fP of peers which need to have a piece before other partials take priority over
+rarest first (defaults to 5)
+.PP
+\fB\-\-min_uploads\fR \fInumber\fP
+.IP
+the \fInumber\fP of uploads to fill out to with extra optimistic unchokes (defaults to 4)
+.PP
+\fB\-\-max_files_open\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of files to keep open at a time, 0 means no limit (defaults to 50)
+.PP
+\fB\-\-round_robin_period\fR \fIseconds\fP
+.IP
+the number of \fIseconds\fP between the client's switching upload targets (defaults to 30)
+.PP
+\fB\-\-super_seeder\fR \fI 0 | 1 \fP
+.IP
+whether to use special upload-efficiency-maximizing routines (only for dedicated seeds)
+(defaults to 0)
+.PP
+\fB\-\-security\fR \fI 0 | 1 \fP
+.IP
+whether to enable extra security features intended to prevent abuse (defaults to 1)
+.PP
+\fB\-\-max_connections\fR \fInumber\fP
+.IP
+the absolute maximum \fInumber\fP of peers to connect with (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-auto_kick\fR \fI 0 | 1 \fP
+.IP
+whether to allow the client to automatically kick/ban peers that send bad data (defaults
+to 1)
+.PP
+\fB\-\-double_check\fR \fI 0 | 1 \fP
+.IP
+whether to double-check data being written to the disk for errors (may increase CPU load)
+(defaults to 1)
+.PP
+\fB\-\-triple_check\fR \fI 0 | 1 \fP
+.IP
+whether to thoroughly check data being written to the disk (may slow disk access)
+(defaults to 0)
+.PP
+\fB\-\-lock_files\fR \fI 0 | 1 \fP
+.IP
+whether to lock files the client is working with (defaults to 1)
+.PP
+\fB\-\-lock_while_reading\fR \fI 0 | 1 \fP
+.IP
+whether to lock access to files being read (defaults to 0)
+.PP
+\fB\-\-auto_flush\fR \fIminutes\fP
+.IP
+\fIminutes\fP between automatic flushes to disk (0 = disabled) (defaults to 0)
+.PP
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR bittorrent-multi-downloader (1),
+.BR btdownloadgui (1),
+.BR btdownloadheadless (1).
+.br
+.SH AUTHOR
+This manual page was written by Micah Anderson <micah at riseup.net>,
+for the Debian GNU/Linux system (but may be used by others).

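The --priority value described above is a comma-separated list with one entry per file, where -1 disables a file and 0/1/2 mean highest/normal/lowest.  A minimal parser sketch (the function name is illustrative and this is not the client's code):

def parse_priority(value, num_files):
    """Turn a --priority string into a list of per-file priority levels."""
    if not value:
        return None                  # '' (the default) means no explicit priorities
    levels = [int(p) for p in value.split(',')]
    if len(levels) != num_files or any(p not in (-1, 0, 1, 2) for p in levels):
        raise ValueError("--priority needs one of -1,0,1,2 per file")
    return levels

# From the example above, downloading only the third of four files:
# parse_priority("-1,-1,2,-1", 4) returns [-1, -1, 2, -1]
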
Added: debtorrent/branches/upstream/current/docs/man/btdownloadgui.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btdownloadgui.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btdownloadgui.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btdownloadgui.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,258 @@
+\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTDOWNLOADGUI "1" "August 2004" "btdownloadgui (bittornado)" "User Commands"
+.SH NAME
+Btdownloadgui \- GUI bittornado download interface
+.SH SYNOPSIS
+.B btdownloadgui
+\fI<global options>\fR
+.SH DESCRIPTION
+GUI interface to download torrents.
+.SH OPTIONS
+This program follows the usual GNU command-line syntax, with long options
+starting with two dashes ('--'). A summary of options is included below.
+
+.PP
+\fB\-\-max_uploads\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of uploads to allow at once. (defaults to 7)
+.PP
+\fB\-\-keepalive_interval\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to pause between sending keepalives (defaults to 120.0)
+.PP
+\fB\-\-download_slice_size\fR \fIbytes\fP
+.IP
+How many \fIbytes\fP to query for per request. (defaults to 16384)
+.PP
+\fB\-\-upload_unit_size\fR \fIbytes\fP
+.IP
+when limiting upload rate, how many \fIbytes\fP to send at a time (defaults to 1460)
+.PP
+\fB\-\-request_backlog\fR \fInumber\fP
+.IP
+maximum \fInumber\fP of requests to keep in a single pipe at once. (defaults to 10)
+.PP
+\fB\-\-max_message_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP prefix encoding you'll accept over the wire - larger values get the
+connection dropped. (defaults to 8388608)
+.PP
+\fB\-\-ip\fR \fIip\fP
+.IP
+\fIip\fP to report you have to the tracker. (defaults to '')
+.PP
+\fB\-\-minport\fR \fIport\fP
+.IP
+minimum \fIport\fP to listen on, counts up if unavailable (defaults to 10000)
+.PP
+\fB\-\-maxport\fR \fIport\fP
+.IP
+maximum \fIport\fP to listen on (defaults to 60000)
+.PP
+\fB\-\-random_port\fR \fI 0 | 1 \fP
+.IP
+whether to choose randomly inside the port range instead of counting up linearly
+(defaults to 1)
+.PP
+\fB\-\-responsefile\fR \fIfile\fP
+.IP
+\fIfile\fP the server response was stored in, alternative to url (defaults to '')
+.PP
+\fB\-\-url\fR \fIURL\fP
+.IP
+\fIURL\fP to get file from, alternative to responsefile (defaults to '')
+.PP
+\fB\-\-selector_enabled\fR \fI 0 | 1 \fP
+.IP
+whether to enable the file selector and fast resume function (defaults to 1)
+.PP
+\fB\-\-expire_cache_data\fR \fIdays\fP
+.IP
+the number of \fIdays\fP after which you wish to expire old cache data (0 = disabled) (defaults
+to 10)
+.PP
+\fB\-\-priority\fR \fI -1|0|1|2\fP[\fI,-1|0|1|2\fP]
+.IP
+a list of file priorities separated by commas, must be one per file, 0 = highest, 1 =
+normal, 2 = lowest, \fB\-1\fR = download disabled (defaults to ''), order is based
+on the file/torrent order as shown by btshowmetainfo. For example, to download only
+the third of four files use: --priority -1,-1,2,-1
+.PP
+\fB\-\-saveas\fR \fIfilename\fP
+.IP
+local \fIfilename\fP to save the file as, null indicates query user (defaults to '')
+.PP
+\fB\-\-timeout\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP to wait before closing sockets on which nothing has been received (defaults to
+300.0)
+.PP
+\fB\-\-timeout_check_interval\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP to wait between checking if any connections have timed out (defaults to 60.0)
+.PP
+\fB\-\-max_slice_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP slice to send to peers, larger requests are ignored (defaults to 131072)
+.PP
+\fB\-\-max_rate_period\fR \fIseconds\fP
+.IP
+maximum amount of time in \fIseconds\fP to guess the current rate estimate represents (defaults to 20.0)
+.PP
+\fB\-\-bind\fR \fIip | hostname\fP[\fI,ip | hostname\fP]
+.IP
+comma-separated list of \fIips/hostnames\fP to bind to locally (defaults to '')
+.PP
+\fB\-\-ipv6_enabled\fR \fI 0 | 1 \fP
+.IP
+allow the client to connect to peers via IPv6 (defaults to 0)
+.PP
+\fB\-\-ipv6_binds_v4\fR \fI 0 | 1 \fP
+.IP
+set if an IPv6 server socket will also field IPv4 connections (defaults to 1)
+.PP
+\fB\-\-upnp_nat_access\fR \fI 0 | 1 | 2 \fP
+.IP
+attempt to autoconfigure a UPnP router to forward a server port (0 = disabled, 1 = mode 1
+[fast], 2 = mode 2 [slow]) (defaults to 1)
+.PP
+\fB\-\-upload_rate_fudge\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP equivalent of writing to kernel-level TCP buffer, for rate adjustment (defaults to
+5.0)
+.PP
+\fB\-\-tcp_ack_fudge\fR \fIoverhead\fP
+.IP
+how much TCP ACK download \fIoverhead\fP to add to upload rate calculations (0 = disabled)
+(defaults to 0.03)
+.PP
+\fB\-\-display_interval\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP between updates of displayed information (defaults to 0.5)
+.PP
+\fB\-\-rerequest_interval\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP to wait between requesting more peers (defaults to 300)
+.PP
+\fB\-\-min_peers\fR \fInumber\fP
+.IP
+minimum \fInumber\fP of peers to not do rerequesting (defaults to 20)
+.PP
+\fB\-\-http_timeout\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to wait before assuming that an http connection has timed out (defaults
+to 60)
+.PP
+\fB\-\-max_initiate\fR \fInumber\fP
+.IP
+\fInumber\fP of peers at which to stop initiating new connections (defaults to 40)
+.PP
+\fB\-\-check_hashes\fR \fI 0 | 1 \fP
+.IP
+whether to check hashes on disk (defaults to 1)
+.PP
+\fB\-\-max_upload_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to upload at (0 = no limit, \fB\-1\fR = automatic) (defaults to 0)
+.PP
+\fB\-\-max_download_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to download at (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-alloc_type\fR \fInormal | background | pre-allocate | sparse\fP
+.IP
+allocation type (may be \fInormal\fP, \fIbackground\fP, \fIpre-allocate\fP or \fIsparse\fP) (defaults to
+\&'\fInormal\fP')
+.PP
+\fB\-\-alloc_rate\fR \fIMiB/s\fP
+.IP
+rate (in \fIMiB/s\fP) to allocate space at using background allocation (defaults to 2.0)
+.PP
+\fB\-\-buffer_reads\fR \fI 0 | 1 \fP
+.IP
+whether to buffer disk reads (defaults to 1)
+.PP
+\fB\-\-write_buffer_size\fR \fImegabytes\fP
+.IP
+the maximum amount of space to use for buffering disk writes (in \fImegabytes\fP, 0 = disabled)
+(defaults to 4)
+.PP
+\fB\-\-snub_time\fR \fIseconds\fP
+.IP
+\fIseconds\fP to wait for data to come in over a connection before assuming it's
+semi-permanently choked (defaults to 30.0)
+.PP
+\fB\-\-spew\fR \fI 0 | 1 \fP
+.IP
+whether to display diagnostic info to stdout (defaults to 0)
+.PP
+\fB\-\-rarest_first_cutoff\fR \fInumber\fP
+.IP
+\fInumber\fP of downloads at which to switch from random to rarest first (defaults to 2)
+.PP
+\fB\-\-rarest_first_priority_cutoff\fR \fInumber\fP
+.IP
+the \fInumber\fP of peers which need to have a piece before other partials take priority over
+rarest first (defaults to 5)
+.PP
+\fB\-\-min_uploads\fR \fInumber\fP
+.IP
+the \fInumber\fP of uploads to fill out to with extra optimistic unchokes (defaults to 4)
+.PP
+\fB\-\-max_files_open\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of files to keep open at a time, 0 means no limit (defaults to 50)
+.PP
+\fB\-\-round_robin_period\fR \fIseconds\fP
+.IP
+the number of \fIseconds\fP between the client's switching upload targets (defaults to 30)
+.PP
+\fB\-\-super_seeder\fR \fI 0 | 1 \fP
+.IP
+whether to use special upload-efficiency-maximizing routines (only for dedicated seeds)
+(defaults to 0)
+.PP
+\fB\-\-security\fR \fI 0 | 1 \fP
+.IP
+whether to enable extra security features intended to prevent abuse (defaults to 1)
+.PP
+\fB\-\-max_connections\fR \fInumber\fP
+.IP
+the absolute maximum \fInumber\fP of peers to connect with (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-auto_kick\fR \fI 0 | 1 \fP
+.IP
+whether to allow the client to automatically kick/ban peers that send bad data (defaults
+to 1)
+.PP
+\fB\-\-double_check\fR \fI 0 | 1 \fP
+.IP
+whether to double-check data being written to the disk for errors (may increase CPU load)
+(defaults to 1)
+.PP
+\fB\-\-triple_check\fR \fI 0 | 1 \fP
+.IP
+whether to thoroughly check data being written to the disk (may slow disk access)
+(defaults to 0)
+.PP
+\fB\-\-lock_files\fR \fI 0 | 1 \fP
+.IP
+whether to lock files the client is working with (defaults to 1)
+.PP
+\fB\-\-lock_while_reading\fR \fI 0 | 1 \fP
+.IP
+whether to lock access to files being read (defaults to 0)
+.PP
+\fB\-\-auto_flush\fR \fIminutes\fP
+.IP
+\fIminutes\fP between automatic flushes to disk (0 = disabled) (defaults to 0)
+.PP
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR bittorrent-multi-downloader (1),
+.BR btdownloadcurses (1),
+.BR btdownloadheadless (1).
+.br
+.SH AUTHOR
+This manual page was written by Micah Anderson <micah at riseup.net>,
+for the Debian GNU/Linux system (but may be used by others).

Added: debtorrent/branches/upstream/current/docs/man/btdownloadheadless.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btdownloadheadless.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btdownloadheadless.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btdownloadheadless.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,272 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTDOWNLOADHEADLESS "1" "August 2004" "btdownloadheadless (bittornado)" "User Commands"
+.SH NAME
+btdownloadheadless \- headless bittornado download interface
+.SH DESCRIPTION
+Downloads torrents in the same manner as btdownloadcurses or
+btdownloadgui, but does not require a terminal to stay alive.
+.SH OPTIONS
+This program follows the usual GNU command-line syntax, with long options
+starting with two dashes ('--'). A summary of options is included below.
+
+.PP
+\fB\-\-max_uploads\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of uploads to allow at once. (defaults to 7)
+.PP
+\fB\-\-keepalive_interval\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to pause between sending keepalives (defaults to
+120.0)
+.PP
+\fB\-\-download_slice_size\fR \fIbytes\fP
+.IP
+How many \fIbytes\fP to query for per request. (defaults to 16384)
+.PP
+\fB\-\-upload_unit_size\fR \fIbytes\fP
+.IP
+when limiting upload rate, how many \fIbytes\fP to send at a time (defaults
+to 1460)
+.PP
+\fB\-\-request_backlog\fR \fInumber\fP
+.IP
+maximum \fInumber\fP of requests to keep in a single pipe at once.
+(defaults to 10)
+.PP
+\fB\-\-max_message_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP prefix encoding you'll accept over the wire - larger
+values get the connection dropped. (defaults to 8388608)
+.PP
+\fB\-\-ip\fR \fIip\fP
+.IP
+\fIip\fP to report you have to the tracker. (defaults to '')
+.PP
+\fB\-\-minport\fR \fIport\fP
+.IP
+minimum \fIport\fP to listen on, counts up if unavailable (defaults to
+10000)
+.PP
+\fB\-\-maxport\fR \fIport\fP
+.IP
+maximum \fIport\fP to listen on (defaults to 60000)
+.PP
+\fB\-\-random_port\fR \fI 0 | 1 \fP
+.IP
+whether to choose randomly inside the port range instead of counting
+up linearly (defaults to 1)
+.PP
+\fB\-\-responsefile\fR \fIfile\fP
+.IP
+\fIfile\fP the server response was stored in, alternative to url (defaults
+to '')
+.PP
+\fB\-\-url\fR \fIURL\fP
+.IP
+\fIURL\fP to get file from, alternative to responsefile (defaults to '')
+.PP
+\fB\-\-selector_enabled\fR \fI 0 | 1 \fP
+.IP
+whether to enable the file selector and fast resume function
+(defaults to 1)
+.PP
+\fB\-\-expire_cache_data\fR \fIdays\fP
+.IP
+the number of \fIdays\fP after which you wish to expire old cache data (0 =
+disabled) (defaults to 10)
+.PP
+\fB\-\-priority\fR \fI -1|0|1|2[,-1|0|1|2] \fP
+.IP
+a list of file priorities separated by commas, must be one per file, 0 = highest, 1 =
+normal, 2 = lowest, \fB\-1\fR = download disabled (defaults to ''). Order is based
+on the file/torrent order as shown by btshowmetainfo. For example, to download only
+the third of four files use: --priority -1,-1,2,-1
+.PP
+\fB\-\-saveas\fR \fIfilename\fP
+.IP
+local \fIfilename\fP to save the file as, null indicates query user (defaults to '')
+.PP
+\fB\-\-timeout\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP to wait before closing sockets on which nothing has been received (defaults to
+300.0)
+.PP
+\fB\-\-timeout_check_interval\fR \fIseconds\fP
+.IP
+time to wait in \fIseconds\fP between checking if any connections have timed out (defaults to 60.0)
+.PP
+\fB\-\-max_slice_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP slice to send to peers, larger requests are ignored (defaults to 131072)
+.PP
+\fB\-\-max_rate_period\fR \fIseconds\fP
+.IP
+maximum amount of time in \fIseconds\fP to guess the current rate estimate represents (defaults to 20.0)
+.PP
+\fB\-\-bind\fR \fIip[,hostname]\fP
+.IP
+comma-separated list of \fIips/hostnames\fP to bind to locally (defaults to '')
+.PP
+\fB\-\-ipv6_enabled\fR \fI 0 | 1 \fP
+.IP
+allow the client to connect to peers via IPv6 (defaults to 0)
+.PP
+\fB\-\-ipv6_binds_v4\fR \fI 0 | 1 \fP
+.IP
+set if an IPv6 server socket will also field IPv4 connections (defaults to 1)
+.PP
+\fB\-\-upnp_nat_access\fR \fI 0 | 1 | 2 \fP
+.IP
+attempt to autoconfigure a UPnP router to forward a server port (0 = disabled, 1 = mode 1
+[fast], 2 = mode 2 [slow]) (defaults to 1)
+.PP
+\fB\-\-upload_rate_fudge\fR \fIseconds\fP
+.IP
+time equivalent in \fIseconds\fP of writing to kernel-level TCP buffer, for rate adjustment (defaults to
+5.0)
+.PP
+\fB\-\-tcp_ack_fudge\fR \fIoverhead\fP
+.IP
+how much TCP ACK download \fIoverhead\fP to add to upload rate calculations (0 = disabled)
+(defaults to 0.03)
+.PP
+\fB\-\-display_interval\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP between updates of displayed information (defaults to 0.5)
+.PP
+\fB\-\-rerequest_interval\fR \fIseconds\fP
+.IP
+time to wait, in \fIseconds\fP, between requesting more peers (defaults to 300)
+.PP
+\fB\-\-min_peers\fR \fInumber\fP
+.IP
+minimum \fInumber\fP of peers to not do rerequesting (defaults to 20)
+.PP
+\fB\-\-http_timeout\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to wait before assuming that an http connection has timed out (defaults
+to 60)
+.PP
+\fB\-\-max_initiate\fR \fInumber\fP
+.IP
+\fInumber\fP of peers at which to stop initiating new connections (defaults to 40)
+.PP
+\fB\-\-check_hashes\fR \fI 0 | 1 \fP
+.IP
+whether to check hashes on disk (defaults to 1)
+.PP
+\fB\-\-max_upload_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to upload at (0 = no limit, \fB\-1\fR = automatic) (defaults to 0)
+.PP
+\fB\-\-max_download_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to download at (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-alloc_type\fR \fInormal | background | pre-allocate | sparse\fP
+.IP
+allocation type (may be normal, background, pre-allocate or sparse) (defaults to
+\&'normal')
+.PP
+\fB\-\-alloc_rate\fR \fIMiB/s\fP
+.IP
+rate (in \fIMiB/s\fP) to allocate space at using background allocation (defaults to 2.0)
+.PP
+\fB\-\-buffer_reads\fR \fI 0 | 1 \fP
+.IP
+whether to buffer disk reads (defaults to 1)
+.PP
+\fB\-\-write_buffer_size\fR \fIspace\fP
+.IP
+the maximum amount of \fIspace\fP to use for buffering disk writes (in megabytes, 0 = disabled)
+(defaults to 4)
+.PP
+\fB\-\-snub_time\fR \fIseconds\fP
+.IP
+\fIseconds\fP to wait for data to come in over a connection before assuming it's
+semi-permanently choked (defaults to 30.0)
+.PP
+\fB\-\-spew\fR \fI 0 | 1 \fP
+.IP
+whether to display diagnostic info to stdout (defaults to 0)
+.PP
+\fB\-\-rarest_first_cutoff\fR \fInumber\fP
+.IP
+\fInumber\fP of downloads at which to switch from random to rarest first (defaults to 2)
+.PP
+\fB\-\-rarest_first_priority_cutoff\fR \fInumber\fP
+.IP
+the \fInumber\fP of peers which need to have a piece before other partials take priority over
+rarest first (defaults to 5)
+.PP
+\fB\-\-min_uploads\fR \fInumber\fP
+.IP
+the \fInumber\fP of uploads to fill out to with extra optimistic unchokes (defaults to 4)
+.PP
+\fB\-\-max_files_open\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of files to keep open at a time, 0 means no limit (defaults to 50)
+.PP
+\fB\-\-round_robin_period\fR \fIseconds\fP
+.IP
+the number of \fIseconds\fP between the client's switching upload targets (defaults to 30)
+.PP
+\fB\-\-super_seeder\fR \fI 0 | 1 \fP
+.IP
+whether to use special upload-efficiency-maximizing routines (only for dedicated seeds)
+(defaults to 0)
+.PP
+\fB\-\-security\fR \fI 0 | 1 \fP
+.IP
+whether to enable extra security features intended to prevent abuse (defaults to 1)
+.PP
+\fB\-\-max_connections\fR \fInumber\fP
+.IP
+the absolute maximum \fInumber\fP of peers to connect with (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-auto_kick\fR \fI 0 | 1 \fP
+.IP
+whether to allow the client to automatically kick/ban peers that send bad data (defaults
+to 1)
+.PP
+\fB\-\-double_check\fR \fI 0 | 1 \fP
+.IP
+whether to double-check data being written to the disk for errors (may increase CPU load)
+(defaults to 1)
+.PP
+\fB\-\-triple_check\fR \fI 0 | 1 \fP
+.IP
+whether to thoroughly check data being written to the disk (may slow disk access)
+(defaults to 0)
+.PP
+\fB\-\-lock_files\fR \fI 0 | 1 \fP
+.IP
+whether to lock files the client is working with (defaults to 1)
+.PP
+\fB\-\-lock_while_reading\fR \fI 0 | 1 \fP
+.IP
+whether to lock access to files being read (defaults to 0)
+.PP
+\fB\-\-auto_flush\fR \fIminutes\fP
+.IP
+\fIminutes\fP between automatic flushes to disk (0 = disabled) (defaults to 0)
+.PP
+\fB\-\-save_options\fR <arg>
+.IP
+whether to save the current options as the new default configuration
+(only for btdownloadheadless.py) (defaults to 0)
+.PP
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR btmakemetafile (1).
+.br
+.SH AUTHOR
+This manual page was written by Micah Anderson <micah at riseup.net>,
+for the Debian GNU/Linux system (but may be used by others).

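The --max_upload_rate and --upload_unit_size options documented above can be pictured as chunked, paced sending: data leaves in upload_unit_size pieces, spaced so the long-run rate stays at or below the configured kB/s.  A simplified sketch of that idea, not the client's actual upload scheduler:

import time

def send_rate_limited(sock, data, max_upload_rate_kB=0, upload_unit_size=1460):
    """Send data in upload_unit_size chunks, sleeping between chunks so the
    average rate stays under max_upload_rate_kB kilobytes per second.
    A rate of 0 means no limit, matching the documented default."""
    if max_upload_rate_kB <= 0:
        sock.sendall(data)
        return
    delay = upload_unit_size / (max_upload_rate_kB * 1024.0)
    for i in range(0, len(data), upload_unit_size):
        sock.sendall(data[i:i + upload_unit_size])
        time.sleep(delay)
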
Added: debtorrent/branches/upstream/current/docs/man/btlaunchmany.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btlaunchmany.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btlaunchmany.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btlaunchmany.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,278 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTLAUNCHMANY "1" "August 2004" "btlaunchmany (bittornado)" "User Commands"
+.SH NAME
+Btlaunchmany \- manual page for btlaunchmany
+.SH SYNOPSIS
+.B btlaunchmany
+\fI<directory> <global options>\fR
+.SH DESCRIPTION
+<directory> - directory to look for .torrent files (semi-recursive)
+.SH OPTIONS
+.PP
+This program follows the usual GNU command-line syntax, with long options
+starting with two dashes ('--'). A summary of options is included below.
+
+.PP
+\fB\-\-max_uploads\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of uploads to allow at once. (defaults to 7)
+.PP
+\fB\-\-keepalive_interval\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to pause between sending keepalives (defaults to 120.0)
+.PP
+\fB\-\-download_slice_size\fR \fIbytes\fP
+.IP
+How many \fIbytes\fP to query for per request. (defaults to 16384)
+.PP
+\fB\-\-upload_unit_size\fR \fIbytes\fP
+.IP
+when limiting upload rate, how many \fIbytes\fP to send at a time (defaults to 1460)
+.PP
+\fB\-\-request_backlog\fR \fInumber\fP
+.IP
+maximum \fInumber\fP of requests to keep in a single pipe at once. (defaults to 10)
+.PP
+\fB\-\-max_message_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP prefix encoding you'll accept over the wire - larger values get the
+connection dropped. (defaults to 8388608)
+.PP
+\fB\-\-ip\fR \fIip\fP
+.IP
+\fIip\fP to report you have to the tracker. (defaults to '')
+.PP
+\fB\-\-minport\fR \fIportnum\fP
+.IP
+set \fIportnum\fP as the minimum port to listen on, counts up if unavailable (defaults to 10000)
+.PP
+\fB\-\-maxport\fR \fIportnum\fP
+.IP
+set \fIportnum\fP as the maximum port to listen on (defaults to 60000)
+.PP
+\fB\-\-random_port\fR \fI 0 | 1 \fP
+.IP
+whether to choose randomly inside the port range instead of counting up linearly
+(defaults to 1)
+.PP
+\fB\-\-responsefile\fR \fIfile\fP
+.IP
+\fIfile\fP the server response was stored in, alternative to url (defaults to '')
+.PP
+\fB\-\-url\fR \fIURL\fP
+.IP
+\fIURL\fP to get file from, alternative to responsefile (defaults to '')
+.PP
+\fB\-\-selector_enabled\fR \fI 0 | 1 \fP
+.IP
+whether to enable the file selector and fast resume function (defaults to 1)
+.PP
+\fB\-\-expire_cache_data\fR \fIdays\fP
+.IP
+the number of \fIdays\fP after which you wish to expire old cache data (0 = disabled) (defaults
+to 10)
+.PP
+\fB\-\-priority\fR \fI -1|0|1|2[,-1|0|1|2] \fP
+.IP
+a list of file priorities separated by commas, must be one per file, 0 = highest, 1 =
+normal, 2 = lowest, \fB\-1\fR = download disabled (defaults to ''). Order is based
+on the file/torrent order as shown by btshowmetainfo. For example, to download only
+the third of four files use: --priority -1,-1,2,-1
+.PP
+\fB\-\-saveas\fR \fIfilename\fP
+.IP
+local \fIfilename\fP to save the file as, null indicates query user (defaults to '')
+.PP
+\fB\-\-timeout\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP to wait before closing sockets on which nothing has been received (defaults to
+300.0)
+.PP
+\fB\-\-timeout_check_interval\fR \fIseconds\fP
+.IP
+time to wait in \fIseconds\fP between checking if any connections have timed out (defaults to 60.0)
+.PP
+\fB\-\-max_slice_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP slice to send to peers, larger requests are ignored (defaults to 131072)
+.PP
+\fB\-\-max_rate_period\fR \fIseconds\fP
+.IP
+maximum amount of time in \fIseconds\fP to guess the current rate estimate represents (defaults to 20.0)
+.PP
+\fB\-\-bind\fR \fIip[,hostname]\fP
+.IP
+comma-separated list of \fIips/hostnames\fP to bind to locally (defaults to '')
+.PP
+\fB\-\-ipv6_enabled\fR \fI 0 | 1 \fP
+.IP
+allow the client to connect to peers via IPv6 (defaults to 0)
+.PP
+\fB\-\-ipv6_binds_v4\fR \fI 0 | 1 \fP
+.IP
+set if an IPv6 server socket will also field IPv4 connections (defaults to 1)
+.PP
+\fB\-\-upnp_nat_access\fR \fI 0 | 1 | 2 \fP
+.IP
+attempt to autoconfigure a UPnP router to forward a server port (0 = disabled, 1 = mode 1
+[fast], 2 = mode 2 [slow]) (defaults to 1)
+.PP
+\fB\-\-upload_rate_fudge\fR \fIseconds\fP
+.IP
+time equivalent in \fIseconds\fP of writing to kernel-level TCP buffer, for rate adjustment (defaults to
+5.0)
+.PP
+\fB\-\-tcp_ack_fudge\fR \fIoverhead\fP
+.IP
+how much TCP ACK download \fIoverhead\fP to add to upload rate calculations (0 = disabled)
+(defaults to 0.03)
+.PP
+\fB\-\-display_interval\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP between updates of displayed information (defaults to 0.5)
+.PP
+\fB\-\-rerequest_interval\fR \fIseconds\fP
+.IP
+time to wait, in \fIseconds\fP, between requesting more peers (defaults to 300)
+.PP
+\fB\-\-min_peers\fR \fInumber\fP
+.IP
+minimum \fInumber\fP of peers needed before rerequesting stops (defaults to 20)
+.PP
+\fB\-\-http_timeout\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to wait before assuming that an http connection has timed out (defaults
+to 60)
+.PP
+\fB\-\-max_initiate\fR \fInumber\fP
+.IP
+\fInumber\fP of peers at which to stop initiating new connections (defaults to 40)
+.PP
+\fB\-\-check_hashes\fR \fI 0 | 1 \fP
+.IP
+whether to check hashes on disk (defaults to 1)
+.PP
+\fB\-\-max_upload_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to upload at (0 = no limit, \fB\-1\fR = automatic) (defaults to 0)
+.PP
+\fB\-\-max_download_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to download at (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-alloc_type\fR \fInormal | background | pre-allocate | sparse\fP
+.IP
+allocation type (may be normal, background, pre-allocate or sparse) (defaults to
+\&'normal')
+.PP
+\fB\-\-alloc_rate\fR \fIMiB/s\fP
+.IP
+rate (in \fIMiB/s\fP) to allocate space at using background allocation (defaults to 2.0)
+.PP
+\fB\-\-buffer_reads\fR \fI 0 | 1 \fP
+.IP
+whether to buffer disk reads (defaults to 1)
+.PP
+\fB\-\-write_buffer_size\fR \fIspace\fP
+.IP
+the maximum amount of \fIspace\fP to use for buffering disk writes (in megabytes, 0 = disabled)
+(defaults to 4)
+.PP
+\fB\-\-snub_time\fR \fIseconds\fP
+.IP
+\fIseconds\fP to wait for data to come in over a connection before assuming it's
+semi-permanently choked (defaults to 30.0)
+.PP
+\fB\-\-spew\fR \fI 0 | 1 \fP
+.IP
+whether to display diagnostic info to stdout (defaults to 0)
+.PP
+\fB\-\-rarest_first_cutoff\fR \fInumber\fP
+.IP
+\fInumber\fP of downloads at which to switch from random to rarest first (defaults to 2)
+.PP
+\fB\-\-rarest_first_priority_cutoff\fR \fInumber\fP
+.IP
+the \fInumber\fP of peers which need to have a piece before other partials take priority over
+rarest first (defaults to 5)
+.PP
+\fB\-\-min_uploads\fR \fInumber\fP
+.IP
+the \fInumber\fP of uploads to fill out to with extra optimistic unchokes (defaults to 4)
+.PP
+\fB\-\-max_files_open\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of files to keep open at a time, 0 means no limit (defaults to 50)
+.PP
+\fB\-\-round_robin_period\fR \fIseconds\fP
+.IP
+the number of \fIseconds\fP between the client's switching upload targets (defaults to 30)
+.PP
+\fB\-\-super_seeder\fR \fI 0 | 1 \fP
+.IP
+whether to use special upload-efficiency-maximizing routines (only for dedicated seeds)
+(defaults to 0)
+.PP
+\fB\-\-security\fR \fI 0 | 1 \fP
+.IP
+whether to enable extra security features intended to prevent abuse (defaults to 1)
+.PP
+\fB\-\-max_connections\fR \fInumber\fP
+.IP
+the absolute maximum \fInumber\fP of peers to connect with (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-auto_kick\fR \fI 0 | 1 \fP
+.IP
+whether to allow the client to automatically kick/ban peers that send bad data (defaults
+to 1)
+.PP
+\fB\-\-double_check\fR \fI 0 | 1 \fP
+.IP
+whether to double-check data being written to the disk for errors (may increase CPU load)
+(defaults to 1)
+.PP
+\fB\-\-triple_check\fR \fI 0 | 1 \fP
+.IP
+whether to thoroughly check data being written to the disk (may slow disk access)
+(defaults to 0)
+.PP
+\fB\-\-lock_files\fR \fI 0 | 1 \fP
+.IP
+whether to lock files the client is working with (defaults to 1)
+.PP
+\fB\-\-lock_while_reading\fR \fI 0 | 1 \fP
+.IP
+whether to lock access to files being read (defaults to 0)
+.PP
+\fB\-\-auto_flush\fR \fIminutes\fP
+.IP
+\fIminutes\fP between automatic flushes to disk (0 = disabled) (defaults to 0)
+.PP
+\fB\-\-parse_dir_interval\fR \fIseconds\fP
+.IP
+how often to rescan the torrent directory, in \fIseconds\fP (defaults to
+60)
+.PP
+\fB\-\-saveas_style\fR \fI 1 | 2 | 3 \fP
+.IP
+How to name torrent downloads (1 = rename to torrent name, 2 = save
+under name in torrent, 3 = save in directory under torrent name)
+(defaults to 1)
+.PP
+\fB\-\-display_path\fR \fI 0 | 1 \fP
+.IP
+whether to display the full path or the torrent contents for each
+torrent (defaults to 1)
+.PP
+\fB\-\-save_options\fR \fI 0 | 1 \fP
+.IP
+whether to save the current options as the new default configuration
+(only for btlaunchmany) (defaults to 0)
+.PP
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR btmakemetafile (1).
+.br
+.SH AUTHOR
+This manual page was written by Micah Anderson <micah at riseup.net>,
+for the Debian GNU/Linux system (but may be used by others).

Added: debtorrent/branches/upstream/current/docs/man/btlaunchmanycurses.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btlaunchmanycurses.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btlaunchmanycurses.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btlaunchmanycurses.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,280 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTLAUNCHMANYCURSES "1" "August 2004" "btlaunchmanycurses (bittornado)"
+.SH NAME
+Btlaunchmanycurses \- launch multiple torrent downloads, curses interface
+.SH SYNOPSIS
+.B btlaunchmanycurses
+\fI<directory> <global options>\fR
+.SH DESCRIPTION
+Launches a separate torrent download thread for each .torrent file in
+the directory specified by <directory>, using the curses interface.
+.SH OPTIONS
+This program follows the usual GNU command-line syntax, with long options
+starting with two dashes ('--'). A summary of options is included below.
+.PP
+<directory> - directory to look for .torrent files (semi-recursive)
+.PP
+\fB\-\-max_uploads\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of uploads to allow at once. (defaults to 7)
+.PP
+\fB\-\-keepalive_interval\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to pause between sending keepalives (defaults to 120.0)
+.PP
+\fB\-\-download_slice_size\fR \fIbytes\fP
+.IP
+How many \fIbytes\fP to query for per request. (defaults to 16384)
+.PP
+\fB\-\-upload_unit_size\fR \fIbytes\fP
+.IP
+when limiting upload rate, how many \fIbytes\fP to send at a time (defaults to 1460)
+.PP
+\fB\-\-request_backlog\fR \fInumber\fP
+.IP
+maximum \fInumber\fP of requests to keep in a single pipe at once. (defaults to 10)
+.PP
+\fB\-\-max_message_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP prefix encoding you'll accept over the wire - larger values get the
+connection dropped. (defaults to 8388608)
+.PP
+\fB\-\-ip\fR \fIip\fP
+.IP
+\fIip\fP to report to the tracker as your address. (defaults to '')
+.PP
+\fB\-\-minport\fR \fIportnum\fP
+.IP
+set \fIportnum\fP as the minimum port to listen on, counts up if unavailable (defaults to 10000)
+.PP
+\fB\-\-maxport\fR \fIportnum\fP
+.IP
+set \fIportnum\fP as the maximum port to listen on (defaults to 60000)
+.PP
+\fB\-\-random_port\fR \fI 0 | 1 \fP
+.IP
+whether to choose randomly inside the port range instead of counting up linearly
+(defaults to 1)
+.PP
+\fB\-\-responsefile\fR \fIfile\fP
+.IP
+\fIfile\fP the server response was stored in, alternative to url (defaults to '')
+.PP
+\fB\-\-url\fR \fIURL\fP
+.IP
+\fIURL\fP to get file from, alternative to responsefile (defaults to '')
+.PP
+\fB\-\-selector_enabled\fR \fI 0 | 1 \fP
+.IP
+whether to enable the file selector and fast resume function (defaults to 1)
+.PP
+\fB\-\-expire_cache_data\fR \fIdays\fP
+.IP
+the number of \fIdays\fP after which you wish to expire old cache data (0 = disabled) (defaults
+to 10)
+.PP
+\fB\-\-priority\fR \fI -1|0|1|2[,-1|0|1|2] \fP
+.IP
+a list of file priorities separated by commas, must be one per file, 0 = highest, 1 =
+normal, 2 = lowest, \fB\-1\fR = download disabled (defaults to ''). Order is based
+on the file/torrent order as shown by btshowmetainfo. For example, to download only
+the third of four files use: --priority -1,-1,2,-1
+.PP
+\fB\-\-saveas\fR \fIfilename\fP
+.IP
+local \fIfilename\fP to save the file as, null indicates query user (defaults to '')
+.PP
+\fB\-\-timeout\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP to wait between closing sockets which nothing has been received on (defaults to
+300.0)
+.PP
+\fB\-\-timeout_check_interval\fR \fIseconds\fP
+.IP
+time to wait in \fIseconds\fP between checking if any connections have timed out (defaults to 60.0)
+.PP
+\fB\-\-max_slice_length\fR \fIlength\fP
+.IP
+maximum \fIlength\fP slice to send to peers, larger requests are ignored (defaults to 131072)
+.PP
+\fB\-\-max_rate_period\fR \fIseconds\fP
+.IP
+maximum amount of time in \fIseconds\fP to guess the current rate estimate represents (defaults to 20.0)
+.PP
+\fB\-\-bind\fR \fIip[,hostname]\fP
+.IP
+comma-separated list of \fIips/hostnames\fP to bind to locally (defaults to '')
+.PP
+\fB\-\-ipv6_enabled\fR \fI 0 | 1 \fP
+.IP
+allow the client to connect to peers via IPv6 (defaults to 0)
+.PP
+\fB\-\-ipv6_binds_v4\fR \fI 0 | 1 \fP
+.IP
+set if an IPv6 server socket will also field IPv4 connections (defaults to 1)
+.PP
+\fB\-\-upnp_nat_access\fR \fI 0 | 1 | 2 \fP
+.IP
+attempt to autoconfigure a UPnP router to forward a server port (0 = disabled, 1 = mode 1
+[fast], 2 = mode 2 [slow]) (defaults to 1)
+.PP
+\fB\-\-upload_rate_fudge\fR \fIseconds\fP
+.IP
+time equivalent in \fIseconds\fP of writing to kernel-level TCP buffer, for rate adjustment (defaults to
+5.0)
+.PP
+\fB\-\-tcp_ack_fudge\fR \fIoverhead\fP
+.IP
+how much TCP ACK download \fIoverhead\fP to add to upload rate calculations (0 = disabled)
+(defaults to 0.03)
+.PP
+\fB\-\-display_interval\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP between updates of displayed information (defaults to 0.5)
+.PP
+\fB\-\-rerequest_interval\fR \fIseconds\fP
+.IP
+time to wait, in \fIseconds\fP, between requesting more peers (defaults to 300)
+.PP
+\fB\-\-min_peers\fR \fInumber\fP
+.IP
+minimum \fInumber\fP of peers needed before rerequesting stops (defaults to 20)
+.PP
+\fB\-\-http_timeout\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP to wait before assuming that an http connection has timed out (defaults
+to 60)
+.PP
+\fB\-\-max_initiate\fR \fInumber\fP
+.IP
+\fInumber\fP of peers at which to stop initiating new connections (defaults to 40)
+.PP
+\fB\-\-check_hashes\fR \fI 0 | 1 \fP
+.IP
+whether to check hashes on disk (defaults to 1)
+.PP
+\fB\-\-max_upload_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to upload at (0 = no limit, \fB\-1\fR = automatic) (defaults to 0)
+.PP
+\fB\-\-max_download_rate\fR \fIkB/s\fP
+.IP
+maximum \fIkB/s\fP to download at (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-alloc_type\fR \fInormal | background | pre-allocate | sparse\fP
+.IP
+allocation type (may be normal, background, pre-allocate or sparse) (defaults to
+\&'normal')
+.PP
+\fB\-\-alloc_rate\fR \fIMiB/s\fP
+.IP
+rate (in \fIMiB/s\fP) to allocate space at using background allocation (defaults to 2.0)
+.PP
+\fB\-\-buffer_reads\fR \fI 0 | 1 \fP
+.IP
+whether to buffer disk reads (defaults to 1)
+.PP
+\fB\-\-write_buffer_size\fR \fIspace\fP
+.IP
+the maximum amount of \fIspace\fP to use for buffering disk writes (in megabytes, 0 = disabled)
+(defaults to 4)
+.PP
+\fB\-\-snub_time\fR \fIseconds\fP
+.IP
+\fIseconds\fP to wait for data to come in over a connection before assuming it's
+semi-permanently choked (defaults to 30.0)
+.PP
+\fB\-\-spew\fR \fI 0 | 1 \fP
+.IP
+whether to display diagnostic info to stdout (defaults to 0)
+.PP
+\fB\-\-rarest_first_cutoff\fR \fInumber\fP
+.IP
+\fInumber\fP of downloads at which to switch from random to rarest first (defaults to 2)
+.PP
+\fB\-\-rarest_first_priority_cutoff\fR \fInumber\fP
+.IP
+the \fInumber\fP of peers which need to have a piece before other partials take priority over
+rarest first (defaults to 5)
+.PP
+\fB\-\-min_uploads\fR \fInumber\fP
+.IP
+the \fInumber\fP of uploads to fill out to with extra optimistic unchokes (defaults to 4)
+.PP
+\fB\-\-max_files_open\fR \fInumber\fP
+.IP
+the maximum \fInumber\fP of files to keep open at a time, 0 means no limit (defaults to 50)
+.PP
+\fB\-\-round_robin_period\fR \fIseconds\fP
+.IP
+the number of \fIseconds\fP between the client's switching upload targets (defaults to 30)
+.PP
+\fB\-\-super_seeder\fR \fI 0 | 1 \fP
+.IP
+whether to use special upload-efficiency-maximizing routines (only for dedicated seeds)
+(defaults to 0)
+.PP
+\fB\-\-security\fR \fI 0 | 1 \fP
+.IP
+whether to enable extra security features intended to prevent abuse (defaults to 1)
+.PP
+\fB\-\-max_connections\fR \fInumber\fP
+.IP
+the absolute maximum \fInumber\fP of peers to connect with (0 = no limit) (defaults to 0)
+.PP
+\fB\-\-auto_kick\fR \fI 0 | 1 \fP
+.IP
+whether to allow the client to automatically kick/ban peers that send bad data (defaults
+to 1)
+.PP
+\fB\-\-double_check\fR \fI 0 | 1 \fP
+.IP
+whether to double-check data being written to the disk for errors (may increase CPU load)
+(defaults to 1)
+.PP
+\fB\-\-triple_check\fR \fI 0 | 1 \fP
+.IP
+whether to thoroughly check data being written to the disk (may slow disk access)
+(defaults to 0)
+.PP
+\fB\-\-lock_files\fR \fI 0 | 1 \fP
+.IP
+whether to lock files the client is working with (defaults to 1)
+.PP
+\fB\-\-lock_while_reading\fR \fI 0 | 1 \fP
+.IP
+whether to lock access to files being read (defaults to 0)
+.PP
+\fB\-\-auto_flush\fR \fIminutes\fP
+.IP
+\fIminutes\fP between automatic flushes to disk (0 = disabled) (defaults to 0)
+.PP
+\fB\-\-parse_dir_interval\fR \fIseconds\fP
+.IP
+how often to rescan the torrent directory, in \fIseconds\fP (defaults to
+60)
+.PP
+\fB\-\-saveas_style\fR \fI 1 | 2 | 3 \fP
+.IP
+How to name torrent downloads (1 = rename to torrent name, 2 = save
+under name in torrent, 3 = save in directory under torrent name)
+(defaults to 2)
+.PP
+\fB\-\-display_path\fR \fI 0 | 1 \fP
+.IP
+whether to display the full path or the torrent contents for each
+torrent (defaults to 0)
+.PP
+\fB\-\-save_options\fR \fI 0 | 1 \fP
+.IP
+whether to save the current options as the new default configuration
+(only for btlaunchmanycurses.py) (defaults to 0)
+.PP
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR btmakemetafile (1).
+.br
+.SH AUTHOR
+This manual page was written by Micah Anderson <micah at riseup.net>,
+for the Debian GNU/Linux system (but may be used by others).
+

Added: debtorrent/branches/upstream/current/docs/man/btmakemetafile.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btmakemetafile.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btmakemetafile.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btmakemetafile.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,72 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTMAKEMETAFILE "1" "September 2004" 
+.SH NAME
+Btmakemetafile \- manual page for btmakemetafile <trackerurl> <file> [file...] [params...]
+.SH SYNOPSIS
+.B btmakemetafile
+\fI<trackerurl> <file> \fR[\fIfile\fR...] [\fIparams\fR...]
+.SH DESCRIPTION
+\fBbtmakemetafile\fP is a program that is used to generate the torrent info files
+that are distributed to bittorrent clients (usually over http) to inform them
+of where the designated tracker for the torrent is located, and to enable them
+to verify the file's contents.
+.PP
+\fBbtmakemetafile\fP requires two arguments. The first is the
+"announce" address of a tracker (ex. http://127.0.0.1:6969/announce),
+and the second is the file for which you
+wish to generate a torrent info file.
+.PP
+\fB\-\-piece_size_pow2\fR \fInum\fP
+.IP
+which power of 2 to set the piece size to (0 = automatic) (defaults
+to 0)
+.PP
+\fB\-\-comment\fR \fIcomment\fP
+.IP
+optional human-readable \fIcomment\fP to put in .torrent (defaults to '')
+.PP
+\fB\-\-target\fR \fIfile\fP
+.IP
+optional target \fIfile\fP for the torrent (defaults to '')
+.PP
+\fB\-\-announce_list\fR \fIURLs\fP
+.IP
+a list of announce \fIURLs\fP (defaults to ''). An optional list of 
+redundant/backup tracker \fIURLs\fP, in the format:
+.IP
+url[,url...][|url[,url...]...]
+.IP
+where URLs separated by commas are all tried first
+before the next group of URLs separated by the pipe is checked.
+If none is given, it is assumed you don't want one in the metafile.
+If announce_list is given, clients which support it
+will ignore the <announce> value.
+.IP
+Examples:
+.IP
+http://tracker1.com|http://tracker2.com|http://tracker3.com
+.IP
+(tries trackers 1-3 in order)
+.IP
+http://tracker1.com,http://tracker2.com,http://tracker3.com
+.IP
+(tries trackers 1-3 in a randomly selected order)
+.IP
+http://tracker1.com|http://backup1.com,http://backup2.com
+.IP
+(tries tracker 1 first, then tries between the 2 backups randomly)
+.PP
+makes a .torrent file for every file or directory present in each directory given.
+.PP
+.SH "SEE ALSO"
+http://bittornado.org
+.br
+.BR bittorrent-downloader (1),
+.BR btrename (1),
+.BR btreannounce (1),
+.BR bttrack (1).
+.br
+.SH AUTHOR
+This manual page was written by Micah Anderson <micah at riseup.net>,
+for the Debian GNU/Linux system (but may be used by others).

Added: debtorrent/branches/upstream/current/docs/man/btmaketorrentgui.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btmaketorrentgui.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btmaketorrentgui.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btmaketorrentgui.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,53 @@
+.\"                                      Hey, EMACS: -*- nroff -*-
+.\" First parameter, NAME, should be all caps
+.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
+.\" other parameters are allowed: see man(7), man(1)
+.TH "BTMAKETORRENTGUI" 1 "Sep 3 2004"
+.\" Please adjust this date whenever revising the manpage.
+.\"
+.\" Some roff macros, for reference:
+.\" .nh        disable hyphenation
+.\" .hy        enable hyphenation
+.\" .ad l      left justify
+.\" .ad b      justify to both left and right margins
+.\" .nf        disable filling
+.\" .fi        enable filling
+.\" .br        insert line break
+.\" .sp <n>    insert n+1 empty lines
+.\" for manpage-specific macros, see man(7)
+.SH NAME
+btmaketorrentgui \- program to generate torrent info files for bittorrent
+.SH SYNOPSIS
+.B btmaketorrentgui 
+.SH DESCRIPTION
+This manual page documents briefly the \fBbtmaketorrentgui\fP
+command.
+This manual page was written for the Debian distribution
+because the original program does not have a manual page.
+.PP
+.\" TeX users may be more comfortable with the \fB<whatever>\fP and
+.\" \fI<whatever>\fP escape sequences to invode bold face and italics, 
+.\" respectively.
+\fBbtmaketorrentgui\fP is a GUI interface to btmakemetafile; it generates 
+the torrent info files which are distributed to bittorrent clients 
+(usually via the WWW) in order to inform them where the designated tracker 
+for the torrent is located, and to allow them to verify the file's contents.
+.PP
+\fBbtmaketorrentgui\fP does not require arguments as btmakemetafile does;
+everything is done in the GUI. The first box in the GUI is the file that
+the torrent info file will be generated for; the second is the "announce"
+address of a tracker (ex. http://my.tracker:6969/announce). The third box
+is an optional list of announce URLs, separated by commas or whitespace,
+possibly on several lines. Additionally, you can set a piece size or an
+optional comment.
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR btrename (1),
+.BR btreannounce (1),
+.BR bttrack (1).
+.br
+.SH AUTHOR
+This manual page was written by Micah Anderson <micah at riseup.net> and was
+based on the manual page for btmakemetafile written for the bittorrent package
+by Michael Janssen <jamuraa at debian.org> for the Debian GNU/Linux system 
+(but may be used by others).

Added: debtorrent/branches/upstream/current/docs/man/btreannounce.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btreannounce.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btreannounce.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btreannounce.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,44 @@
+.\"                                      Hey, EMACS: -*- nroff -*-
+.\" First parameter, NAME, should be all caps
+.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
+.\" other parameters are allowed: see man(7), man(1)
+.TH "BTREANNOUNCE" 1 "Jan 18 2003"
+.\" Please adjust this date whenever revising the manpage.
+.\"
+.\" Some roff macros, for reference:
+.\" .nh        disable hyphenation
+.\" .hy        enable hyphenation
+.\" .ad l      left justify
+.\" .ad b      justify to both left and right margins
+.\" .nf        disable filling
+.\" .fi        enable filling
+.\" .br        insert line break
+.\" .sp <n>    insert n+1 empty lines
+.\" for manpage-specific macros, see man(7)
+.SH NAME
+btreannounce \- change the announce address of a torrent file.
+.SH SYNOPSIS
+\fBbtreannounce\fP \fIurl\fP \fIoldtorrent\fP \fInewtorrent\fP
+.SH DESCRIPTION
+This manual page documents briefly the \fBbtreannounce\fP
+command.
+This manual page was written for the Debian distribution
+because the original program does not have a manual page.
+.PP
+.\" TeX users may be more comfortable with the \fB<whatever>\fP and
+.\" \fI<whatever>\fP escape sequences to invode bold face and italics, 
+.\" respectively.
+\fBbtreannounce\fP is a program which will change the announce 
+address of an existing torrent file.  The torrent specified by 
+the \fIoldtorrent\fP argument will be modified to use the new 
+announce url given by the \fIurl\fP argument, and the changed 
+file will be saved as \fInewtorrent\fP.
+
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR btmakemetafile (1),
+.BR btrename (1).
+.br
+.SH AUTHOR
+This manual page was written by Michael Janssen <jamuraa at debian.org>,
+for the Debian GNU/Linux system (but may be used by others).

Added: debtorrent/branches/upstream/current/docs/man/btrename.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btrename.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btrename.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btrename.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,44 @@
+.\"                                      Hey, EMACS: -*- nroff -*-
+.\" First parameter, NAME, should be all caps
+.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
+.\" other parameters are allowed: see man(7), man(1)
+.TH "BTRENAME" 1 "Jan 18 2003"
+.\" Please adjust this date whenever revising the manpage.
+.\"
+.\" Some roff macros, for reference:
+.\" .nh        disable hyphenation
+.\" .hy        enable hyphenation
+.\" .ad l      left justify
+.\" .ad b      justify to both left and right margins
+.\" .nf        disable filling
+.\" .fi        enable filling
+.\" .br        insert line break
+.\" .sp <n>    insert n+1 empty lines
+.\" for manpage-specific macros, see man(7)
+.SH NAME
+btrename \- change the suggested filename inside a bittorrent file
+.SH SYNOPSIS
+\fBbtrename\fP \fItorrent\fP \fInewfilename\fP
+.SH DESCRIPTION
+This manual page documents briefly the \fBbtrename\fP
+command.
+This manual page was written for the Debian distribution
+because the original program does not have a manual page.
+.PP
+.\" TeX users may be more comfortable with the \fB<whatever>\fP and
+.\" \fI<whatever>\fP escape sequences to invode bold face and italics, 
+.\" respectively.
+\fBbtrename\fP is a program which will change the suggested 
+filename presented to the user for a bittorrent file. This 
+will make the downloader save it as a different name by default.
+The torrent specified by the \fItorrent\fP option is modified 
+in-place, changing the suggested filename to \fInewfilename\fP.
+
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR btmakemetafile (1),
+.BR btreannounce (1).
+.br
+.SH AUTHOR
+This manual page was written by Michael Janssen <jamuraa at debian.org>,
+for the Debian GNU/Linux system (but may be used by others).

Added: debtorrent/branches/upstream/current/docs/man/btsethttpseeds.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btsethttpseeds.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btsethttpseeds.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btsethttpseeds.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,26 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTSETHTTPSEEDS "1" "May 2004" "btsethttpseeds" "User Commands"
+.SH NAME
+Btsethttpseeds \- manual page for btsethttpseeds
+.SH SYNOPSIS
+.B btsethttpseeds
+\fI<http-seeds> file1.torrent \fR[\fIfile2.torrent\fR...]
+.SH DESCRIPTION
+.IP
+Where:
+.IP
+http-seeds = list of seed URLs, in the format:
+.IP
+url[|url...] or 0
+.IP
+if the list is a zero, any http seeds will be stripped.
+.SH "SEE ALSO"
+http://bittornado.org

Added: debtorrent/branches/upstream/current/docs/man/btshowmetainfo.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/btshowmetainfo.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/btshowmetainfo.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/btshowmetainfo.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,40 @@
+.\"                                      Hey, EMACS: -*- nroff -*-
+.\" First parameter, NAME, should be all caps
+.\" Second parameter, SECTION, should be 1-8, maybe w/ subsection
+.\" other parameters are allowed: see man(7), man(1)
+.TH "BTSHOWMETAINFO" 1 "Jan 18 2003"
+.\" Please adjust this date whenever revising the manpage.
+.\"
+.\" Some roff macros, for reference:
+.\" .nh        disable hyphenation
+.\" .hy        enable hyphenation
+.\" .ad l      left justify
+.\" .ad b      justify to both left and right margins
+.\" .nf        disable filling
+.\" .fi        enable filling
+.\" .br        insert line break
+.\" .sp <n>    insert n+1 empty lines
+.\" for manpage-specific macros, see man(7)
+.SH NAME
+btshowmetainfo \- display information in bittorrent files
+.SH SYNOPSIS
+\fBbtshowmetainfo\fP \fIfile\fP [ file ... ] 
+.SH DESCRIPTION
+This manual page documents briefly the \fBbtshowmetainfo\fP
+command.
+This manual page was written for the Debian distribution
+because the original program does not have a manual page.
+.PP
+.\" TeX users may be more comfortable with the \fB<whatever>\fP and
+.\" \fI<whatever>\fP escape sequences to invode bold face and italics, 
+.\" respectively.
+\fBbtshowmetainfo\fP is a program which will display the information
+stored in a bittorrent file. 
+
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR btmakemetafile (1).
+.br
+.SH AUTHOR
+This manual page was written by Michael Janssen <jamuraa at debian.org>,
+for the Debian GNU/Linux system (but may be used by others).

Added: debtorrent/branches/upstream/current/docs/man/bttrack.bittornado.1
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/man/bttrack.bittornado.1?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/man/bttrack.bittornado.1 (added)
+++ debtorrent/branches/upstream/current/docs/man/bttrack.bittornado.1 Sat Apr 14 18:47:18 2007
@@ -1,0 +1,189 @@
+.\" DO NOT MODIFY THIS FILE!  It was generated by help2man 1.33.
+.TH BTTRACK "1" "September 2004" 
+.SH NAME
+Bttrack \- tracker for bittornado
+.SH SYNOPSIS
+.B bttrack \fI --dfile <file> <args>
+.SH DESCRIPTION
+\fBbttrack\fP is a "tracker" for bittorrent clients. This program keeps track
+of the completion status of each client, and communicates that information
+to other clients when requested.
+.SH OPTIONS
+This program follows the usual GNU command-line syntax, with long options
+starting with two dashes ('--'). A summary of options is included below.
+
+\fB\-\-port\fR \fIportnum\fP
+.IP
+\fIportnum\fP to listen on. (defaults to 80)
+.PP
+\fB\-\-dfile\fR \fIfilename\fP
+.IP
+\fIfilename\fP to store recent downloader info in
+.PP
+\fB\-\-bind\fR \fIip,ip,ip\fP
+.IP
+comma-separated list of ips/hostnames to bind to locally (defaults to
+\&'')
+.PP
+\fB\-\-ipv6_enabled\fR \fI 0 | 1 \fP
+.IP
+allow the client to connect to peers via IPv6 (defaults to 0)
+.PP
+\fB\-\-ipv6_binds_v4\fR \fI 0 | 1 \fP
+.IP
+set if an IPv6 server socket will also field IPv4 connections
+(defaults to 1)
+.PP
+\fB\-\-socket_timeout\fR \fIseconds\fP
+.IP
+\fIseconds\fP timeout for closing connections (defaults to 15)
+.PP
+\fB\-\-save_dfile_interval\fR \fIseconds\fP
+.IP
+\fIseconds\fP between saving dfile (defaults to 300)
+.PP
+\fB\-\-timeout_downloaders_interval\fR \fIseconds\fP
+.IP
+\fIseconds\fP between expiring downloaders (defaults to 2700)
+.PP
+\fB\-\-reannounce_interval\fR \fIseconds\fP
+.IP
+\fIseconds\fP downloaders should wait between reannouncements (defaults to
+1800)
+.PP
+\fB\-\-response_size\fR \fInum\fP
+.IP
+\fInum\fP of peers to send in an info message (defaults to 50)
+.PP
+\fB\-\-timeout_check_interval\fR \fIseconds\fP
+.IP
+time in \fIseconds\fP to wait between checking if any connections have timed out
+(defaults to 5)
+.PP
+\fB\-\-nat_check\fR \fInum\fP
+.IP
+\fInum\fP times to check if a downloader is behind a NAT (0 = don't
+check) (defaults to 3)
+.PP
+\fB\-\-log_nat_checks\fR \fI 0 | 1 \fP
+.IP
+whether to add entries to the log for nat-check results (defaults to
+0)
+.PP
+\fB\-\-min_time_between_log_flushes\fR \fIseconds\fP
+.IP
+minimum time in \fIseconds\fP it must have been since the last flush to do another one
+(defaults to 3.0)
+.PP
+\fB\-\-min_time_between_cache_refreshes\fR \fIseconds\fP
+.IP
+minimum time in \fIseconds\fP before a cache is considered stale and is
+flushed (defaults to 600.0)
+.PP
+\fB\-\-allowed_dir\fR \fIdirectory\fP
+.IP
+only allow downloads for .torrents in this \fIdirectory\fP (defaults to '')
+.PP
+\fB\-\-allowed_controls\fR \fI 0 | 1 \fP
+.IP
+allow special keys in torrents in the allowed_dir to affect tracker
+access (defaults to 0)
+.PP
+\fB\-\-multitracker_enabled\fR \fI 0 | 1 \fP
+.IP
+whether to enable multitracker operation (defaults to 0)
+.PP
+\fB\-\-multitracker_allowed\fR \fI autodetect | none | all \fP
+.IP
+whether to allow incoming tracker announces (can be none, autodetect
+or \fIall\fP) (defaults to 'autodetect')
+.PP
+\fB\-\-multitracker_reannounce_interval\fR \fIseconds\fP
+.IP
+number of \fIseconds\fP between outgoing tracker announces (defaults to 120)
+.PP
+\fB\-\-multitracker_maxpeers\fR \fInum\fP
+.IP
+\fInum\fP of peers to get in a tracker announce (defaults to 20)
+.PP
+\fB\-\-aggregate_forward\fR \fIurl[,<password>]\fP
+.IP
+if set, forwards all non-multitracker requests to
+this \fIurl\fP with this optional \fIpassword\fP (defaults to '')
+.PP
+\fB\-\-aggregator\fR \fI 0 | 1 | <password> \fP
+.IP
+whether to act as a data aggregator rather than a tracker. If
+enabled, may be 1, or <password>; if password is set, then an
+incoming password is required for access (defaults to '0')
+.PP
+\fB\-\-hupmonitor\fR \fI 0 | 1 \fP
+.IP
+whether to reopen the log file upon receipt of HUP signal (defaults
+to 0)
+.PP
+\fB\-\-http_timeout\fR \fI seconds \fP
+.IP
+number of \fIseconds\fP to wait before assuming that an http connection has
+timed out (defaults to 60)
+.PP
+\fB\-\-parse_dir_interval\fR \fIseconds\fP
+.IP
+\fIseconds\fP between reloading of allowed_dir (defaults to 60)
+.PP
+\fB\-\-show_infopage\fR \fI 0 | 1 \fP
+.IP
+whether to display an info page when the tracker's root dir is loaded
+(defaults to 1)
+.PP
+\fB\-\-infopage_redirect\fR \fIURL\fP
+.IP
+a \fIURL\fP to redirect the info page to (defaults to '')
+.PP
+\fB\-\-show_names\fR \fI0 | 1\fP
+.IP
+whether to display names from allowed dir (defaults to 1)
+.PP
+\fB\-\-favicon\fR \fIfilename\fP
+.IP
+\fIfilename\fP containing x-icon data to return when browser requests
+favicon.ico (defaults to '')
+.PP
+\fB\-\-allowed_ips\fR \fIfile\fP
+.IP
+only allow connections from IPs specified in the given \fIfile\fP; 
+contains subnet data in the format: aa.bb.cc.dd/len (defaults to '')
+.PP
+\fB\-\-only_local_override_ip\fR \fI 0 | 1 | 2 \fP
+.IP
+ignore the ip GET parameter from machines which aren't on local
+network IPs (0 = never, 1 = always, 2 = ignore if NAT checking is not
+enabled) (defaults to 2)
+.PP
+\fB\-\-logfile\fR \fIfile\fP
+.IP
+\fIfile\fP to write the tracker logs, use - for stdout (default) (defaults
+to '')
+.PP
+\fB\-\-allow_get\fR \fI0 | 1\fP
+.IP
+use with allowed_dir; adds a /file?hash={hash} url that allows users
+to download the torrent file (defaults to 0)
+.PP
+\fB\-\-keep_dead\fR \fI0 | 1\fP
+.IP
+keep dead torrents after they expire (so they still show up on your
+/scrape and web page) (defaults to 0)
+.PP
+\fB\-\-scrape_allowed\fR \fIfull | specific | none\fP
+.IP
+scrape access allowed (can be none, specific or full) (defaults to
+\&'full')
+.IP
+.SH SEE ALSO
+.BR bittorrent-downloader (1),
+.BR btmakemetafile (1).
+.br
+.SH AUTHOR
+This manual page was written by Micah Anderson <micah at riseup.net>,
+for the Debian GNU/Linux system (but may be used by others).

Added: debtorrent/branches/upstream/current/docs/multitracker-spec.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/multitracker-spec.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/multitracker-spec.txt (added)
+++ debtorrent/branches/upstream/current/docs/multitracker-spec.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,53 @@
+               MULTITRACKER METADATA ENTRY SPECIFICATION
+               =========================================
+
+This specification is for John Hoffman's proposed extension to the
+BitTorrent metadata format.  This extension is not official as of this
+writing.
+
+
+* "announce-list"
+
+In addition to the standard "announce" key, in the main area of the
+metadata file and not part of the "info" section, there will be a new key,
+"announce-list".  This key will refer to a list of lists of URLs, and
+will contain a list of tiers of announces.  If the client is compatible
+with the multitracker specification, and if the "announce-list" key is
+present, the client will ignore the "announce" key and only use the
+URLs in "announce-list".
+
+
+* order of processing
+
+The tiers of announces will be processed sequentially; all URLs in each
+tier must be checked before the client goes on to the next tier.  URLs
+within each tier will be processed in a randomly chosen order; in other
+words, the list will be shuffled when first read, and then parsed in
+order.  In addition, if a connection with a tracker is successful, it
+will be moved to the front of the tier.
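+
+A minimal sketch of that processing order (illustrative only; the
+announce_to(url) helper is assumed to return true on a successful
+tracker connection):
+
+  import random
+
+  def shuffle_tiers(tiers):
+      # Shuffle each tier once, when the metadata is first read.
+      for tier in tiers:
+          random.shuffle(tier)
+
+  def announce(tiers, announce_to):
+      # All URLs in a tier are tried before moving to the next tier.
+      for tier in tiers:
+          for i, url in enumerate(tier):
+              if announce_to(url):
+                  # A responding tracker moves to the front of its tier.
+                  tier.insert(0, tier.pop(i))
+                  return url
+      return None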
+
+
+* examples.
+
+d['announce-list'] = [ [tracker1], [backup1], [backup2] ]
+  On each announce, first try tracker1, then if that cannot be reached,
+  try backup1 and backup2 respectively.  On the next announce, repeat
+  in the same order.  This is meant for when the trackers are standard
+  and cannot share information.
+
+d['announce-list'] = [[ tracker1, tracker2, tracker3 ]]
+  First, shuffle the list.  (For argument's sake, we'll say the list
+  has already been shuffled.)  Then, if tracker1 cannot be reached, try
+  tracker2.  If tracker2 can be reached, the list is now:
+  tracker2,tracker1,tracker3.  From then on, this will be the order the
+  client tries.  If later neither tracker2 nor tracker1 can be reached,
+  but tracker3 responds, then the list will be changed to:
+  tracker3,tracker2,tracker1, and will be tried in that order in the
+  future.  This form is meant for trackers which can trade peer
+  information and will cause the clients to help balance the load
+  between the trackers.
+
+d['announce-list'] = [ [ tracker1, tracker2 ], [backup1] ]
+  The first tier, consisting of tracker1 and tracker2, is shuffled.
+  Both trackers 1 and 2 will be tried on each announce (though perhaps
+  in varying order) before the client tries to reach backup1.

Added: debtorrent/branches/upstream/current/docs/webseed-spec.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/docs/webseed-spec.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/docs/webseed-spec.txt (added)
+++ debtorrent/branches/upstream/current/docs/webseed-spec.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,107 @@
+                   HTTP-BASED SEEDING SPECIFICATION
+                   ================================
+
+This specification is for John Hoffman's and DeHackEd's proposed
+extension to the BitTorrent metadata format, and for an alternate
+protocol for retrieving torrent data from a web server.  This
+extension is not official as of this writing.
+
+
+METADATA EXTENSION:
+
+* "httpseeds"
+
+In the main area of the metadata file and not part of the "info"
+section, there will be a new key, "httpseeds".  This key will refer to a
+list of URLs, and will contain a list of web addresses where torrent
+data can be retrieved.  This key may be safely ignored if the client
+is not capable of using it.
+
+* examples.
+
+d['httpseeds'] = [ 'http://www.whatever.com/seed.php' ]
+  This specifies the client can retrieve data by accessing the given
+  URL with the parameters supplied in the protocol specification
+  below.
+
+d['httpseeds'] = [ 'http://www.site1.com/source1.php',
+                   'http://www.site2.com/source2.php'  ]
+  More than one URL may be specified; if so, the client will attempt
+  to access both URLs to download seed data.
+
+
+PROTOCOL:
+
+The client calls the URL given, in the following format:
+<url>?info_hash=[hash]&piece=[piece]{&ranges=[start]-[end]{,[start]-[end]}...}
+
+Examples:
+http://www.whatever.com/seed.php?info_hash=%9C%D9i%8A%F5Uu%1A%91%86%AE%06lW%EA%21W%235%E0&piece=3
+http://www.whatever.com/seed.php?info_hash=%9C%D9i%8A%F5Uu%1A%91%86%AE%06lW%EA%21W%235%E0&piece=8&ranges=49152-131071,180224-262143
+
+The URL would be for a script which has access to the files
+contained in the torrent, and to the metadata (.torrent) file
+itself, so that it may calculate what byte ranges to pull from
+what files.  One such script has been written by DeHackEd, and
+is available at http://bt.degreez.net .
+
+If everything is okay, the script should return one of two responses:
+a status of 200 (OK) with a block of binary data (either the entire
+piece if no ranges were given, or the requested ranges of data for
+that piece appended together), or a status of 503 (Service Temporarily
+Unavailable) whose body is an ASCII integer value specifying how long
+the client should wait before retrying.
+The client should consider any other return code as an error.
+In the case of an error, the client should retry, but should
+retry less often if the failure to contact the seed continues.
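+
+Purely as an illustration of the request/response cycle above (not a
+normative implementation; Python 2 is assumed, matching the rest of
+this package):
+
+  import urllib
+  import urllib2
+
+  def fetch_piece(seed_url, info_hash, piece, ranges=None):
+      # Build <url>?info_hash=[hash]&piece=[piece]{&ranges=...}
+      query = 'info_hash=%s&piece=%d' % (urllib.quote(info_hash), piece)
+      if ranges:
+          query += '&ranges=' + ','.join(['%d-%d' % r for r in ranges])
+      try:
+          reply = urllib2.urlopen(seed_url + '?' + query)
+          return reply.read(), None       # piece (or range) data
+      except urllib2.HTTPError, e:
+          if e.code == 503:
+              # Body is an ASCII integer: seconds to wait before retrying.
+              return None, int(e.read().strip())
+          raise                           # any other code is an error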
+
+
+* server-side implementation notes.
+
+The purpose of the http seed script is to limit access to the
+data being downloaded so that the web server isn't overwhelmed
+by clients asking for the data.  If it weren't for this limiting,
+there would be no way to prevent someone from coding a client
+to try to download continuously or multiply, resulting in a
+heavy load on the server.  Limiting the download rate also
+allows an http seed script to be run on a web account where
+the total amount of data downloaded is restricted or may result
+in extra service charges.
+
+The script must provide three major functions:
+
+1. Limit its average upload to a reasonable level. 
+
+2. Intelligently tell peers how long they should wait before
+   retrying.
+
+3. Translate from an info-hash and piece number to a byte range
+   within a file or set of files, and return those bytes (a rough
+   sketch of this mapping follows).
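+
+   A rough sketch of that mapping (names and structure here are
+   illustrative assumptions, not taken from DeHackEd's script); pieces
+   cover the concatenation of all files in the torrent, and the final
+   piece may be shorter than the others:
+
+     def piece_byte_range(piece, piece_length, total_length):
+         start = piece * piece_length
+         return start, min(start + piece_length, total_length)
+
+     def split_across_files(start, end, files):
+         # files: list of (path, length) in the torrent's file order.
+         chunks, offset = [], 0
+         for path, length in files:
+             lo, hi = max(start, offset), min(end, offset + length)
+             if lo < hi:
+                 chunks.append((path, lo - offset, hi - lo))
+             offset += length
+         return chunks              # list of (file, seek offset, count)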
+
+Another highly desirable function is to check whether peers are
+retrying too often, and to automatically ban those peers.
+
+Other desirable features include a way of monitoring the tracker
+the torrent is using and to stop uploading data if sufficient
+P2P seeds exist, and a way to feed back to the tracker to show
+a seed is present.
+
+
+
+* client-side implementation notes.
+
+The prototype code base has a default retry time of 30 seconds;
+after 3 retries with errors, the time is lengthened with each
+cycle.
+
+The prototype code will not display any errors with contacting
+http seeds (unless the URL given in the .torrent is incorrect)
+until it has received data from that seed.  (The prototype code
+also won't display any errors for any http reply that was
+actually received.)
+
+Current behavior is: request, in its entirety, the rarest piece you
+are missing that you can locate.  If every missing piece is already
+partially downloaded, skip one retry cycle, then start requesting
+partials.  If you receive a 503 response, set the retry time equal
+to the integer value received in the response.
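+
+A minimal sketch of that retry handling (the doubling factor is an
+assumption made for illustration; the prototype only says the time is
+lengthened with each cycle):
+
+  DEFAULT_RETRY = 30  # seconds, the prototype's default
+
+  def next_retry_time(previous, consecutive_errors, server_hint=None):
+      # A 503 reply carries an explicit wait time from the seed.
+      if server_hint is not None:
+          return server_hint
+      # After 3 retries with errors, lengthen the wait each cycle.
+      if consecutive_errors > 3:
+          return previous * 2
+      return DEFAULT_RETRY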

Added: debtorrent/branches/upstream/current/icons/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/icons/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/icons/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,13 @@
+/alloc.gif/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/black.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/black1.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/blue.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/green.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/green1.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/icon_bt.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/icon_done.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/red.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/white.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/yellow.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+/yellow1.ico/1.1/Fri Sep  3 19:15:32 2004/-kb/
+D

Added: debtorrent/branches/upstream/current/icons/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/icons/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/icons/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,12 @@
+/alloc.gif////*///
+/black.ico////*///
+/black1.ico////*///
+/blue.ico////*///
+/green.ico////*///
+/green1.ico////*///
+/icon_bt.ico////*///
+/icon_done.ico////*///
+/red.ico////*///
+/white.ico////*///
+/yellow.ico////*///
+/yellow1.ico////*///

Added: debtorrent/branches/upstream/current/icons/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/icons/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/icons/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/icons/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/icons/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/icons

Added: debtorrent/branches/upstream/current/icons/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/icons/CVS/Root (added)
+++ debtorrent/branches/upstream/current/icons/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/icons/alloc.gif
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/alloc.gif?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/alloc.gif
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/black.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/black.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/black.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/black1.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/black1.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/black1.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/blue.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/blue.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/blue.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/green.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/green.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/green.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/green1.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/green1.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/green1.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/icon_bt.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/icon_bt.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/icon_bt.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/icon_done.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/icon_done.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/icon_done.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/red.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/red.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/red.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/white.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/white.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/white.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/yellow.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/yellow.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/yellow.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/icons/yellow1.ico
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/icons/yellow1.ico?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/icons/yellow1.ico
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/ipranges.portugal.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/ipranges.portugal.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/ipranges.portugal.txt (added)
+++ debtorrent/branches/upstream/current/ipranges.portugal.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,152 @@
+# example file for tracker allowed_ips feature
+193.53.22.0/24 # AS12527         SERVIBANCA Autonomous System
+81.84.0.0/16 # AS12542         TVCABO Autonomous System
+212.113.160.0/19 # AS12542         TVCABO Autonomous System
+213.22.0.0/16 # AS12542         TVCABO Autonomous System
+193.194.155.0/24 # AS12603         SHOPPING DIRECT Autonomous System
+193.236.127.0/24 # AS12833         Sistema de Mail para Todos do MCT
+213.63.0.0/17 # AS12926         Jazztel Portugal Autonomous System
+213.63.128.0/17 # AS12926         Jazztel Portugal Autonomous System
+213.63.140.0/24 # AS12926         Jazztel Portugal Autonomous System
+213.141.0.0/19 # AS12926         Jazztel Portugal Autonomous System
+213.141.0.0/21 # AS12926         Jazztel Portugal Autonomous System
+213.141.8.0/22 # AS12926         Jazztel Portugal Autonomous System
+213.141.16.0/22 # AS12926         Jazztel Portugal Autonomous System
+213.141.20.0/23 # AS12926         Jazztel Portugal Autonomous System
+213.141.25.0/24 # AS12926         Jazztel Portugal Autonomous System
+213.141.26.0/24 # AS12926         Jazztel Portugal Autonomous System
+213.129.128.0/19 # AS13011         netway - comunicacao de dados
+194.153.132.0/24 # AS13200         BPI
+213.190.192.0/19 # AS15457         CABOTVM.PT Autonomous System
+62.48.128.0/17 # AS15525        PT Prime Autonomous System
+195.35.66.0/24 # AS15525        PT Prime Autonomous System
+193.41.114.0/23 # AS15931         YASP Autonomous System
+193.126.0.0/16 # AS1897         KPNQwest Portugal Backbone AS
+193.192.1.0/24 # AS1897         KPNQwest Portugal Backbone AS
+193.192.2.0/24 # AS1897         KPNQwest Portugal Backbone AS
+193.192.3.0/24 # AS1897         KPNQwest Portugal Backbone AS
+139.83.0.0/16 # AS1930         RCCN-NET
+158.162.0.0/17 # AS1930         RCCN-NET
+192.92.144.0/24 # AS1930         RCCN-NET
+193.136.0.0/15 # AS1930         RCCN-NET
+193.236.0.0/21 # AS1930         RCCN-NET
+194.117.0.0/20 # AS1930         RCCN-NET
+194.117.16.0/21 # AS1930         RCCN-NET
+194.117.40.0/22 # AS1930         RCCN-NET
+194.210.0.0/16 # AS1930         RCCN-NET
+193.192.4.0/22 # AS25060         INE - Instituto Nacional de Estatistica Portugal
+193.192.8.0/22 # AS25060         INE - Instituto Nacional de Estatistica Portugal
+192.67.76.0/24 # AS3251         Centro de Calculo da Faculdade de Ciencias de Lisboa
+194.130.254.0/23 # AS5533         VIA NET.WORKS Portugal -  Tecnologias de Informacao
+195.22.0.0/19 # AS5533         VIA NET.WORKS Portugal -  Tecnologias de Informacao
+195.22.16.0/21 # AS5533         VIA NET.WORKS Portugal -  Tecnologias de Informacao
+195.138.0.0/19 # AS6773         SIBS - Sociedade Interbancaria de Servicos
+212.54.128.0/19 # AS8824         ITNET-Network
+194.145.59.0/24 # AS8826         Siemens Autonomous System
+194.145.60.0/24 # AS8826         Siemens Autonomous System
+194.145.61.0/24 # AS8826         Siemens Autonomous System
+194.145.62.0/24 # AS8826         Siemens Autonomous System
+212.48.64.0/19 # AS8994         HLC Telecomunicacoes e Multimedia AS
+194.145.121.0/24 # AS9118         ESDI Autonomous System
+195.245.128.0/18 # AS9186         ONITELECOM Autonomous System
+195.245.128.0/19 # AS9186         ONITELECOM Autonomous System
+195.245.160.0/19 # AS9186         ONITELECOM Autonomous System
+213.58.0.0/16 # AS9186         ONITELECOM Autonomous System
+213.58.0.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.8.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.16.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.24.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.32.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.40.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.48.0/20 # AS9186         ONITELECOM Autonomous System
+213.58.48.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.64.0/22 # AS9186         ONITELECOM Autonomous System
+213.58.80.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.80.0/22 # AS9186         ONITELECOM Autonomous System
+213.58.84.0/22 # AS9186         ONITELECOM Autonomous System
+213.58.88.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.88.0/22 # AS9186         ONITELECOM Autonomous System
+213.58.92.0/22 # AS9186         ONITELECOM Autonomous System
+213.58.96.0/19 # AS9186         ONITELECOM Autonomous System
+213.58.96.0/22 # AS9186         ONITELECOM Autonomous System
+213.58.100.0/22 # AS9186         ONITELECOM Autonomous System
+213.58.104.0/22 # AS9186         ONITELECOM Autonomous System
+213.58.108.0/22 # AS9186         ONITELECOM Autonomous System
+213.58.112.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.120.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.128.0/17 # AS9186         ONITELECOM Autonomous System
+213.58.128.0/18 # AS9186         ONITELECOM Autonomous System
+213.58.128.0/21 # AS9186         ONITELECOM Autonomous System
+213.58.136.128/25 # AS9186         ONITELECOM Autonomous System
+213.58.192.0/18 # AS9186         ONITELECOM Autonomous System
+213.58.192.0/21 # AS9186         ONITELECOM Autonomous System
+192.188.10.0/24 # AS2860         Novis Telecom
+192.199.16.0/20 # AS2860         Novis Telecom
+193.236.120.0/24 # AS2860         Novis Telecom
+193.236.121.0/24 # AS2860         Novis Telecom
+193.236.122.0/24 # AS2860         Novis Telecom
+193.236.123.0/24 # AS2860         Novis Telecom
+194.79.64.0/19 # AS2860         Novis Telecom
+194.79.64.0/21 # AS2860         Novis Telecom
+194.79.72.0/21 # AS2860         Novis Telecom
+194.79.80.0/21 # AS2860         Novis Telecom
+194.79.88.0/21 # AS2860         Novis Telecom
+194.117.36.0/22 # AS2860         Novis Telecom
+195.23.0.0/16 # AS2860         Novis Telecom
+195.23.0.0/17 # AS2860         Novis Telecom
+195.23.0.0/18 # AS2860         Novis Telecom
+195.23.64.0/18 # AS2860         Novis Telecom
+195.23.128.0/17 # AS2860         Novis Telecom
+195.23.128.0/18 # AS2860         Novis Telecom
+195.23.192.0/18 # AS2860         Novis Telecom
+213.205.64.0/19 # AS2860         Novis Telecom
+81.20.240.0/20 # AS3243         Telepac - Comunicacoes Interactivas
+81.193.0.0/16 # AS3243         Telepac - Comunicacoes Interactivas
+158.162.128.0/18 # AS3243         Telepac - Comunicacoes Interactivas
+158.162.192.0/18 # AS3243         Telepac - Comunicacoes Interactivas
+192.92.150.0/24 # AS3243         Telepac - Comunicacoes Interactivas
+194.65.0.0/16 # AS3243         Telepac - Comunicacoes Interactivas
+212.55.128.0/18 # AS3243         Telepac - Comunicacoes Interactivas
+213.13.0.0/16 # AS3243         Telepac - Comunicacoes Interactivas
+195.8.0.0/19 # AS8657         CPRM Autonomous System
+212.18.160.0/19 # AS12353         Vodafone Telecel
+213.30.0.0/17 # AS12353         Vodafone Telecel
+62.229.64.0/19 # AS6853         GLOBAL-ONE-PORTUGAL
+194.235.128.0/21 # AS6853         GLOBAL-ONE-PORTUGAL
+194.235.136.0/22 # AS6853         GLOBAL-ONE-PORTUGAL
+195.61.64.0/19 # AS6853         GLOBAL-ONE-PORTUGAL
+194.38.128.0/19 # AS5626         COMNEXO
+194.38.128.0/20 # AS5626         COMNEXO
+194.38.144.0/20 # AS5626         COMNEXO
+213.146.192.0/19 # AS5626         COMNEXO
+213.146.192.0/22 # AS5626         COMNEXO
+213.146.200.0/22 # AS5626         COMNEXO
+213.146.204.0/22 # AS5626         COMNEXO
+213.146.208.0/22 # AS5626         COMNEXO
+212.13.32.0/19 # AS12305         NORTENET- Sistemas de Comunicacao SA.
+213.228.128.0/18 # AS13156         Cabovisao
+213.228.128.0/19 # AS13156         Cabovisao
+217.129.0.0/16 # AS13156         Cabovisao
+195.234.134.0/24 # AS25253         CAIXANET - Telematica e Telecomunicacoes
+62.169.64.0/19 # AS24698         Optimus Telecomunicacoes
+193.111.42.0/24 # AS25005         Economical Group - Finibanco
+195.245.234.0/24 # AS28672         Banco Portugues de Negocios
+193.126.192.0/24 # AS13134         Instituto de Informatica e Estatistica da Solidariedade
+195.245.197.0/24 # AS13134         Instituto de Informatica e Estatistica da Solidariedade
+195.35.96.0/24 # AS15647         BanifServ - Empresa de Servicos
+80.90.192.0/19 # AS21167         Autonomous System for Oni Way
+146.193.0.0/16 # AS5516         INESC - Instituto de Engenharia de Sistemas e Computadores
+192.12.232.0/24 # AS5516         INESC - Instituto de Engenharia de Sistemas e Computadores
+192.35.246.0/24 # AS5516         INESC - Instituto de Engenharia de Sistemas e Computadores
+192.135.129.0/24 # AS5516         INESC - Instituto de Engenharia de Sistemas e Computadores
+192.136.49.0/24 # AS5516         INESC - Instituto de Engenharia de Sistemas e Computadores
+194.117.24.0/21 # AS5516         INESC - Instituto de Engenharia de Sistemas e Computadores
+194.117.32.0/22 # AS5516         INESC - Instituto de Engenharia de Sistemas e Computadores
+193.219.96.0/21 # AS6806         IP Global
+212.16.128.0/19 # AS8765         Teleweb
+212.251.128.0/17 # AS8765         Teleweb
+212.113.128.0/19 # AS9190         MaxitelCom
+217.70.64.0/21 # AS9190         MaxitelCom
+81.92.192.0/20 # AS25137         NFSi - Solucoes Internet Lda.
+80.79.0.0/20 # AS20957         Guiao
+217.23.0.0/20 # AS15749         Cofina.com Autonomous System

Added: debtorrent/branches/upstream/current/setup.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/setup.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/setup.py (added)
+++ debtorrent/branches/upstream/current/setup.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,28 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+import sys
+assert sys.version >= '2', "Install Python 2.0 or greater"
+from distutils.core import setup, Extension
+import BitTornado
+
+setup(
+    name = "BitTornado",
+    version = BitTornado.version,
+    author = "Bram Cohen, John Hoffman, Uoti Arpala et. al.",
+    author_email = "<theshadow at degreez.net>",
+    url = "http://www.bittornado.com",
+    license = "MIT",
+    
+    packages = ["BitTornado","BitTornado.BT1"],
+
+    scripts = ["btdownloadgui.py", "btdownloadheadless.py", 
+        "bttrack.py", "btmakemetafile.py", "btlaunchmany.py", "btcompletedir.py",
+        "btdownloadcurses.py", "btcompletedirgui.py", "btlaunchmanycurses.py", 
+        "btmakemetafile.py", "btreannounce.py", "btrename.py", "btshowmetainfo.py",
+        'btmaketorrentgui.py', 'btcopyannounce.py', 'btsethttpseeds.py',
+        'bt-t-make.py',
+        ]
+    )

Propchange: debtorrent/branches/upstream/current/setup.py
------------------------------------------------------------------------------
    svn:executable = 
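
For reference, this is a stock distutils script, so the usual invocation from
the unpacked source root would be "python setup.py build" followed by
"python setup.py install" (or "python setup.py sdist" to roll a release tarball).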

Added: debtorrent/branches/upstream/current/targets/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/targets/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/targets/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,6 @@
+/default(toobig).gif/1.1/Tue Feb 24 17:22:05 2004/-kb/
+/default-large.gif/1.1/Tue Feb 24 17:22:05 2004/-kb/
+/default-small.gif/1.1/Tue Feb 24 17:22:05 2004/-kb/
+/default.gif/1.1/Tue Feb 24 17:22:05 2004/-kb/
+/zip.gif/1.1/Tue Feb 24 17:22:05 2004/-kb/
+D

Added: debtorrent/branches/upstream/current/targets/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/targets/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/targets/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,5 @@
+/default(toobig).gif////*///
+/default-large.gif////*///
+/default-small.gif////*///
+/default.gif////*///
+/zip.gif////*///

Added: debtorrent/branches/upstream/current/targets/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/targets/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/targets/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/targets/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/targets/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/targets

Added: debtorrent/branches/upstream/current/targets/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/targets/CVS/Root (added)
+++ debtorrent/branches/upstream/current/targets/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/targets/default(toobig).gif
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/default%28toobig%29.gif?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/targets/default(toobig).gif
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/targets/default-large.gif
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/default-large.gif?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/targets/default-large.gif
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/targets/default-small.gif
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/default-small.gif?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/targets/default-small.gif
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/targets/default.gif
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/default.gif?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/targets/default.gif
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/targets/zip.gif
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/targets/zip.gif?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/targets/zip.gif
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/test/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/test/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,2 @@
+D/multitracker////
+D/tracker////

Added: debtorrent/branches/upstream/current/test/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/test/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,2 @@
+D/multitracker///////
+D/tracker///////

Added: debtorrent/branches/upstream/current/test/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/test/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/CVS/Entries.Old?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/CVS/Entries.Old (added)
+++ debtorrent/branches/upstream/current/test/CVS/Entries.Old Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+D

Added: debtorrent/branches/upstream/current/test/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/test/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/test

Added: debtorrent/branches/upstream/current/test/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/CVS/Root (added)
+++ debtorrent/branches/upstream/current/test/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/test/multitracker/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/test/multitracker/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,6 @@
+/README.txt/1.1/Tue Feb 24 17:22:05 2004//
+/tracker0.bat/1.1/Tue Feb 24 17:22:05 2004//
+/tracker1.bat/1.1/Tue Feb 24 17:22:05 2004//
+/tracker2.bat/1.1/Tue Feb 24 17:22:05 2004//
+/tracker3.bat/1.1/Tue Feb 24 17:22:05 2004//
+D

Added: debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,5 @@
+/README.txt////*///
+/tracker0.bat////*///
+/tracker1.bat////*///
+/tracker2.bat////*///
+/tracker3.bat////*///

Added: debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Log
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Log?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Log (added)
+++ debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Log Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+A D/allowed////

Added: debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/test/multitracker/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/test/multitracker/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/test/multitracker

Added: debtorrent/branches/upstream/current/test/multitracker/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/CVS/Root (added)
+++ debtorrent/branches/upstream/current/test/multitracker/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/test/multitracker/README.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/README.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/README.txt (added)
+++ debtorrent/branches/upstream/current/test/multitracker/README.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,133 @@
+           USING THE TRACKER FOR MULTITRACKER OPERATIONS
+           =============================================
+
+The tracker in this package has been enhanced so that it can operate as
+part of a cluster of trackers.  This directory includes some examples
+that may help you set up such a cluster for your own use.
+
+(This document assumes you are familiar with setting up a standalone
+python tracker.  If you are not, you really need to find out how before
+trying this.)
+
+
+MULTITRACKER OPERATION
+----------------------
+The following option has been added to the tracker:
+
+--multitracker_enabled <arg>
+          whether to enable multitracker operation (defaults to 0)
+
+Enabling this is step 1 for operating with multiple trackers.  Step 2
+is to create (or reannounce) the .torrent files to include all the
+trackers involved.  For this you would separate the trackers with
+commas; for instance, with 3 peered trackers you could run
+"btreannounce.py http://tracker1 --announce_list tracker1,tracker2,tracker3 file.torrent"
+("tracker1" etc. being the full announce URLs of those trackers).  You
+would then place these torrents in each tracker's allowed_dir.
+
+When the tracker parses the allowed_dir, it will also read the
+announce-lists from the .torrent files and begin polling those trackers
+for their peer data.  The trackers are polled much the way a client
+would poll them, except that they are polled more frequently but
+requesting fewer users with each connection.  The data collected is
+then mixed in with the peer data returned by the tracker to its
+clients.
+
+This operation does take extra bandwidth.  Each additional tracker in
+the cluster, and each additional torrent being tracked, will increase
+the amount of bandwidth consumed.  For a 4-tracker cluster tracking 10
+torrents together, the extra bandwidth consumed will be equivalent to
+up to 60 additional clients connected to each tracker.  (This number
+should not, however, grow with the number of peers per
+tracker; at least, not past a certain point.)
+
+PLEASE NOTE:  When running a tracker enabled for multitracker
+operations, one needs to be careful about the data in the .torrent
+files placed in that tracker's allowed_dir.  Since that data tells the
+tracker to establish outgoing connections based on the contents of
+those .torrents, the potential for abuse is high.  It is therefore
+recommended that any .torrent added to the tracker's allowed_dir have
+its announce-list either screened or automatically replaced.  (The
+included utility "btcopyannounce.py" is useful for this purpose, in
+that one can set up a "template" .torrent file and copy that data over
+every incoming .torrent file.)
+
+
+DATA AGGREGATION
+----------------
+It would probably be sufficient to simply collect the data from each
+tracker and add them together.  However, for anyone who wishes to keep
+more accurate records, or obtain more specific log information, the
+following options have been added to the tracker:
+
+--aggregator <arg>
+          whether to act as a data aggregator rather than a tracker. If
+          enabled, may be 1, or <password>; if password is set, then an
+          incoming password is required for access (defaults to '0')
+
+--aggregate_forward <arg>
+          format: <url>[,<password>] - if set, forwards all non-
+          multitracker to this url with this optional password
+          multitracker announces to this url with this optional password
+
+The first option changes the tracker's operation from a tracker to a
+"data aggregator".  When it receives an announce, it adds the data to
+its internal statistics, but then returns nothing.
+
+The second option is to be used on the members of the tracker cluster,
+and directs them to send a copy of each query received to a tracker
+designated as a data aggregator.
+
+The result is that the aggregator receives all the statistical
+information captured by each tracker, including the peer IDs.  It is
+able to sort this data by peer ID, developing an accurate picture of
+the torrent even if a peer connects to more than one tracker.
+
+Please note that operating like this DOES use up quite a bit of bandwidth;
+the upstream bandwidth use of each tracker will increase by 10-15%.
+Alternatives are being looked into.  If you feel you cannot spare this
+much bandwidth, the presence of an aggregator is optional.
+
+
+EXAMPLES
+--------
+In this directory are some examples to show how a tracker cluster might
+be set up.  To try them, copy the files and "allowed" directory to the
+root BitTorrent directory.
+
+"tracker0.bat" runs one tracker as a data aggregator on localhost, port
+80.  (While it is named as an MS-DOS batch file, it can easily be
+modified to work as a shell script.)  "tracker1.bat", "tracker2.bat",
+and "tracker3.bat" will each run a tracker on ports 81, 82 and 83
+respectively, configured to use the common directory "allowed" to read
+multitracker data from.
+
+The common allowed_dir directory also contains the metadata file
+"blah.torrent", which has been set up to expect co-equivalent trackers
+on localhost ports 81, 82 and 83.  You can run clients on it several
+times from the local computer and see from the tracker logs that each
+client connects to a randomly chosen tracker, that the clients still
+find each other (at worst after a short delay), and that
+http://localhost will show all the clients you run in its statistics.
+
+
+TIPS 'N TRICKS
+--------------
+* When you start distributing a torrent, expecting heavy load, set it
+  up normally, adding all the trackers in the cluster to the torrent
+  file and letting it distribute itself across the cluster.  Then, when
+  it gets old and the load tails off, remove the .torrent file from
+  some or most of the trackers' allowed_dirs.  The clients will
+  automatically skip the trackers that have removed it and attach
+  to the ones that still have it, and tracker-to-tracker traffic for
+  these torrents will automatically stop from the trackers that no
+  longer carry it.  As a result, you save bandwidth, and save T2T
+  for when you really need it.
+
+* You can also set up the trackers under a round-robin DNS, though you
+  will need to change the announce-list in the torrents in the
+  allowed_dirs to reflect the actual IPs.  If you do this, even an old
+  client that doesn't support the multitracker specification can end
+  up on any of the trackers.  The statistics on each tracker will then
+  be even less accurate, but if you are using an aggregator, its stats
+  won't be affected.
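
A note on the metainfo side of the setup described above: the announce-list
convention is just an extra key in the bencoded metainfo, a list of tracker
tiers stored alongside the ordinary "announce" key.  The sketch below is only
an illustration; the tracker URLs are hypothetical, the toy bencode() helper
stands in for the package's own BitTornado.bencode module, and a real .torrent
would also carry the "info" dictionary with the piece hashes.

    def bencode(value):
        """Serialize ints, strings, lists and dicts in bencode form."""
        if isinstance(value, int):
            return b"i%de" % value
        if isinstance(value, bytes):
            return b"%d:%s" % (len(value), value)
        if isinstance(value, str):
            return bencode(value.encode("utf-8"))
        if isinstance(value, list):
            return b"l" + b"".join(bencode(v) for v in value) + b"e"
        if isinstance(value, dict):
            return (b"d" +
                    b"".join(bencode(k) + bencode(value[k]) for k in sorted(value)) +
                    b"e")
        raise TypeError("cannot bencode %r" % type(value))

    # Hypothetical peered trackers; grouping them in a single tier means
    # clients pick among them at random, which is the behaviour the example
    # torrent above is set up to demonstrate.
    trackers = ["http://tracker1/announce",
                "http://tracker2/announce",
                "http://tracker3/announce"]

    metainfo = {
        "announce": trackers[0],      # fallback for pre-multitracker clients
        "announce-list": [trackers],  # one tier containing all three trackers
    }
    print(bencode(metainfo))

Placing torrents that carry such an announce-list in each tracker's allowed_dir
is what triggers the tracker-to-tracker polling described in the README.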

Added: debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,2 @@
+/blah.torrent/1.1/Tue Feb 24 17:22:05 2004/-kb/
+D

Added: debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+/blah.torrent////*///

Added: debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/test/multitracker/allowed

Added: debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Root (added)
+++ debtorrent/branches/upstream/current/test/multitracker/allowed/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/test/multitracker/allowed/blah.torrent
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/allowed/blah.torrent?rev=1&op=file
==============================================================================
Binary file - no diff available.

Propchange: debtorrent/branches/upstream/current/test/multitracker/allowed/blah.torrent
------------------------------------------------------------------------------
    svn:mime-type = application/octet-stream

Added: debtorrent/branches/upstream/current/test/multitracker/tracker0.bat
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/tracker0.bat?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/tracker0.bat (added)
+++ debtorrent/branches/upstream/current/test/multitracker/tracker0.bat Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bttrack.py --dfile statefile0 --port 80 --aggregator pwd

Added: debtorrent/branches/upstream/current/test/multitracker/tracker1.bat
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/tracker1.bat?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/tracker1.bat (added)
+++ debtorrent/branches/upstream/current/test/multitracker/tracker1.bat Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bttrack.py --dfile statefile1 --port 81 --multitracker_enabled 1 --allowed_dir allowed --aggregate_forward http://localhost:80/announce,pwd --scrape_allowed none --show_infopage 0

Added: debtorrent/branches/upstream/current/test/multitracker/tracker2.bat
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/tracker2.bat?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/tracker2.bat (added)
+++ debtorrent/branches/upstream/current/test/multitracker/tracker2.bat Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bttrack.py --dfile statefile2 --port 82 --multitracker_enabled 1 --allowed_dir allowed --aggregate_forward http://localhost:80/announce,pwd --scrape_allowed none --show_infopage 0

Added: debtorrent/branches/upstream/current/test/multitracker/tracker3.bat
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/multitracker/tracker3.bat?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/multitracker/tracker3.bat (added)
+++ debtorrent/branches/upstream/current/test/multitracker/tracker3.bat Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bttrack.py --dfile statefile3 --port 83 --multitracker_enabled 1 --allowed_dir allowed --aggregate_forward http://localhost:80/announce,pwd --scrape_allowed none --show_infopage 0

Added: debtorrent/branches/upstream/current/test/tracker/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/tracker/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/tracker/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/test/tracker/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,4 @@
+/FAQ.txt/1.1/Sat Dec 23 20:11:09 2006//
+/FAQ.txt.torrent/1.1/Sat Dec 23 20:11:09 2006//
+/tracker.bat/1.1/Sat Dec 23 20:11:09 2006//
+D

Added: debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,3 @@
+/FAQ.txt////*///
+/FAQ.txt.torrent////*///
+/tracker.bat////*///

Added: debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/tracker/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/test/tracker/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/tracker/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/tracker/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/test/tracker/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/test/tracker

Added: debtorrent/branches/upstream/current/test/tracker/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/tracker/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/tracker/CVS/Root (added)
+++ debtorrent/branches/upstream/current/test/tracker/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/test/tracker/FAQ.txt
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/tracker/FAQ.txt?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/tracker/FAQ.txt (added)
+++ debtorrent/branches/upstream/current/test/tracker/FAQ.txt Sat Apr 14 18:47:18 2007
@@ -1,0 +1,51 @@
+Frequently Asked Questions about BitTorrent
+
+Q:  I use Mozilla/Opera, how can I use BT?
+
+A:  You need to edit the mimetype associations so that the "application/x-bittorrent"
+    mime type is launched by c:\progra~1\bittorrent\btdownloadprefetched.exe
+
+
+Q:  Does BitTorrent support resuming?
+
+A:  Yes, just save your download to the same location as the existing partial download.
+    BT will resume where it left off after checking the partial download.
+
+
+Q:  How do I know the download isn't corrupted?
+
+A:  BitTorrent does cryptographic hashing (SHA1) of all data.  When you see "Download
+    Succeeded" you can be sure that BT has already verified the integrity of the data.
+    The integrity and authenticity of a BT download are as good as the original request
+    to the tracker.
+
+
+Q:  I'm behind a firewall/NAT, can I use BT?
+
+A:  Yes, but you will get better performance if other peers can connect to you.  By
+    default, BitTorrent listens on port 6881, trying incrementally higher ports if
+    it is unable to bind, giving up after 6889 (the port range is configurable).
+    It's up to you to figure out how to poke a hole in your firewall/NAT.
+
+
+Q:  I published a file, but whenever I try to download it, it hangs saying "connecting to
+    peers" and/or the download just never starts.
+
+A:  You need to leave a downloader running which already has the whole file.  The
+    publishing step merely registers the download information with the tracker.
+    Make sure other peers can connect to this downloader (not behind firewall or
+    NAT!)
+
+
+Q:  When is the Java implementation going to be ready?
+
+A:  Soon after the check clears.
+
+
+
+Q:  How do I limit the amount of bandwidth consumed by BT?
+
+A:  BT allows you to control how many connections can actively download
+    from you at once using --max_uploads in the btdownloadheadless.py
+    script.  Other than that you'll have to limit bandwidth some other way, perhaps
+    at the OS or router level.
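
A minimal sketch of the port-probing behaviour the firewall answer above
describes (an illustration, not BitTornado's own code): try port 6881 first,
then incrementally higher ports, and give up after 6889.

    import socket

    def find_listen_port(minport=6881, maxport=6889):
        """Return a listening socket bound to the first free port in the range."""
        for port in range(minport, maxport + 1):
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.bind(("", port))
                s.listen(5)
                return s, port          # caller closes the socket when done
            except OSError:
                s.close()               # port already taken, try the next one
        raise RuntimeError("no free port between %d and %d" % (minport, maxport))

    sock, port = find_listen_port()
    print("listening on port", port)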

Added: debtorrent/branches/upstream/current/test/tracker/FAQ.txt.torrent
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/tracker/FAQ.txt.torrent?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/tracker/FAQ.txt.torrent (added)
+++ debtorrent/branches/upstream/current/test/tracker/FAQ.txt.torrent Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+d8:announce30:http://127.0.0.1:6969/announce13:creation datei1166903567e4:infod6:lengthi2022e4:name7:FAQ.txt12:piece lengthi262144e6:pieces20:%»è|‹‰æa͏ä¥ôa%	õíPæee

Added: debtorrent/branches/upstream/current/test/tracker/tracker.bat
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/test/tracker/tracker.bat?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/test/tracker/tracker.bat (added)
+++ debtorrent/branches/upstream/current/test/tracker/tracker.bat Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bttrack.py --dfile statefile1 --port 6969

Added: debtorrent/branches/upstream/current/thosts/ASS.thost
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/thosts/ASS.thost?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/thosts/ASS.thost (added)
+++ debtorrent/branches/upstream/current/thosts/ASS.thost Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+d8:announce59:http://www.animeskyscraper.com/tracker/tracker.php/announcee

Added: debtorrent/branches/upstream/current/thosts/CVS/Entries
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/thosts/CVS/Entries?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/thosts/CVS/Entries (added)
+++ debtorrent/branches/upstream/current/thosts/CVS/Entries Sat Apr 14 18:47:18 2007
@@ -1,0 +1,4 @@
+/ASS.thost/1.1/Tue Feb 24 17:22:05 2004//
+/ILA.thost/1.1/Tue Feb 24 17:22:05 2004//
+/Plucker.thost/1.1/Tue Feb 24 17:22:05 2004//
+D

Added: debtorrent/branches/upstream/current/thosts/CVS/Entries.Extra
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/thosts/CVS/Entries.Extra?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/thosts/CVS/Entries.Extra (added)
+++ debtorrent/branches/upstream/current/thosts/CVS/Entries.Extra Sat Apr 14 18:47:18 2007
@@ -1,0 +1,3 @@
+/ASS.thost////*///
+/ILA.thost////*///
+/Plucker.thost////*///

Added: debtorrent/branches/upstream/current/thosts/CVS/Entries.Extra.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/thosts/CVS/Entries.Extra.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/thosts/CVS/Entries.Old
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/thosts/CVS/Entries.Old?rev=1&op=file
==============================================================================
    (empty)

Added: debtorrent/branches/upstream/current/thosts/CVS/Repository
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/thosts/CVS/Repository?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/thosts/CVS/Repository (added)
+++ debtorrent/branches/upstream/current/thosts/CVS/Repository Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+bittornado/thosts

Added: debtorrent/branches/upstream/current/thosts/CVS/Root
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/thosts/CVS/Root?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/thosts/CVS/Root (added)
+++ debtorrent/branches/upstream/current/thosts/CVS/Root Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+:ext:theshadow at cvs.degreez.net:/cvsroot

Added: debtorrent/branches/upstream/current/thosts/ILA.thost
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/thosts/ILA.thost?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/thosts/ILA.thost (added)
+++ debtorrent/branches/upstream/current/thosts/ILA.thost Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+d8:announce38:http://tene-chan.mine.nu:6969/announcee

Added: debtorrent/branches/upstream/current/thosts/Plucker.thost
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/thosts/Plucker.thost?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/thosts/Plucker.thost (added)
+++ debtorrent/branches/upstream/current/thosts/Plucker.thost Sat Apr 14 18:47:18 2007
@@ -1,0 +1,1 @@
+d8:announce38:http://torrents.plkr.org:8000/announcee

Added: debtorrent/branches/upstream/current/wincompletedirsetup.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/wincompletedirsetup.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/wincompletedirsetup.py (added)
+++ debtorrent/branches/upstream/current/wincompletedirsetup.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,9 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+from distutils.core import setup
+import py2exe
+
+setup(name='completedir', scripts=['btcompletedirgui.py'])

Propchange: debtorrent/branches/upstream/current/wincompletedirsetup.py
------------------------------------------------------------------------------
    svn:executable = 

Added: debtorrent/branches/upstream/current/winsetup.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/upstream/current/winsetup.py?rev=1&op=file
==============================================================================
--- debtorrent/branches/upstream/current/winsetup.py (added)
+++ debtorrent/branches/upstream/current/winsetup.py Sat Apr 14 18:47:18 2007
@@ -1,0 +1,16 @@
+#!/usr/bin/env python
+
+# Written by Bram Cohen
+# see LICENSE.txt for license information
+
+
+from distutils.core import setup
+import py2exe
+
+setup(
+    windows = [ { 'script': 'btdownloadgui.py',
+                  'icon_resources': [ (1, 'icon_bt.ico')],
+                    'excludes': ["pywin", "pywin.debugger", "pywin.debugger.dbgcon",
+                "pywin.dialogs", "pywin.dialogs.list",
+                "Tkconstants","Tkinter","tcl" ]  } ]
+    )

Propchange: debtorrent/branches/upstream/current/winsetup.py
------------------------------------------------------------------------------
    svn:executable = 
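
As with wincompletedirsetup.py above, this py2exe script would normally be run
on Windows as "python winsetup.py py2exe", which writes the frozen executable
and its supporting files into a dist\ subdirectory.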




More information about the Debtorrent-commits mailing list