[Debtorrent-commits] r80 - /debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py

camrdale-guest at users.alioth.debian.org camrdale-guest at users.alioth.debian.org
Sat Jun 2 05:22:54 UTC 2007


Author: camrdale-guest
Date: Sat Jun  2 05:22:54 2007
New Revision: 80

URL: http://svn.debian.org/wsvn/debtorrent/?sc=1&rev=80
Log:
Remove unneeded tracker stuff from AptListener, and document the remaining.

Modified:
    debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py

Modified: debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py?rev=80&op=diff
==============================================================================
--- debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py (original)
+++ debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py Sat Jun  2 05:22:54 2007
@@ -1,15 +1,19 @@
 # Written by Cameron Dale
 # see LICENSE.txt for license information
-
+#
 # $Id$
+
+"""Listen for download requests from Apt.
+
+@type alas: C{string}
+@var alas: the message to send when the data is not found
+
+"""
 
 from DebTorrent.parseargs import parseargs, formatDefinitions
 from DebTorrent.RawServer import RawServer, autodetect_ipv6, autodetect_socket_style
 from DebTorrent.HTTPHandler import HTTPHandler, months, weekdays
 from DebTorrent.parsedir import parsedir
-from NatCheck import NatCheck, CHECK_PEER_ID_ENCRYPTED
-from DebTorrent.BTcrypto import CRYPTO_OK
-from T2T import T2TList
 from DebTorrent.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4
 from DebTorrent.iprangeparse import IP_List as IP_Range_List
 from DebTorrent.torrentlistparse import parsetorrentlist
@@ -41,64 +45,15 @@
     False = 0
     bool = lambda x: not not x
 
-defaults = [
-    ('port', 80, "Port to listen on."),
-    ('dfile', None, 'file to store recent downloader info in'),
-    ('bind', '', 'comma-separated list of ips/hostnames to bind to locally'),
-#    ('ipv6_enabled', autodetect_ipv6(),
-    ('ipv6_enabled', 0,
-         'allow the client to connect to peers via IPv6'),
-    ('ipv6_binds_v4', autodetect_socket_style(),
-        'set if an IPv6 server socket will also field IPv4 connections'),
-    ('socket_timeout', 15, 'timeout for closing connections'),
-    ('save_dfile_interval', 5 * 60, 'seconds between saving dfile'),
-    ('timeout_downloaders_interval', 45 * 60, 'seconds between expiring downloaders'),
-    ('reannounce_interval', 30 * 60, 'seconds downloaders should wait between reannouncements'),
-    ('response_size', 50, 'number of peers to send in an info message'),
-    ('timeout_check_interval', 5,
-        'time to wait between checking if any connections have timed out'),
-    ('nat_check', 3,
-        "how many times to check if a downloader is behind a NAT (0 = don't check)"),
-    ('log_nat_checks', 0,
-        "whether to add entries to the log for nat-check results"),
-    ('min_time_between_log_flushes', 3.0,
-        'minimum time it must have been since the last flush to do another one'),
-    ('min_time_between_cache_refreshes', 600.0,
-        'minimum time in seconds before a cache is considered stale and is flushed'),
-    ('allowed_dir', '', 'only allow downloads for .dtorrents in this dir'),
-    ('allowed_list', '', 'only allow downloads for hashes in this list (hex format, one per line)'),
-    ('allowed_controls', 0, 'allow special keys in torrents in the allowed_dir to affect tracker access'),
-    ('multitracker_enabled', 0, 'whether to enable multitracker operation'),
-    ('multitracker_allowed', 'autodetect', 'whether to allow incoming tracker announces (can be none, autodetect or all)'),
-    ('multitracker_reannounce_interval', 2 * 60, 'seconds between outgoing tracker announces'),
-    ('multitracker_maxpeers', 20, 'number of peers to get in a tracker announce'),
-    ('aggregate_forward', '', 'format: <url>[,<password>] - if set, forwards all non-multitracker to this url with this optional password'),
-    ('aggregator', '0', 'whether to act as a data aggregator rather than a tracker.  If enabled, may be 1, or <password>; ' +
-             'if password is set, then an incoming password is required for access'),
-    ('hupmonitor', 0, 'whether to reopen the log file upon receipt of HUP signal'),
-    ('http_timeout', 60, 
-        'number of seconds to wait before assuming that an http connection has timed out'),
-    ('parse_dir_interval', 60, 'seconds between reloading of allowed_dir or allowed_file ' +
-             'and allowed_ips and banned_ips lists'),
-    ('show_infopage', 1, "whether to display an info page when the tracker's root dir is loaded"),
-    ('infopage_redirect', '', 'a URL to redirect the info page to'),
-    ('show_names', 1, 'whether to display names from allowed dir'),
-    ('favicon', '', 'file containing x-icon data to return when browser requests favicon.ico'),
-    ('allowed_ips', '', 'only allow connections from IPs specified in the given file; '+
-             'file contains subnet data in the format: aa.bb.cc.dd/len'),
-    ('banned_ips', '', "don't allow connections from IPs specified in the given file; "+
-             'file contains IP range data in the format: xxx:xxx:ip1-ip2'),
-    ('only_local_override_ip', 2, "ignore the ip GET parameter from machines which aren't on local network IPs " +
-             "(0 = never, 1 = always, 2 = ignore if NAT checking is not enabled)"),
-    ('logfile', '', 'file to write the tracker logs, use - for stdout (default)'),
-    ('allow_get', 0, 'use with allowed_dir; adds a /file?hash={hash} url that allows users to download the torrent file'),
-    ('keep_dead', 0, 'keep dead torrents after they expire (so they still show up on your /scrape and web page)'),
-    ('scrape_allowed', 'full', 'scrape access allowed (can be none, specific or full)'),
-    ('dedicated_seed_id', '', 'allows tracker to monitor dedicated seed(s) and flag torrents as seeded'),
-    ('compact_reqd', 1, "only allow peers that accept a compact response"),
-  ]
-
 def statefiletemplate(x):
+    """Check the saved state file for corruption.
+    
+    @type x: C{dictionary}
+    @param x: the dictionary of information retrieved from the state file
+    @raise ValueError: if the state file info is corrupt
+    
+    """
+    
     if type(x) != DictType:
         raise ValueError
     for cname, cinfo in x.items():
@@ -153,66 +108,90 @@
 
 alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n'
 
-local_IPs = IP_List()
-local_IPs.set_intranet_addresses()
-
 
 def isotime(secs = None):
+    """Create an ISO formatted string of the time.
+    
+    @type secs: C{float}
+    @param secs: number of seconds since the epoch 
+        (optional, default is to use the current time)
+    @rtype: C{string}
+    @return: the ISO formatted string representation of the time
+    
+    """
+    
     if secs == None:
         secs = time()
     return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs))
 
-http_via_filter = re.compile(' for ([0-9.]+)\Z')
-
-def _get_forwarded_ip(headers):
-    header = headers.get('x-forwarded-for')
-    if header:
-        try:
-            x,y = header.split(',')
-        except:
-            return header
-        if is_valid_ip(x) and not local_IPs.includes(x):
-            return x
-        return y
-    header = headers.get('client-ip')
-    if header:
-        return header
-    header = headers.get('via')
-    if header:
-        x = http_via_filter.search(header)
-        try:
-            return x.group(1)
-        except:
-            pass
-    header = headers.get('from')
-    #if header:
-    #    return header
-    #return None
-    return header
-
-def get_forwarded_ip(headers):
-    x = _get_forwarded_ip(headers)
-    if not is_valid_ip(x) or local_IPs.includes(x):
-        return None
-    return x
-
-def compact_peer_info(ip, port):
-    try:
-        s = ( ''.join([chr(int(i)) for i in ip.split('.')])
-              + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) )
-        if len(s) != 6:
-            raise ValueError
-    except:
-        s = ''  # not a valid IP, must be a domain name
-    return s
-
 class AptListener:
+    """Listen for Apt requests to download files.
+    
+    @type config: C{dictionary}
+    @ivar config: the configuration parameters
+    @type dfile: C{string}
+    @ivar dfile: the state file to use when saving the current state
+    @type parse_dir_interval: C{int}
+    @ivar parse_dir_interval: seconds between reloading of the allowed 
+        directory or file, and the lists of allowed and banned IPs
+    @type favicon: C{string}
+    @ivar favicon: file containing x-icon data
+    @type rawserver: L{DebTorrent.RawServer}
+    @ivar rawserver: the server to use for scheduling
+    @type times: unknown
+    @ivar times: unknown
+    @type state: C{dictionary}
+    @ivar state: the current state information for the tracking
+    @type allowed_IPs: unknown
+    @ivar allowed_IPs: unknown
+    @type banned_IPs: unknown
+    @ivar banned_IPs: unknown
+    @type allowed_ip_mtime: unknown
+    @ivar allowed_ip_mtime: unknown
+    @type banned_ip_mtime: unknown
+    @ivar banned_ip_mtime: unknown
+    @type trackerid: unknown
+    @ivar trackerid: unknown
+    @type save_dfile_interval: C{int}
+    @ivar save_dfile_interval: seconds between saving the state file
+    @type show_names: C{boolean}
+    @ivar show_names: whether to display names from allowed dir
+    @type prevtime: unknown
+    @ivar prevtime: unknown
+    @type logfile: unknown
+    @ivar logfile: unknown
+    @type log: unknown
+    @ivar log: unknown
+    @type allow_get: unknown
+    @ivar allow_get: unknown
+    @type allowed: unknown
+    @ivar allowed: unknown
+    @type allowed_list_mtime: unknown
+    @ivar allowed_list_mtime: unknown
+    @type allowed_dir_files: unknown
+    @ivar allowed_dir_files: unknown
+    @type allowed_dir_blocked: unknown
+    @ivar allowed_dir_blocked: unknown
+    @type uq_broken: unknown
+    @ivar uq_broken: unknown
+    @type Filter: unknown
+    @ivar Filter: unknown
+    
+    """
+
     def __init__(self, config, rawserver):
+        """Initialize the instance.
+        
+        @type config: C{dictionary}
+        @param config: the configuration parameters
+        @type rawserver: L{DebTorrent.RawServer}
+        @param rawserver: the server to use for scheduling
+        
+        """
+        
         self.config = config
         return
-        self.response_size = config['response_size']
         self.dfile = config['dfile']
-        self.natcheck = config['nat_check']
         favicon = config['favicon']
         self.parse_dir_interval = config['parse_dir_interval']
         self.favicon = None
@@ -224,11 +203,8 @@
             except:
                 print "**warning** specified favicon file -- %s -- does not exist." % favicon
         self.rawserver = rawserver
-        self.cached = {}    # format: infohash: [[time1, l1, s1], [time2, l2, s2], ...]
-        self.cached_t = {}  # format: infohash: [time, cache]
         self.times = {}
         self.state = {}
-        self.seedcount = {}
 
         self.allowed_IPs = None
         self.banned_IPs = None
@@ -237,14 +213,6 @@
             self.banned_ip_mtime = 0
             self.read_ip_lists()
                 
-        self.only_local_override_ip = config['only_local_override_ip']
-        if self.only_local_override_ip == 2:
-            self.only_local_override_ip = not config['nat_check']
-
-        if CHECK_PEER_ID_ENCRYPTED and not CRYPTO_OK:
-            print ('**warning** crypto library not installed,' +
-                   ' cannot completely verify encrypted peers')
-
         if exists(self.dfile):
             try:
                 h = open(self.dfile, 'rb')
@@ -257,56 +225,14 @@
                 self.state = tempstate
             except:
                 print '**warning** statefile '+self.dfile+' corrupt; resetting'
-        self.downloads = self.state.setdefault('peers', {})
-        self.completed = self.state.setdefault('completed', {})
-
-        self.becache = {}
-        ''' format: infohash: [[l0, s0], [l1, s1], ...]
-                l0,s0 = compact, not requirecrypto=1
-                l1,s1 = compact, only supportcrypto=1
-                l2,s2 = [compact, crypto_flag], all peers
-            if --compact_reqd 0:
-                l3,s3 = [ip,port,id]
-                l4,l4 = [ip,port] nopeerid
-        '''
-        if config['compact_reqd']:
-            x = 3
-        else:
-            x = 5
-        self.cache_default = [({},{}) for i in xrange(x)]
-        for infohash, ds in self.downloads.items():
-            self.seedcount[infohash] = 0
-            for x,y in ds.items():
-                ip = y['ip']
-                if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
-                     or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
-                    del ds[x]
-                    continue
-                if not y['left']:
-                    self.seedcount[infohash] += 1
-                if y.get('nat',-1):
-                    continue
-                gip = y.get('given_ip')
-                if is_valid_ip(gip) and (
-                    not self.only_local_override_ip or local_IPs.includes(ip) ):
-                    ip = gip
-                self.natcheckOK(infohash,x,ip,y['port'],y)
-            
-        for x in self.downloads.keys():
-            self.times[x] = {}
-            for y in self.downloads[x].keys():
-                self.times[x][y] = 0
 
         self.trackerid = createPeerID('-T-')
         seed(self.trackerid)
                 
-        self.reannounce_interval = config['reannounce_interval']
         self.save_dfile_interval = config['save_dfile_interval']
         self.show_names = config['show_names']
         rawserver.add_task(self.save_state, self.save_dfile_interval)
         self.prevtime = clock()
-        self.timeout_downloaders_interval = config['timeout_downloaders_interval']
-        rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
         self.logfile = None
         self.log = None
         if (config['logfile']) and (config['logfile'] != '-'):
@@ -332,11 +258,6 @@
                 
         self.allow_get = config['allow_get']
         
-        self.t2tlist = T2TList(config['multitracker_enabled'], self.trackerid,
-                               config['multitracker_reannounce_interval'],
-                               config['multitracker_maxpeers'], config['http_timeout'],
-                               self.rawserver)
-
         if config['allowed_list']:
             if config['allowed_dir']:
                 print '**warning** allowed_dir and allowed_list options cannot be used together'
@@ -346,8 +267,6 @@
             self.allowed_list_mtime = 0
             self.parse_allowed()
             self.remove_from_state('allowed','allowed_dir_files')
-            if config['multitracker_allowed'] == 'autodetect':
-                config['multitracker_allowed'] = 'none'
             config['allowed_controls'] = 0
 
         elif config['allowed_dir']:
@@ -360,64 +279,22 @@
         else:
             self.allowed = None
             self.remove_from_state('allowed','allowed_dir_files', 'allowed_list')
-            if config['multitracker_allowed'] == 'autodetect':
-                config['multitracker_allowed'] = 'none'
             config['allowed_controls'] = 0
                 
         self.uq_broken = unquote('+') != ' '
-        self.keep_dead = config['keep_dead']
         self.Filter = Filter(rawserver.add_task)
-        
-        aggregator = config['aggregator']
-        if aggregator == '0':
-            self.is_aggregator = False
-            self.aggregator_key = None
-        else:
-            self.is_aggregator = True
-            if aggregator == '1':
-                self.aggregator_key = None
-            else:
-                self.aggregator_key = aggregator
-            self.natcheck = False
-                
-        send = config['aggregate_forward']
-        if not send:
-            self.aggregate_forward = None
-        else:
-            try:
-                self.aggregate_forward, self.aggregate_password = send.split(',')
-            except:
-                self.aggregate_forward = send
-                self.aggregate_password = None
-
-        self.dedicated_seed_id = config['dedicated_seed_id']
-        self.is_seeded = {}
-
-        self.cachetime = 0
-        self.cachetimeupdate()
-
-    def cachetimeupdate(self):
-        self.cachetime += 1     # raw clock, but more efficient for cache
-        self.rawserver.add_task(self.cachetimeupdate,1)
-
-    def aggregate_senddata(self, query):
-        url = self.aggregate_forward+'?'+query
-        if self.aggregate_password is not None:
-            url += '&password='+self.aggregate_password
-        rq = Thread(target = self._aggregate_senddata, args = [url])
-        rq.setDaemon(False)
-        rq.start()
-
-    def _aggregate_senddata(self, url):     # just send, don't attempt to error check,
-        try:                                # discard any returned data
-            h = urlopen(url)
-            h.read()
-            h.close()
-        except:
-            return
-
 
     def get_infopage(self):
+        """Format the info page to display for normal browsers.
+        
+        Formats the currently tracked torrents into a table in human-readable
+        format to display in a browser window.
+        
+        @rtype: (C{int}, C{string}, C{dictionary}, C{string})
+        @return: the HTTP status code, status message, headers, and message body
+        
+        """
+        
         try:
             if not self.config['show_infopage']:
                 return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
@@ -509,59 +386,42 @@
             return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error')
 
 
-    def scrapedata(self, hash, return_name = True):
-        l = self.downloads[hash]
-        n = self.completed.get(hash, 0)
-        c = self.seedcount[hash]
-        d = len(l) - c
-        f = {'complete': c, 'incomplete': d, 'downloaded': n}
-        if return_name and self.show_names and self.config['allowed_dir']:
-            f['name'] = self.allowed[hash]['name']
-        return (f)
-
-    def get_scrape(self, paramslist):
-        fs = {}
-        if paramslist.has_key('info_hash'):
-            if self.config['scrape_allowed'] not in ['specific', 'full']:
-                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
-                    bencode({'failure reason':
-                    'specific scrape function is not available with this tracker.'}))
-            for hash in paramslist['info_hash']:
-                if self.allowed is not None:
-                    if self.allowed.has_key(hash):
-                        fs[hash] = self.scrapedata(hash)
-                else:
-                    if self.downloads.has_key(hash):
-                        fs[hash] = self.scrapedata(hash)
-        else:
-            if self.config['scrape_allowed'] != 'full':
-                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
-                    bencode({'failure reason':
-                    'full scrape function is not available with this tracker.'}))
-            if self.allowed is not None:
-                keys = self.allowed.keys()
-            else:
-                keys = self.downloads.keys()
-            for hash in keys:
-                fs[hash] = self.scrapedata(hash)
-
-        return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs}))
-
-
     def get_file(self, hash):
-         if not self.allow_get:
-             return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
-                 'get function is not available with this tracker.')
-         if not self.allowed.has_key(hash):
-             return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
-         fname = self.allowed[hash]['file']
-         fpath = self.allowed[hash]['path']
-         return (200, 'OK', {'Content-Type': 'application/x-debtorrent',
-             'Content-Disposition': 'attachment; filename=' + fname},
-             open(fpath, 'rb').read())
+        """Get the metainfo file for a torrent.
+        
+        @type hash: C{string}
+        @param hash: the infohash of the torrent to get the metainfo of
+        @rtype: (C{int}, C{string}, C{dictionary}, C{string})
+        @return: the HTTP status code, status message, headers, and bencoded 
+            metainfo file
+        
+        """
+        
+        if not self.allow_get:
+            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                'get function is not available with this tracker.')
+        if not self.allowed.has_key(hash):
+            return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+        fname = self.allowed[hash]['file']
+        fpath = self.allowed[hash]['path']
+        return (200, 'OK', {'Content-Type': 'application/x-debtorrent',
+            'Content-Disposition': 'attachment; filename=' + fname},
+            open(fpath, 'rb').read())
 
 
     def check_allowed(self, infohash, paramslist):
+        """Determine whether the tracker is tracking this torrent.
+        
+        @type infohash: C{string}
+        @param infohash: the infohash of the torrent to check
+        @type paramslist: C{dictionary}
+        @param paramslist: the query parameters from the GET request
+        @rtype: (C{int}, C{string}, C{dictionary}, C{string})
+        @return: the HTTP status code, status message, headers, and bencoded 
+            message body if the request is not allowed, or None if it is
+
+        """
+        
         if ( self.aggregator_key is not None
                 and not ( paramslist.has_key('password')
                         and paramslist['password'][0] == self.aggregator_key ) ):
@@ -594,257 +454,25 @@
         return None
 
 
-    def add_data(self, infohash, event, ip, paramslist):
-        peers = self.downloads.setdefault(infohash, {})
-        ts = self.times.setdefault(infohash, {})
-        self.completed.setdefault(infohash, 0)
-        self.seedcount.setdefault(infohash, 0)
-
-        def params(key, default = None, l = paramslist):
-            if l.has_key(key):
-                return l[key][0]
-            return default
-        
-        myid = params('peer_id','')
-        if len(myid) != 20:
-            raise ValueError, 'id not of length 20'
-        if event not in ['started', 'completed', 'stopped', 'snooped', None]:
-            raise ValueError, 'invalid event'
-        port = params('cryptoport')
-        if port is None:
-            port = params('port','')
-        port = long(port)
-        if port < 0 or port > 65535:
-            raise ValueError, 'invalid port'
-        left = long(params('left',''))
-        if left < 0:
-            raise ValueError, 'invalid amount left'
-        uploaded = long(params('uploaded',''))
-        downloaded = long(params('downloaded',''))
-        if params('supportcrypto'):
-            supportcrypto = 1
-            try:
-                s = int(params['requirecrypto'])
-                chr(s)
-            except:
-                s = 0
-            requirecrypto = s
-        else:
-            supportcrypto = 0
-            requirecrypto = 0
-
-        peer = peers.get(myid)
-        islocal = local_IPs.includes(ip)
-        mykey = params('key')
-        if peer:
-            auth = peer.get('key',-1) == mykey or peer.get('ip') == ip
-
-        gip = params('ip')
-        if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
-            ip1 = gip
-        else:
-            ip1 = ip
-
-        if params('numwant') is not None:
-            rsize = min(int(params('numwant')),self.response_size)
-        else:
-            rsize = self.response_size
-
-        if event == 'stopped':
-            if peer:
-                if auth:
-                    self.delete_peer(infohash,myid)
-        
-        elif not peer:
-            ts[myid] = clock()
-            peer = { 'ip': ip, 'port': port, 'left': left,
-                     'supportcrypto': supportcrypto,
-                     'requirecrypto': requirecrypto }
-            if mykey:
-                peer['key'] = mykey
-            if gip:
-                peer['given ip'] = gip
-            if port:
-                if not self.natcheck or islocal:
-                    peer['nat'] = 0
-                    self.natcheckOK(infohash,myid,ip1,port,peer)
-                else:
-                    NatCheck(self.connectback_result,infohash,myid,ip1,port,
-                             self.rawserver,encrypted=requirecrypto)
-            else:
-                peer['nat'] = 2**30
-            if event == 'completed':
-                self.completed[infohash] += 1
-            if not left:
-                self.seedcount[infohash] += 1
-                
-            peers[myid] = peer
-
-        else:
-            if not auth:
-                return rsize    # return w/o changing stats
-
-            ts[myid] = clock()
-            if not left and peer['left']:
-                self.completed[infohash] += 1
-                self.seedcount[infohash] += 1
-                if not peer.get('nat', -1):
-                    for bc in self.becache[infohash]:
-                        if bc[0].has_key(myid):
-                            bc[1][myid] = bc[0][myid]
-                            del bc[0][myid]
-            elif left and not peer['left']:
-                self.completed[infohash] -= 1
-                self.seedcount[infohash] -= 1
-                if not peer.get('nat', -1):
-                    for bc in self.becache[infohash]:
-                        if bc[1].has_key(myid):
-                            bc[0][myid] = bc[1][myid]
-                            del bc[1][myid]
-            peer['left'] = left
-
-            if port:
-                recheck = False
-                if ip != peer['ip']:
-                    peer['ip'] = ip
-                    recheck = True
-                if gip != peer.get('given ip'):
-                    if gip:
-                        peer['given ip'] = gip
-                    elif peer.has_key('given ip'):
-                        del peer['given ip']
-                    recheck = True
-
-                natted = peer.get('nat', -1)
-                if recheck:
-                    if natted == 0:
-                        l = self.becache[infohash]
-                        y = not peer['left']
-                        for x in l:
-                            del x[y][myid]
-                    if natted >= 0:
-                        del peer['nat'] # restart NAT testing
-                if natted and natted < self.natcheck:
-                    recheck = True
-
-                if recheck:
-                    if not self.natcheck or islocal:
-                        peer['nat'] = 0
-                        self.natcheckOK(infohash,myid,ip1,port,peer)
-                    else:
-                        NatCheck(self.connectback_result,infohash,myid,ip1,port,
-                                 self.rawserver,encrypted=requirecrypto)
-
-        return rsize
-
-
-    def peerlist(self, infohash, stopped, tracker, is_seed,
-                 return_type, rsize, supportcrypto):
-        data = {}    # return data
-        seeds = self.seedcount[infohash]
-        data['complete'] = seeds
-        data['incomplete'] = len(self.downloads[infohash]) - seeds
-        
-        if ( self.config['allowed_controls']
-                and self.allowed[infohash].has_key('warning message') ):
-            data['warning message'] = self.allowed[infohash]['warning message']
-
-        if tracker:
-            data['interval'] = self.config['multitracker_reannounce_interval']
-            if not rsize:
-                return data
-            cache = self.cached_t.setdefault(infohash, None)
-            if ( not cache or len(cache[1]) < rsize
-                 or cache[0] + self.config['min_time_between_cache_refreshes'] < clock() ):
-                bc = self.becache.setdefault(infohash,self.cache_default)
-                cache = [ clock(), bc[0][0].values() + bc[0][1].values() ]
-                self.cached_t[infohash] = cache
-                shuffle(cache[1])
-                cache = cache[1]
-
-            data['peers'] = cache[-rsize:]
-            del cache[-rsize:]
-            return data
-
-        data['interval'] = self.reannounce_interval
-        if stopped or not rsize:     # save some bandwidth
-            data['peers'] = []
-            return data
-
-        bc = self.becache.setdefault(infohash,self.cache_default)
-        len_l = len(bc[2][0])
-        len_s = len(bc[2][1])
-        if not (len_l+len_s):   # caches are empty!
-            data['peers'] = []
-            return data
-        l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
-        cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
-        if cache and ( not cache[1]
-                       or (is_seed and len(cache[1]) < rsize)
-                       or len(cache[1]) < l_get_size
-                       or cache[0]+self.config['min_time_between_cache_refreshes'] < self.cachetime ):
-            cache = None
-        if not cache:
-            peers = self.downloads[infohash]
-            if self.config['compact_reqd']:
-                vv = ([],[],[])
-            else:
-                vv = ([],[],[],[],[])
-            for key, ip, port in self.t2tlist.harvest(infohash):   # empty if disabled
-                if not peers.has_key(key):
-                    cp = compact_peer_info(ip, port)
-                    vv[0].append(cp)
-                    vv[2].append((cp,'\x00'))
-                    if not self.config['compact_reqd']:
-                        vv[3].append({'ip': ip, 'port': port, 'peer id': key})
-                        vv[4].append({'ip': ip, 'port': port})
-            cache = [ self.cachetime,
-                      bc[return_type][0].values()+vv[return_type],
-                      bc[return_type][1].values() ]
-            shuffle(cache[1])
-            shuffle(cache[2])
-            self.cached[infohash][return_type] = cache
-            for rr in xrange(len(self.cached[infohash])):
-                if rr != return_type:
-                    try:
-                        self.cached[infohash][rr][1].extend(vv[rr])
-                    except:
-                        pass
-        if len(cache[1]) < l_get_size:
-            peerdata = cache[1]
-            if not is_seed:
-                peerdata.extend(cache[2])
-            cache[1] = []
-            cache[2] = []
-        else:
-            if not is_seed:
-                peerdata = cache[2][l_get_size-rsize:]
-                del cache[2][l_get_size-rsize:]
-                rsize -= len(peerdata)
-            else:
-                peerdata = []
-            if rsize:
-                peerdata.extend(cache[1][-rsize:])
-                del cache[1][-rsize:]
-        if return_type == 0:
-            data['peers'] = ''.join(peerdata)
-        elif return_type == 1:
-            data['crypto_flags'] = "0x01"*len(peerdata)
-            data['peers'] = ''.join(peerdata)
-        elif return_type == 2:
-            data['crypto_flags'] = ''.join([p[1] for p in peerdata])
-            data['peers'] = ''.join([p[0] for p in peerdata])
-        else:
-            data['peers'] = peerdata
-        return data
-
-
     def get(self, connection, path, headers):
-        print 'URL: ' + path + '\n'
-        print 'HEADERS: ',
-        print headers,
-        print
-        return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+        """Respond to a GET request.
+        
+        Process a GET request from APT/browser/other. Process the request,
+        calling the helper functions above if needed. Return the response to
+        be returned to the requester.
+        
+        @type connection: unknown
+        @param connection: the connection the request came in on
+        @type path: C{string}
+        @param path: the URL being requested
+        @type headers: C{dictionary}
+        @param headers: the headers from the request
+        @rtype: (C{int}, C{string}, C{dictionary}, C{string})
+        @return: the HTTP status code, status message, headers, and message body
+        
+        """
+        
+#        return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
         real_ip = connection.get_ip()
         ip = real_ip
         if is_ipv4(ip):
@@ -873,6 +501,20 @@
 
         paramslist = {}
         def params(key, default = None, l = paramslist):
+            """Get the user parameter, or the default.
+            
+            @type key: C{string}
+            @param key: the parameter to get
+            @type default: C{string}
+            @param default: the default value to use if no parameter is set
+                (optional, defaults to None)
+            @type l: C{dictionary}
+            @param l: the user parameters (optional, defaults to L{paramslist})
+            @rtype: C{string}
+            @return: the parameter's value
+            
+            """
+            
             if l.has_key(key):
                 return l[key][0]
             return default
@@ -966,54 +608,8 @@
         return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data))
 
 
-    def natcheckOK(self, infohash, peerid, ip, port, peer):
-        seed = not peer['left']
-        bc = self.becache.setdefault(infohash,self.cache_default)
-        cp = compact_peer_info(ip, port)
-        reqc = peer['requirecrypto']
-        bc[2][seed][peerid] = (cp,chr(reqc))
-        if peer['supportcrypto']:
-            bc[1][seed][peerid] = cp
-        if not reqc:
-            bc[0][seed][peerid] = cp
-            if not self.config['compact_reqd']:
-                bc[3][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
-                                                         'peer id': peerid}))
-                bc[4][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
-
-
-    def natchecklog(self, peerid, ip, port, result):
-        year, month, day, hour, minute, second, a, b, c = localtime(time())
-        print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % (
-            ip, quote(peerid), day, months[month], year, hour, minute, second,
-            ip, port, result)
-
-    def connectback_result(self, result, downloadid, peerid, ip, port):
-        record = self.downloads.get(downloadid,{}).get(peerid)
-        if ( record is None 
-                 or (record['ip'] != ip and record.get('given ip') != ip)
-                 or record['port'] != port ):
-            if self.config['log_nat_checks']:
-                self.natchecklog(peerid, ip, port, 404)
-            return
-        if self.config['log_nat_checks']:
-            if result:
-                x = 200
-            else:
-                x = 503
-            self.natchecklog(peerid, ip, port, x)
-        if not record.has_key('nat'):
-            record['nat'] = int(not result)
-            if result:
-                self.natcheckOK(downloadid,peerid,ip,port,record)
-        elif result and record['nat']:
-            record['nat'] = 0
-            self.natcheckOK(downloadid,peerid,ip,port,record)
-        elif not result:
-            record['nat'] += 1
-
-
     def remove_from_state(self, *l):
+        """Remove all the input parameter names from the current state."""
         for s in l:
             try:
                 del self.state[s]
@@ -1021,6 +617,7 @@
                 pass
 
     def save_state(self):
+        """Save the state file to disk."""
         self.rawserver.add_task(self.save_state, self.save_dfile_interval)
         h = open(self.dfile, 'wb')
         h.write(bencode(self.state))
@@ -1028,6 +625,7 @@
 
 
     def parse_allowed(self):
+        """Periodically parse the directory and list for allowed torrents."""
         self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval)
 
         if self.config['allowed_dir']:
@@ -1062,6 +660,7 @@
 
 
     def read_ip_lists(self):
+        """Periodically parse the allowed and banned IP lists."""
         self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval)
             
         f = self.config['allowed_ips']
@@ -1083,37 +682,16 @@
                 print '**warning** unable to read banned_IP list'
                 
 
-    def delete_peer(self, infohash, peerid):
-        dls = self.downloads[infohash]
-        peer = dls[peerid]
-        if not peer['left']:
-            self.seedcount[infohash] -= 1
-        if not peer.get('nat',-1):
-            l = self.becache[infohash]
-            y = not peer['left']
-            for x in l:
-                if x[y].has_key(peerid):
-                    del x[y][peerid]
-        del self.times[infohash][peerid]
-        del dls[peerid]
-
-    def expire_downloaders(self):
-        for x in self.times.keys():
-            for myid, t in self.times[x].items():
-                if t < self.prevtime:
-                    self.delete_peer(x,myid)
-        self.prevtime = clock()
-        if (self.keep_dead != 1):
-            for key, value in self.downloads.items():
-                if len(value) == 0 and (
-                        self.allowed is None or not self.allowed.has_key(key) ):
-                    del self.times[key]
-                    del self.downloads[key]
-                    del self.seedcount[key]
-        self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
-
-
 def size_format(s):
+    """Format a byte size for reading by the user.
+    
+    @type s: C{long}
+    @param s: the number of bytes
+    @rtype: C{string}
+    @return: the formatted size with appropriate units
+    
+    """
+    
     if (s < 1024):
         r = str(s) + 'B'
     elif (s < 1048576):




More information about the Debtorrent-commits mailing list