[Debtorrent-commits] r46 - in /debtorrent/branches/http-listen:
DebTorrent/BT1/AptListener.py
DebTorrent/RawServer.py DebTorrent/SocketHandler.py
DebTorrent/download_bt1.py DebTorrent/launchmanycore.py
btdownloadheadless.py
camrdale-guest at users.alioth.debian.org
camrdale-guest at users.alioth.debian.org
Thu May 10 06:41:08 UTC 2007
Author: camrdale-guest
Date: Thu May 10 06:41:07 2007
New Revision: 46
URL: http://svn.debian.org/wsvn/debtorrent/?sc=1&rev=46
Log:
Made downloaders listen to another port, with an HTTPHandler for it, and an AptListener copied from the tracker
Added:
debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py
Modified:
debtorrent/branches/http-listen/DebTorrent/RawServer.py
debtorrent/branches/http-listen/DebTorrent/SocketHandler.py
debtorrent/branches/http-listen/DebTorrent/download_bt1.py
debtorrent/branches/http-listen/DebTorrent/launchmanycore.py
debtorrent/branches/http-listen/btdownloadheadless.py
Added: debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py?rev=46&op=file
==============================================================================
--- debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py (added)
+++ debtorrent/branches/http-listen/DebTorrent/BT1/AptListener.py Thu May 10 06:41:07 2007
@@ -1,0 +1,1123 @@
+# Written by Cameron Dale
+# see LICENSE.txt for license information
+
+# $Id$
+
+from DebTorrent.parseargs import parseargs, formatDefinitions
+from DebTorrent.RawServer import RawServer, autodetect_ipv6, autodetect_socket_style
+from DebTorrent.HTTPHandler import HTTPHandler, months, weekdays
+from DebTorrent.parsedir import parsedir
+from NatCheck import NatCheck, CHECK_PEER_ID_ENCRYPTED
+from DebTorrent.BTcrypto import CRYPTO_OK
+from T2T import T2TList
+from DebTorrent.subnetparse import IP_List, ipv6_to_ipv4, to_ipv4, is_valid_ip, is_ipv4
+from DebTorrent.iprangeparse import IP_List as IP_Range_List
+from DebTorrent.torrentlistparse import parsetorrentlist
+from threading import Event, Thread
+from DebTorrent.bencode import bencode, bdecode, Bencached
+from DebTorrent.zurllib import urlopen, quote, unquote
+from Filter import Filter
+from urlparse import urlparse
+from os import rename, getpid
+from os.path import exists, isfile
+from cStringIO import StringIO
+from traceback import print_exc
+from time import time, gmtime, strftime, localtime
+from DebTorrent.clock import clock
+from random import shuffle, seed, randrange
+from sha import sha
+from types import StringType, IntType, LongType, ListType, DictType
+from binascii import b2a_hex, a2b_hex, a2b_base64
+from string import lower
+import sys, os
+import signal
+import re
+import DebTorrent.__init__
+from DebTorrent.__init__ import version, createPeerID
+try:
+ True
+except:
+ True = 1
+ False = 0
+ bool = lambda x: not not x
+
+# (option name, default value, help text) triples consumed by
+# DebTorrent.parseargs.  NOTE(review): copied verbatim from the tracker;
+# several options look tracker-specific and may be unused by AptListener.
+defaults = [
+    ('port', 80, "Port to listen on."),
+    ('dfile', None, 'file to store recent downloader info in'),
+    ('bind', '', 'comma-separated list of ips/hostnames to bind to locally'),
+#    ('ipv6_enabled', autodetect_ipv6(),
+    ('ipv6_enabled', 0,
+         'allow the client to connect to peers via IPv6'),
+    ('ipv6_binds_v4', autodetect_socket_style(),
+        'set if an IPv6 server socket will also field IPv4 connections'),
+    ('socket_timeout', 15, 'timeout for closing connections'),
+    ('save_dfile_interval', 5 * 60, 'seconds between saving dfile'),
+    ('timeout_downloaders_interval', 45 * 60, 'seconds between expiring downloaders'),
+    ('reannounce_interval', 30 * 60, 'seconds downloaders should wait between reannouncements'),
+    ('response_size', 50, 'number of peers to send in an info message'),
+    ('timeout_check_interval', 5,
+        'time to wait between checking if any connections have timed out'),
+    ('nat_check', 3,
+        "how many times to check if a downloader is behind a NAT (0 = don't check)"),
+    ('log_nat_checks', 0,
+        "whether to add entries to the log for nat-check results"),
+    ('min_time_between_log_flushes', 3.0,
+        'minimum time it must have been since the last flush to do another one'),
+    ('min_time_between_cache_refreshes', 600.0,
+        'minimum time in seconds before a cache is considered stale and is flushed'),
+    ('allowed_dir', '', 'only allow downloads for .dtorrents in this dir'),
+    ('allowed_list', '', 'only allow downloads for hashes in this list (hex format, one per line)'),
+    ('allowed_controls', 0, 'allow special keys in torrents in the allowed_dir to affect tracker access'),
+    ('multitracker_enabled', 0, 'whether to enable multitracker operation'),
+    ('multitracker_allowed', 'autodetect', 'whether to allow incoming tracker announces (can be none, autodetect or all)'),
+    ('multitracker_reannounce_interval', 2 * 60, 'seconds between outgoing tracker announces'),
+    ('multitracker_maxpeers', 20, 'number of peers to get in a tracker announce'),
+    ('aggregate_forward', '', 'format: <url>[,<password>] - if set, forwards all non-multitracker to this url with this optional password'),
+    ('aggregator', '0', 'whether to act as a data aggregator rather than a tracker. If enabled, may be 1, or <password>; ' +
+             'if password is set, then an incoming password is required for access'),
+    ('hupmonitor', 0, 'whether to reopen the log file upon receipt of HUP signal'),
+    ('http_timeout', 60,
+        'number of seconds to wait before assuming that an http connection has timed out'),
+    ('parse_dir_interval', 60, 'seconds between reloading of allowed_dir or allowed_file ' +
+             'and allowed_ips and banned_ips lists'),
+    ('show_infopage', 1, "whether to display an info page when the tracker's root dir is loaded"),
+    ('infopage_redirect', '', 'a URL to redirect the info page to'),
+    ('show_names', 1, 'whether to display names from allowed dir'),
+    ('favicon', '', 'file containing x-icon data to return when browser requests favicon.ico'),
+    ('allowed_ips', '', 'only allow connections from IPs specified in the given file; '+
+             'file contains subnet data in the format: aa.bb.cc.dd/len'),
+    ('banned_ips', '', "don't allow connections from IPs specified in the given file; "+
+             'file contains IP range data in the format: xxx:xxx:ip1-ip2'),
+    ('only_local_override_ip', 2, "ignore the ip GET parameter from machines which aren't on local network IPs " +
+             "(0 = never, 1 = always, 2 = ignore if NAT checking is not enabled)"),
+    ('logfile', '', 'file to write the tracker logs, use - for stdout (default)'),
+    ('allow_get', 0, 'use with allowed_dir; adds a /file?hash={hash} url that allows users to download the torrent file'),
+    ('keep_dead', 0, 'keep dead torrents after they expire (so they still show up on your /scrape and web page)'),
+    ('scrape_allowed', 'full', 'scrape access allowed (can be none, specific or full)'),
+    ('dedicated_seed_id', '', 'allows tracker to monitor dedicated seed(s) and flag torrents as seeded'),
+    ('compact_reqd', 1, "only allow peers that accept a compact response"),
+    ]
+
+def statefiletemplate(x):
+    """Sanity-check a decoded state (dfile) dictionary.
+
+    Raises ValueError as soon as any entry does not match the expected
+    layout; returns None when the whole structure looks valid.
+    """
+    if type(x) != DictType:
+        raise ValueError
+    for cname, cinfo in x.items():
+        if cname == 'peers':
+            for y in cinfo.values(): # 'peers' maps info_hash -> dict of peers for that torrent
+                if type(y) != DictType:
+                    raise ValueError
+                for id, info in y.items(): # peer id -> per-peer info dict
+                    if (len(id) != 20): # peer ids are always 20 bytes
+                        raise ValueError
+                    if type(info) != DictType: # per-peer dict: ip, port, bytes left, crypto flags
+                        raise ValueError
+                    if type(info.get('ip', '')) != StringType:
+                        raise ValueError
+                    port = info.get('port')
+                    if type(port) not in (IntType,LongType) or port < 0:
+                        raise ValueError
+                    left = info.get('left')
+                    if type(left) not in (IntType,LongType) or left < 0:
+                        raise ValueError
+                    if type(info.get('supportcrypto')) not in (IntType,LongType):
+                        raise ValueError
+                    if type(info.get('requirecrypto')) not in (IntType,LongType):
+                        raise ValueError
+        elif cname == 'completed':
+            if (type(cinfo) != DictType): # 'completed' maps info_hash -> completion count
+                raise ValueError
+            for y in cinfo.values(): # counts are integers
+                if type(y) not in (IntType,LongType):
+                    raise ValueError
+        elif cname == 'allowed':
+            if (type(cinfo) != DictType): # info_hash -> included torrent data
+                raise ValueError
+            if x.has_key('allowed_dir_files'):
+                adlist = [z[1] for z in x['allowed_dir_files'].values()]
+                for y in cinfo.keys(): # every allowed hash must come from the dir scan
+                    if not y in adlist:
+                        raise ValueError
+        elif cname == 'allowed_dir_files':
+            if (type(cinfo) != DictType): # file -> (attributes, info_hash)
+                raise ValueError
+            dirkeys = {}
+            for y in cinfo.values(): # each entry needs a matching, unique info_hash
+                if not y[1]:
+                    continue
+                if not x['allowed'].has_key(y[1]):
+                    raise ValueError
+                if dirkeys.has_key(y[1]): # duplicate info_hash across files is invalid
+                    raise ValueError
+                dirkeys[y[1]] = 1
+
+
+# Stock body text for "not found"-style responses.
+alas = 'your file may exist elsewhere in the universe\nbut alas, not here\n'
+
+# Table of intranet/loopback address ranges, used to recognize local peers.
+local_IPs = IP_List()
+local_IPs.set_intranet_addresses()
+
+
+def isotime(secs = None):
+    """Return `secs` (seconds since epoch; default: now) as 'YYYY-MM-DD HH:MM UTC'."""
+    if secs == None: # NOTE(review): `secs is None` is the usual idiom
+        secs = time()
+    return strftime('%Y-%m-%d %H:%M UTC', gmtime(secs))
+
+# Matches the trailing " for <ip>" clause of an HTTP Via: header.
+http_via_filter = re.compile(' for ([0-9.]+)\Z')
+
+def _get_forwarded_ip(headers):
+    """Extract a proxy-reported client IP from the request headers.
+
+    Tries X-Forwarded-For, then Client-IP, then Via, then From.  May
+    return None or an unvalidated string; callers must validate
+    (see get_forwarded_ip below).
+    """
+    header = headers.get('x-forwarded-for')
+    if header:
+        try:
+            x,y = header.split(',') # expects exactly "client, proxy"
+        except:
+            return header # zero or several commas: return the raw header
+        if is_valid_ip(x) and not local_IPs.includes(x):
+            return x
+        return y
+    header = headers.get('client-ip')
+    if header:
+        return header
+    header = headers.get('via')
+    if header:
+        x = http_via_filter.search(header)
+        try:
+            return x.group(1) # x is None when the pattern did not match
+        except:
+            pass
+    header = headers.get('from')
+    #if header:
+    #    return header
+    #return None
+    return header
+
+def get_forwarded_ip(headers):
+    """Return the proxy-reported client IP, or None if missing, invalid or local."""
+    x = _get_forwarded_ip(headers)
+    if not is_valid_ip(x) or local_IPs.includes(x):
+        return None
+    return x
+
+def compact_peer_info(ip, port):
+    """Pack a dotted-quad IP and port into the 6-byte compact peer format.
+
+    Returns '' when `ip` is not a valid IPv4 address (e.g. a host name).
+    """
+    try:
+        s = ( ''.join([chr(int(i)) for i in ip.split('.')])
+              + chr((port & 0xFF00) >> 8) + chr(port & 0xFF) )
+        if len(s) != 6:
+            raise ValueError
+    except:
+        s = '' # not a valid IP, must be a domain name
+    return s
+
+class AptListener:
+    def __init__(self, config, rawserver):
+        """Store the config and rawserver for this download listener.
+
+        NOTE(review): the bare `return` below makes everything after it
+        dead code -- the remainder is the tracker's initialization, kept
+        while this class is being adapted from the tracker (per the commit
+        log).  Either delete it or remove the `return` once adapted.
+        """
+        self.config = config
+        return
+        # ---- unreachable tracker initialization from here down ----
+        self.response_size = config['response_size']
+        self.dfile = config['dfile']
+        self.natcheck = config['nat_check']
+        favicon = config['favicon']
+        self.parse_dir_interval = config['parse_dir_interval']
+        self.favicon = None
+        if favicon:
+            try:
+                h = open(favicon,'r')
+                self.favicon = h.read()
+                h.close()
+            except:
+                print "**warning** specified favicon file -- %s -- does not exist." % favicon
+        self.rawserver = rawserver
+        self.cached = {} # format: infohash: [[time1, l1, s1], [time2, l2, s2], ...]
+        self.cached_t = {} # format: infohash: [time, cache]
+        self.times = {}
+        self.state = {}
+        self.seedcount = {}
+
+        self.allowed_IPs = None
+        self.banned_IPs = None
+        if config['allowed_ips'] or config['banned_ips']:
+            self.allowed_ip_mtime = 0
+            self.banned_ip_mtime = 0
+            self.read_ip_lists()
+
+        self.only_local_override_ip = config['only_local_override_ip']
+        if self.only_local_override_ip == 2:
+            # mode 2: only honor the ip= override when NAT checking is off
+            self.only_local_override_ip = not config['nat_check']
+
+        if CHECK_PEER_ID_ENCRYPTED and not CRYPTO_OK:
+            print ('**warning** crypto library not installed,' +
+                   ' cannot completely verify encrypted peers')
+
+        # Reload previously saved state, discarding it if it fails validation.
+        if exists(self.dfile):
+            try:
+                h = open(self.dfile, 'rb')
+                ds = h.read()
+                h.close()
+                tempstate = bdecode(ds)
+                if not tempstate.has_key('peers'):
+                    tempstate = {'peers': tempstate}
+                statefiletemplate(tempstate)
+                self.state = tempstate
+            except:
+                print '**warning** statefile '+self.dfile+' corrupt; resetting'
+        self.downloads = self.state.setdefault('peers', {})
+        self.completed = self.state.setdefault('completed', {})
+
+        self.becache = {}
+        ''' format: infohash: [[l0, s0], [l1, s1], ...]
+                l0,s0 = compact, not requirecrypto=1
+                l1,s1 = compact, only supportcrypto=1
+                l2,s2 = [compact, crypto_flag], all peers
+            if --compact_reqd 0:
+                l3,s3 = [ip,port,id]
+                l4,l4 = [ip,port] nopeerid
+        '''
+        if config['compact_reqd']:
+            x = 3
+        else:
+            x = 5
+        self.cache_default = [({},{}) for i in xrange(x)]
+        # Rebuild per-torrent seed counts and NAT caches from the loaded state.
+        for infohash, ds in self.downloads.items():
+            self.seedcount[infohash] = 0
+            for x,y in ds.items():
+                ip = y['ip']
+                if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
+                     or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
+                    del ds[x]
+                    continue
+                if not y['left']:
+                    self.seedcount[infohash] += 1
+                if y.get('nat',-1):
+                    continue
+                gip = y.get('given_ip')
+                if is_valid_ip(gip) and (
+                    not self.only_local_override_ip or local_IPs.includes(ip) ):
+                    ip = gip
+                self.natcheckOK(infohash,x,ip,y['port'],y)
+
+        for x in self.downloads.keys():
+            self.times[x] = {}
+            for y in self.downloads[x].keys():
+                self.times[x][y] = 0
+
+        self.trackerid = createPeerID('-T-')
+        seed(self.trackerid)
+
+        self.reannounce_interval = config['reannounce_interval']
+        self.save_dfile_interval = config['save_dfile_interval']
+        self.show_names = config['show_names']
+        rawserver.add_task(self.save_state, self.save_dfile_interval)
+        self.prevtime = clock()
+        self.timeout_downloaders_interval = config['timeout_downloaders_interval']
+        rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
+        self.logfile = None
+        self.log = None
+        # Optionally redirect stdout into the configured log file.
+        if (config['logfile']) and (config['logfile'] != '-'):
+            try:
+                self.logfile = config['logfile']
+                self.log = open(self.logfile,'a')
+                sys.stdout = self.log
+                print "# Log Started: ", isotime()
+            except:
+                print "**warning** could not redirect stdout to log file: ", sys.exc_info()[0]
+
+        if config['hupmonitor']:
+            # Reopen the log on SIGHUP so external log rotation works.
+            def huphandler(signum, frame, self = self):
+                try:
+                    self.log.close ()
+                    self.log = open(self.logfile,'a')
+                    sys.stdout = self.log
+                    print "# Log reopened: ", isotime()
+                except:
+                    print "**warning** could not reopen logfile"
+
+            signal.signal(signal.SIGHUP, huphandler)
+
+        self.allow_get = config['allow_get']
+
+        self.t2tlist = T2TList(config['multitracker_enabled'], self.trackerid,
+                               config['multitracker_reannounce_interval'],
+                               config['multitracker_maxpeers'], config['http_timeout'],
+                               self.rawserver)
+
+        # allowed_list and allowed_dir are mutually exclusive access controls.
+        if config['allowed_list']:
+            if config['allowed_dir']:
+                print '**warning** allowed_dir and allowed_list options cannot be used together'
+                print '**warning** disregarding allowed_dir'
+                config['allowed_dir'] = ''
+            self.allowed = self.state.setdefault('allowed_list',{})
+            self.allowed_list_mtime = 0
+            self.parse_allowed()
+            self.remove_from_state('allowed','allowed_dir_files')
+            if config['multitracker_allowed'] == 'autodetect':
+                config['multitracker_allowed'] = 'none'
+            config['allowed_controls'] = 0
+
+        elif config['allowed_dir']:
+            self.allowed = self.state.setdefault('allowed',{})
+            self.allowed_dir_files = self.state.setdefault('allowed_dir_files',{})
+            self.allowed_dir_blocked = {}
+            self.parse_allowed()
+            self.remove_from_state('allowed_list')
+
+        else:
+            self.allowed = None
+            self.remove_from_state('allowed','allowed_dir_files', 'allowed_list')
+            if config['multitracker_allowed'] == 'autodetect':
+                config['multitracker_allowed'] = 'none'
+            config['allowed_controls'] = 0
+
+        self.uq_broken = unquote('+') != ' '
+        self.keep_dead = config['keep_dead']
+        self.Filter = Filter(rawserver.add_task)
+
+        # Aggregator mode: '0' = off, '1' = on without password, else password.
+        aggregator = config['aggregator']
+        if aggregator == '0':
+            self.is_aggregator = False
+            self.aggregator_key = None
+        else:
+            self.is_aggregator = True
+            if aggregator == '1':
+                self.aggregator_key = None
+            else:
+                self.aggregator_key = aggregator
+            self.natcheck = False
+
+        send = config['aggregate_forward']
+        if not send:
+            self.aggregate_forward = None
+        else:
+            try:
+                self.aggregate_forward, self.aggregate_password = send.split(',')
+            except:
+                self.aggregate_forward = send
+                self.aggregate_password = None
+
+        self.dedicated_seed_id = config['dedicated_seed_id']
+        self.is_seeded = {}
+
+        self.cachetime = 0
+        self.cachetimeupdate()
+
+    def cachetimeupdate(self):
+        """Advance the coarse cache clock and reschedule itself every second."""
+        self.cachetime += 1 # raw clock, but more efficient for cache
+        self.rawserver.add_task(self.cachetimeupdate,1)
+
+    def aggregate_senddata(self, query):
+        """Forward an announce query string to the aggregator URL in a worker thread."""
+        url = self.aggregate_forward+'?'+query
+        if self.aggregate_password is not None:
+            url += '&password='+self.aggregate_password
+        # Non-daemon thread so an in-flight forward is not killed at exit.
+        rq = Thread(target = self._aggregate_senddata, args = [url])
+        rq.setDaemon(False)
+        rq.start()
+
+    def _aggregate_senddata(self, url): # just send, don't attempt to error check,
+        """Fire-and-forget fetch of `url`; errors and response data are discarded."""
+        try: # discard any returned data
+            h = urlopen(url)
+            h.read()
+            h.close()
+        except:
+            return
+
+
+    def get_infopage(self):
+        """Build the HTML status page served at the root URL.
+
+        Returns an HTTPHandler-style (code, message, headers, body) tuple:
+        404 when the info page is disabled, 302 when a redirect is
+        configured, 200 with the generated HTML otherwise, and 500 on any
+        internal error.
+        NOTE(review): size_format is not defined in this excerpt --
+        presumably defined elsewhere in this module; confirm.
+        """
+        try:
+            if not self.config['show_infopage']:
+                return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+            red = self.config['infopage_redirect']
+            if red:
+                return (302, 'Found', {'Content-Type': 'text/html', 'Location': red},
+                        '<A HREF="'+red+'">Click Here</A>')
+
+            s = StringIO()
+            s.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' \
+                '<html><head><title>DebTorrent download info</title>\n')
+            if self.favicon is not None:
+                s.write('<link rel="shortcut icon" href="/favicon.ico">\n')
+            s.write('</head>\n<body>\n' \
+                '<h3>DebTorrent download info</h3>\n'\
+                '<ul>\n'
+                '<li><strong>tracker version:</strong> %s</li>\n' \
+                '<li><strong>server time:</strong> %s</li>\n' \
+                '</ul>\n' % (version, isotime()))
+            # Choose which torrents to list, with names only when allowed.
+            if self.config['allowed_dir']:
+                if self.show_names:
+                    names = [ (self.allowed[hash]['name'],hash)
+                              for hash in self.allowed.keys() ]
+                else:
+                    names = [ (None,hash)
+                              for hash in self.allowed.keys() ]
+            else:
+                names = [ (None,hash) for hash in self.downloads.keys() ]
+            if not names:
+                s.write('<p>not tracking any files yet...</p>\n')
+            else:
+                names.sort()
+                tn = 0 # Total downloads
+                tc = 0 # Total complete (seeds)
+                td = 0 # Total downloading
+                tt = 0 # Total transferred
+                ts = 0 # Total size
+                nf = 0 # Number of files displayed
+                if self.config['allowed_dir'] and self.show_names:
+                    s.write('<table summary="files" border="1">\n' \
+                        '<tr><th>info hash</th><th>torrent name</th><th align="right">size</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th><th align="right">transferred</th></tr>\n')
+                else:
+                    s.write('<table summary="files">\n' \
+                        '<tr><th>info hash</th><th align="right">complete</th><th align="right">downloading</th><th align="right">downloaded</th></tr>\n')
+                for name,hash in names:
+                    l = self.downloads[hash]
+                    n = self.completed.get(hash, 0)
+                    tn = tn + n
+                    c = self.seedcount[hash]
+                    tc = tc + c
+                    d = len(l) - c
+                    td = td + d
+                    if self.config['allowed_dir'] and self.show_names:
+                        if self.allowed.has_key(hash):
+                            nf = nf + 1
+                            sz = self.allowed[hash]['length'] # size
+                            ts = ts + sz
+                            szt = sz * n # Transferred for this torrent
+                            tt = tt + szt
+                            if self.allow_get == 1:
+                                linkname = '<a href="/file?info_hash=' + quote(hash) + '">' + name + '</a>'
+                            else:
+                                linkname = name
+                            s.write('<tr><td><code>%s</code></td><td>%s</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n' \
+                                % (b2a_hex(hash), linkname, size_format(sz), c, d, n, size_format(szt)))
+                    else:
+                        s.write('<tr><td><code>%s</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td><td align="right"><code>%i</code></td></tr>\n' \
+                            % (b2a_hex(hash), c, d, n))
+                # Totals row, then a legend explaining each column.
+                if self.config['allowed_dir'] and self.show_names:
+                    s.write('<tr><td align="right" colspan="2">%i files</td><td align="right">%s</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td><td align="right">%s</td></tr>\n'
+                            % (nf, size_format(ts), tc, td, tn, size_format(tt)))
+                else:
+                    s.write('<tr><td align="right">%i files</td><td align="right">%i</td><td align="right">%i</td><td align="right">%i</td></tr>\n'
+                            % (nf, tc, td, tn))
+                s.write('</table>\n' \
+                    '<ul>\n' \
+                    '<li><em>info hash:</em> SHA1 hash of the "info" section of the metainfo (*.dtorrent)</li>\n' \
+                    '<li><em>complete:</em> number of connected clients with the complete file</li>\n' \
+                    '<li><em>downloading:</em> number of connected clients still downloading</li>\n' \
+                    '<li><em>downloaded:</em> reported complete downloads</li>\n' \
+                    '<li><em>transferred:</em> torrent size * total downloaded (does not include partial transfers)</li>\n' \
+                    '</ul>\n')
+
+            s.write('</body>\n' \
+                '</html>\n')
+            return (200, 'OK', {'Content-Type': 'text/html; charset=iso-8859-1'}, s.getvalue())
+        except:
+            print_exc()
+            return (500, 'Internal Server Error', {'Content-Type': 'text/html; charset=iso-8859-1'}, 'Server Error')
+
+
+    def scrapedata(self, hash, return_name = True):
+        """Return the scrape dict for one torrent.
+
+        Keys: 'complete', 'incomplete', 'downloaded', plus 'name' when
+        `return_name` is set and names are being shown from allowed_dir.
+        """
+        l = self.downloads[hash]
+        n = self.completed.get(hash, 0)
+        c = self.seedcount[hash]
+        d = len(l) - c
+        f = {'complete': c, 'incomplete': d, 'downloaded': n}
+        if return_name and self.show_names and self.config['allowed_dir']:
+            f['name'] = self.allowed[hash]['name']
+        return (f)
+
+    def get_scrape(self, paramslist):
+        """Handle a /scrape request, honoring the scrape_allowed policy.
+
+        With info_hash parameters, scrapes just those torrents (policy
+        'specific' or 'full'); without, scrapes everything (policy 'full'
+        only).  Returns an HTTPHandler-style (code, message, headers,
+        bencoded body) tuple.
+        """
+        fs = {}
+        if paramslist.has_key('info_hash'):
+            if self.config['scrape_allowed'] not in ['specific', 'full']:
+                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'specific scrape function is not available with this tracker.'}))
+            for hash in paramslist['info_hash']:
+                if self.allowed is not None:
+                    if self.allowed.has_key(hash):
+                        fs[hash] = self.scrapedata(hash)
+                else:
+                    if self.downloads.has_key(hash):
+                        fs[hash] = self.scrapedata(hash)
+        else:
+            if self.config['scrape_allowed'] != 'full':
+                return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'full scrape function is not available with this tracker.'}))
+            if self.allowed is not None:
+                keys = self.allowed.keys()
+            else:
+                keys = self.downloads.keys()
+            for hash in keys:
+                fs[hash] = self.scrapedata(hash)
+
+        return (200, 'OK', {'Content-Type': 'text/plain'}, bencode({'files': fs}))
+
+
+    def get_file(self, hash):
+        """Serve the .dtorrent file for `hash` when allow_get is enabled.
+
+        Returns 400 when downloads are disabled, 404 when the hash is not
+        in the allowed set, otherwise 200 with the file as an attachment.
+        """
+        if not self.allow_get:
+            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                'get function is not available with this tracker.')
+        if not self.allowed.has_key(hash):
+            return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)
+        fname = self.allowed[hash]['file']
+        fpath = self.allowed[hash]['path']
+        return (200, 'OK', {'Content-Type': 'application/x-debtorrent',
+            'Content-Disposition': 'attachment; filename=' + fname},
+            open(fpath, 'rb').read())
+
+
+    def check_allowed(self, infohash, paramslist):
+        """Check access controls for an announce of `infohash`.
+
+        Returns None when the request is allowed, otherwise an
+        HTTPHandler-style response tuple carrying a bencoded
+        'failure reason' (HTTP 200, per tracker convention).
+        """
+        if ( self.aggregator_key is not None
+                 and not ( paramslist.has_key('password')
+                        and paramslist['password'][0] == self.aggregator_key ) ):
+            return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                bencode({'failure reason':
+                'Requested download is not authorized for use with this tracker.'}))
+
+        if self.allowed is not None:
+            if not self.allowed.has_key(infohash):
+                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'Requested download is not authorized for use with this tracker.'}))
+            if self.config['allowed_controls']:
+                if self.allowed[infohash].has_key('failure reason'):
+                    return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                        bencode({'failure reason': self.allowed[infohash]['failure reason']}))
+
+        if paramslist.has_key('tracker'):
+            # Announce from another tracker (multitracker operation).
+            if ( self.config['multitracker_allowed'] == 'none' or # turned off
+                 paramslist['peer_id'][0] == self.trackerid ): # oops! contacted myself
+                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason': 'disallowed'}))
+
+            if ( self.config['multitracker_allowed'] == 'autodetect'
+                     and not self.allowed[infohash].has_key('announce-list') ):
+                return (200, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
+                    bencode({'failure reason':
+                    'Requested download is not authorized for multitracker use.'}))
+
+        return None
+
+
+    def add_data(self, infohash, event, ip, paramslist):
+        """Record an announce for `infohash` and update the peer tables.
+
+        Raises ValueError on malformed parameters (bad id length, event,
+        port or left).  Returns the number of peers the response should
+        contain (rsize).
+        """
+        peers = self.downloads.setdefault(infohash, {})
+        ts = self.times.setdefault(infohash, {})
+        self.completed.setdefault(infohash, 0)
+        self.seedcount.setdefault(infohash, 0)
+
+        # First value of the named query parameter, or `default`.
+        def params(key, default = None, l = paramslist):
+            if l.has_key(key):
+                return l[key][0]
+            return default
+
+        myid = params('peer_id','')
+        if len(myid) != 20:
+            raise ValueError, 'id not of length 20'
+        if event not in ['started', 'completed', 'stopped', 'snooped', None]:
+            raise ValueError, 'invalid event'
+        port = params('cryptoport')
+        if port is None:
+            port = params('port','')
+        port = long(port)
+        if port < 0 or port > 65535:
+            raise ValueError, 'invalid port'
+        left = long(params('left',''))
+        if left < 0:
+            raise ValueError, 'invalid amount left'
+        # NOTE(review): uploaded/downloaded are parsed (validating they are
+        # numeric) but never used in this method.
+        uploaded = long(params('uploaded',''))
+        downloaded = long(params('downloaded',''))
+        if params('supportcrypto'):
+            supportcrypto = 1
+            try:
+                # NOTE(review): bug -- `params` is a function; subscripting it
+                # raises TypeError, so the except always forces s = 0 and
+                # requirecrypto can never be 1.  Should likely be
+                # params('requirecrypto').
+                s = int(params['requirecrypto'])
+                chr(s)
+            except:
+                s = 0
+            requirecrypto = s
+        else:
+            supportcrypto = 0
+            requirecrypto = 0
+
+        peer = peers.get(myid)
+        islocal = local_IPs.includes(ip)
+        mykey = params('key')
+        if peer:
+            # Re-announce is authentic if the key matches or the IP is unchanged.
+            auth = peer.get('key',-1) == mykey or peer.get('ip') == ip
+
+        gip = params('ip')
+        if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
+            ip1 = gip
+        else:
+            ip1 = ip
+
+        if params('numwant') is not None:
+            rsize = min(int(params('numwant')),self.response_size)
+        else:
+            rsize = self.response_size
+
+        if event == 'stopped':
+            if peer:
+                if auth:
+                    self.delete_peer(infohash,myid)
+
+        elif not peer:
+            # Brand-new peer: create its record and kick off NAT checking.
+            ts[myid] = clock()
+            peer = { 'ip': ip, 'port': port, 'left': left,
+                     'supportcrypto': supportcrypto,
+                     'requirecrypto': requirecrypto }
+            if mykey:
+                peer['key'] = mykey
+            if gip:
+                peer['given ip'] = gip
+            if port:
+                if not self.natcheck or islocal:
+                    peer['nat'] = 0
+                    self.natcheckOK(infohash,myid,ip1,port,peer)
+                else:
+                    NatCheck(self.connectback_result,infohash,myid,ip1,port,
+                             self.rawserver,encrypted=requirecrypto)
+            else:
+                peer['nat'] = 2**30
+            if event == 'completed':
+                self.completed[infohash] += 1
+            if not left:
+                self.seedcount[infohash] += 1
+
+            peers[myid] = peer
+
+        else:
+            if not auth:
+                return rsize # return w/o changing stats
+
+            ts[myid] = clock()
+            # Track seed/downloader transitions and move the peer between
+            # the "downloader" and "seed" halves of the becache entries.
+            if not left and peer['left']:
+                self.completed[infohash] += 1
+                self.seedcount[infohash] += 1
+                if not peer.get('nat', -1):
+                    for bc in self.becache[infohash]:
+                        if bc[0].has_key(myid):
+                            bc[1][myid] = bc[0][myid]
+                            del bc[0][myid]
+            elif left and not peer['left']:
+                self.completed[infohash] -= 1
+                self.seedcount[infohash] -= 1
+                if not peer.get('nat', -1):
+                    for bc in self.becache[infohash]:
+                        if bc[1].has_key(myid):
+                            bc[0][myid] = bc[1][myid]
+                            del bc[1][myid]
+            peer['left'] = left
+
+            if port:
+                recheck = False
+                if ip != peer['ip']:
+                    peer['ip'] = ip
+                    recheck = True
+                if gip != peer.get('given ip'):
+                    if gip:
+                        peer['given ip'] = gip
+                    elif peer.has_key('given ip'):
+                        del peer['given ip']
+                    recheck = True
+
+                natted = peer.get('nat', -1)
+                if recheck:
+                    if natted == 0:
+                        # Drop stale cache entries before re-testing.
+                        l = self.becache[infohash]
+                        y = not peer['left']
+                        for x in l:
+                            del x[y][myid]
+                    if natted >= 0:
+                        del peer['nat'] # restart NAT testing
+                if natted and natted < self.natcheck:
+                    recheck = True
+
+                if recheck:
+                    if not self.natcheck or islocal:
+                        peer['nat'] = 0
+                        self.natcheckOK(infohash,myid,ip1,port,peer)
+                    else:
+                        NatCheck(self.connectback_result,infohash,myid,ip1,port,
+                                 self.rawserver,encrypted=requirecrypto)
+
+        return rsize
+
+
+    def peerlist(self, infohash, stopped, tracker, is_seed,
+                 return_type, rsize, supportcrypto):
+        """Assemble the announce-response dict for one request.
+
+        `return_type` selects the peer encoding (0-2 compact variants,
+        3 full dicts, 4 no-peer-id dicts); `rsize` caps how many peers are
+        returned.  Peers are drawn from shuffled per-torrent caches that
+        are rebuilt from becache when stale or exhausted.
+        """
+        data = {} # return data
+        seeds = self.seedcount[infohash]
+        data['complete'] = seeds
+        data['incomplete'] = len(self.downloads[infohash]) - seeds
+
+        if ( self.config['allowed_controls']
+                 and self.allowed[infohash].has_key('warning message') ):
+            data['warning message'] = self.allowed[infohash]['warning message']
+
+        if tracker:
+            # Multitracker announce: serve from the tracker-to-tracker cache.
+            data['interval'] = self.config['multitracker_reannounce_interval']
+            if not rsize:
+                return data
+            cache = self.cached_t.setdefault(infohash, None)
+            if ( not cache or len(cache[1]) < rsize
+                 or cache[0] + self.config['min_time_between_cache_refreshes'] < clock() ):
+                bc = self.becache.setdefault(infohash,self.cache_default)
+                cache = [ clock(), bc[0][0].values() + bc[0][1].values() ]
+                self.cached_t[infohash] = cache
+                shuffle(cache[1])
+                cache = cache[1]
+
+            data['peers'] = cache[-rsize:]
+            del cache[-rsize:]
+            return data
+
+        data['interval'] = self.reannounce_interval
+        if stopped or not rsize: # save some bandwidth
+            data['peers'] = []
+            return data
+
+        bc = self.becache.setdefault(infohash,self.cache_default)
+        len_l = len(bc[2][0])
+        len_s = len(bc[2][1])
+        if not (len_l+len_s): # caches are empty!
+            data['peers'] = []
+            return data
+        # Proportion of the response to draw from downloaders vs seeds.
+        l_get_size = int(float(rsize)*(len_l)/(len_l+len_s))
+        cache = self.cached.setdefault(infohash,[None,None,None])[return_type]
+        if cache and ( not cache[1]
+                       or (is_seed and len(cache[1]) < rsize)
+                       or len(cache[1]) < l_get_size
+                       or cache[0]+self.config['min_time_between_cache_refreshes'] < self.cachetime ):
+            cache = None
+        if not cache:
+            # Rebuild this return_type's cache, folding in any peers
+            # harvested from other trackers.
+            peers = self.downloads[infohash]
+            if self.config['compact_reqd']:
+                vv = ([],[],[])
+            else:
+                vv = ([],[],[],[],[])
+            for key, ip, port in self.t2tlist.harvest(infohash): # empty if disabled
+                if not peers.has_key(key):
+                    cp = compact_peer_info(ip, port)
+                    vv[0].append(cp)
+                    vv[2].append((cp,'\x00'))
+                    if not self.config['compact_reqd']:
+                        vv[3].append({'ip': ip, 'port': port, 'peer id': key})
+                        vv[4].append({'ip': ip, 'port': port})
+            cache = [ self.cachetime,
+                      bc[return_type][0].values()+vv[return_type],
+                      bc[return_type][1].values() ]
+            shuffle(cache[1])
+            shuffle(cache[2])
+            self.cached[infohash][return_type] = cache
+            for rr in xrange(len(self.cached[infohash])):
+                if rr != return_type:
+                    try:
+                        self.cached[infohash][rr][1].extend(vv[rr])
+                    except:
+                        pass
+        if len(cache[1]) < l_get_size:
+            # Not enough downloaders cached: take them all, plus seeds
+            # unless the requester is itself a seed.
+            peerdata = cache[1]
+            if not is_seed:
+                peerdata.extend(cache[2])
+            cache[1] = []
+            cache[2] = []
+        else:
+            if not is_seed:
+                peerdata = cache[2][l_get_size-rsize:]
+                del cache[2][l_get_size-rsize:]
+                rsize -= len(peerdata)
+            else:
+                peerdata = []
+            if rsize:
+                peerdata.extend(cache[1][-rsize:])
+                del cache[1][-rsize:]
+        # Encode according to the negotiated return_type.
+        if return_type == 0:
+            data['peers'] = ''.join(peerdata)
+        elif return_type == 1:
+            data['crypto_flags'] = "0x01"*len(peerdata)
+            data['peers'] = ''.join(peerdata)
+        elif return_type == 2:
+            data['crypto_flags'] = ''.join([p[1] for p in peerdata])
+            data['peers'] = ''.join([p[0] for p in peerdata])
+        else:
+            data['peers'] = peerdata
+        return data
+
+
def get(self, connection, path, headers):
    """Handle an HTTP GET request from a client.

    Dispatches on the request path: info page, single-file download,
    favicon, scrape, or the main announce handler.  Returns a
    4-tuple of (status code, status message, response headers dict,
    body) for the HTTPHandler to send back.
    """
    real_ip = connection.get_ip()
    ip = real_ip
    # Normalize the client address to IPv4 where possible; ipv4 records
    # whether a compact (4-byte) peer encoding can be used for it.
    if is_ipv4(ip):
        ipv4 = True
    else:
        try:
            ip = ipv6_to_ipv4(ip)
            ipv4 = True
        except ValueError:
            ipv4 = False

    # Enforce the allowed/banned IP lists before doing any other work.
    if ( (self.allowed_IPs and not self.allowed_IPs.includes(ip))
         or (self.banned_IPs and self.banned_IPs.includes(ip)) ):
        return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
            bencode({'failure reason':
            'your IP is not allowed on this tracker'}))

    # Honour a proxy-forwarded address unless configured to trust only
    # the local connection address.
    nip = get_forwarded_ip(headers)
    if nip and not self.only_local_override_ip:
        ip = nip
        try:
            ip = to_ipv4(ip)
            ipv4 = True
        except ValueError:
            ipv4 = False

    paramslist = {}
    # Helper: first value for a query key, or `default` when absent.
    def params(key, default = None, l = paramslist):
        if l.has_key(key):
            return l[key][0]
        return default

    try:
        (scheme, netloc, path, pars, query, fragment) = urlparse(path)
        # Work around clients that fail to quote '+' properly.
        if self.uq_broken == 1:
            path = path.replace('+',' ')
            query = query.replace('+',' ')
        path = unquote(path)[1:]   # drop the leading '/'
        # Collect every key=value pair; repeated keys accumulate.
        for s in query.split('&'):
            if s:
                i = s.index('=')   # raises ValueError on malformed pairs
                kw = unquote(s[:i])
                paramslist.setdefault(kw, [])
                paramslist[kw] += [unquote(s[i+1:])]

        # Human-browsable pages first.
        if path == '' or path == 'index.html':
            return self.get_infopage()
        if (path == 'file'):
            return self.get_file(params('info_hash'))
        if path == 'favicon.ico' and self.favicon is not None:
            return (200, 'OK', {'Content-Type' : 'image/x-icon'}, self.favicon)

        # automated access from here on

        if path in ('scrape', 'scrape.php', 'tracker.php/scrape'):
            return self.get_scrape(paramslist)

        if not path in ('announce', 'announce.php', 'tracker.php/announce'):
            return (404, 'Not Found', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, alas)

        # main tracker function

        filtered = self.Filter.check(real_ip, paramslist, headers)
        if filtered:
            return (400, 'Not Authorized', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'failure reason': filtered}))

        infohash = params('info_hash')
        if not infohash:
            raise ValueError, 'no info hash'

        notallowed = self.check_allowed(infohash, paramslist)
        if notallowed:
            return notallowed

        event = params('event')

        # Record the announce; rsize is the number of peers to return.
        rsize = self.add_data(infohash, event, ip, paramslist)

    except ValueError, e:
        # Any malformed input above funnels here as a 400.
        return (400, 'Bad Request', {'Content-Type': 'text/plain'},
            'you sent me garbage - ' + str(e))

    # Forward the raw query to an aggregator unless it already came
    # from another tracker (avoids forwarding loops).
    if self.aggregate_forward and not paramslist.has_key('tracker'):
        self.aggregate_senddata(query)

    if self.is_aggregator: # don't return peer data here
        return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'},
                bencode({'response': 'OK'}))

    # Choose the peer-list encoding: 0-2 are compact variants (with
    # crypto flags), 3 is the full dict form, 4 the dict form without
    # peer ids.  peerlist() interprets these codes.
    if params('compact') and ipv4:
        if params('requirecrypto'):
            return_type = 1
        elif params('supportcrypto'):
            return_type = 2
        else:
            return_type = 0
    elif self.config['compact_reqd'] and ipv4:
        return (400, 'Bad Request', {'Content-Type': 'text/plain'},
            'your client is outdated, please upgrade')
    elif params('no_peer_id'):
        return_type = 4
    else:
        return_type = 3

    data = self.peerlist(infohash, event=='stopped',
                         params('tracker'), not params('left'),
                         return_type, rsize, params('supportcrypto'))

    if paramslist.has_key('scrape'):    # deprecated
        data['scrape'] = self.scrapedata(infohash, False)

    # Dedicated-seed support: flag the torrent seeded once the
    # designated seed announces completion.
    if self.dedicated_seed_id:
        if params('seed_id') == self.dedicated_seed_id and params('left') == 0:
            self.is_seeded[infohash] = True
        if params('check_seeded') and self.is_seeded.get(infohash):
            data['seeded'] = 1

    return (200, 'OK', {'Content-Type': 'text/plain', 'Pragma': 'no-cache'}, bencode(data))
+
+
def natcheckOK(self, infohash, peerid, ip, port, peer):
    """Register a peer that passed its NAT (connect-back) check in the
    pre-encoded peer caches for this torrent.

    Each becache entry is a pair indexed by the boolean `seed` flag
    (downloaders vs. seeds).  From the assignments below: [0] holds
    plain compact entries (only peers not requiring crypto), [1]
    compact entries for crypto-capable peers, [2] compact entries
    paired with their crypto-required flag, and [3]/[4] hold
    pre-bencoded full dicts with and without the peer id.
    """
    seed = not peer['left']
    bc = self.becache.setdefault(infohash,self.cache_default)
    cp = compact_peer_info(ip, port)
    reqc = peer['requirecrypto']
    bc[2][seed][peerid] = (cp,chr(reqc))
    if peer['supportcrypto']:
        bc[1][seed][peerid] = cp
    if not reqc:
        bc[0][seed][peerid] = cp
    # The non-compact caches are only maintained when old clients are
    # still allowed (compact responses not required).
    if not self.config['compact_reqd']:
        bc[3][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port,
                                                 'peer id': peerid}))
        bc[4][seed][peerid] = Bencached(bencode({'ip': ip, 'port': port}))
+
+
def natchecklog(self, peerid, ip, port, result):
    """Print one Apache-style access-log line recording the outcome of
    a NAT check against ip:port; `result` is an HTTP-like status code
    (200 success, 503 failure, 404 stale record).
    """
    # `months` (imported from HTTPHandler) maps the 1-based month
    # number to its abbreviation for the [dd/Mon/yyyy:...] timestamp.
    year, month, day, hour, minute, second, a, b, c = localtime(time())
    print '%s - %s [%02d/%3s/%04d:%02d:%02d:%02d] "!natcheck-%s:%i" %i 0 - -' % (
        ip, quote(peerid), day, months[month], year, hour, minute, second,
        ip, port, result)
+
def connectback_result(self, result, downloadid, peerid, ip, port):
    """Callback invoked when a NAT connect-back check completes.

    `result` is truthy when the tracker could connect back to the
    peer.  Updates the peer record's 'nat' counter (0 = reachable,
    otherwise the number of failed checks) and, on success, publishes
    the peer via natcheckOK().
    """
    record = self.downloads.get(downloadid,{}).get(peerid)
    # If the peer disappeared or re-announced from a different
    # address/port while the check was in flight, the result is stale.
    if ( record is None
             or (record['ip'] != ip and record.get('given ip') != ip)
             or record['port'] != port ):
        if self.config['log_nat_checks']:
            self.natchecklog(peerid, ip, port, 404)
        return
    if self.config['log_nat_checks']:
        if result:
            x = 200
        else:
            x = 503
        self.natchecklog(peerid, ip, port, x)
    if not record.has_key('nat'):
        # First check for this peer: nat is 0 when reachable, 1 when not.
        record['nat'] = int(not result)
        if result:
            self.natcheckOK(downloadid,peerid,ip,port,record)
    elif result and record['nat']:
        # Peer recovered after earlier failures: reset and publish it.
        record['nat'] = 0
        self.natcheckOK(downloadid,peerid,ip,port,record)
    elif not result:
        # Another failed check; keep counting.
        record['nat'] += 1
+
+
def remove_from_state(self, *l):
    """Delete the named keys from the persisted state dictionary.

    Keys that are not present are silently ignored, so callers may
    pass any mix of keys without checking first.
    """
    for s in l:
        try:
            del self.state[s]
        except KeyError:
            # Narrowed from a bare `except:` -- only a missing key is
            # expected here; anything else should propagate.
            pass
+
def save_state(self):
    """Write the bencoded tracker state to the state file.

    Reschedules itself to run again after save_dfile_interval
    seconds, then serializes self.state to self.dfile.
    """
    self.rawserver.add_task(self.save_state, self.save_dfile_interval)
    h = open(self.dfile, 'wb')
    try:
        h.write(bencode(self.state))
    finally:
        # Always release the handle, even if bencoding or the write
        # fails, so a bad state object can't leak a descriptor on
        # every save interval.
        h.close()
    # NOTE(review): the write is not atomic -- a crash mid-write
    # corrupts the state file; consider write-to-temp + rename.
+
+
def parse_allowed(self):
    """Periodically refresh the set of allowed torrents.

    Reschedules itself every parse_dir_interval seconds, then either
    rescans the allowed_dir for .dtorrent files or re-reads the
    allowed_list file (only when its mtime changed).  Newly added
    torrents get empty tracking structures.
    """
    self.rawserver.add_task(self.parse_allowed, self.parse_dir_interval)

    if self.config['allowed_dir']:
        # Incremental rescan: parsedir reuses the previous results and
        # reports what was added.
        r = parsedir( self.config['allowed_dir'], self.allowed,
                      self.allowed_dir_files, self.allowed_dir_blocked,
                      [".dtorrent"] )
        ( self.allowed, self.allowed_dir_files, self.allowed_dir_blocked,
            added, garbage2 ) = r

        self.state['allowed'] = self.allowed
        self.state['allowed_dir_files'] = self.allowed_dir_files

        self.t2tlist.parse(self.allowed)

    else:
        f = self.config['allowed_list']
        # Skip re-parsing when the list file hasn't changed.
        if self.allowed_list_mtime == os.path.getmtime(f):
            return
        try:
            r = parsetorrentlist(f, self.allowed)
            (self.allowed, added, garbage2) = r
            self.state['allowed_list'] = self.allowed
        except (IOError, OSError):
            print '**warning** unable to read allowed torrent list'
            return
        self.allowed_list_mtime = os.path.getmtime(f)

    # Initialize tracking structures for torrents seen for the first
    # time; setdefault leaves existing entries untouched.
    for infohash in added.keys():
        self.downloads.setdefault(infohash, {})
        self.completed.setdefault(infohash, 0)
        self.seedcount.setdefault(infohash, 0)
+
+
def read_ip_lists(self):
    """Periodically reload the allowed and banned IP lists.

    Reschedules itself every parse_dir_interval seconds; each file is
    only re-read when its mtime differs from the last successful read.
    On read failure a warning is printed and the freshly-created empty
    list remains in effect until the next successful read.
    """
    self.rawserver.add_task(self.read_ip_lists,self.parse_dir_interval)

    f = self.config['allowed_ips']
    if f and self.allowed_ip_mtime != os.path.getmtime(f):
        self.allowed_IPs = IP_List()
        try:
            self.allowed_IPs.read_fieldlist(f)
            # Record the mtime only on success so a failed read is
            # retried next interval.
            self.allowed_ip_mtime = os.path.getmtime(f)
        except (IOError, OSError):
            print '**warning** unable to read allowed_IP list'

    f = self.config['banned_ips']
    if f and self.banned_ip_mtime != os.path.getmtime(f):
        # Bans use range syntax, hence the IP_Range_List variant.
        self.banned_IPs = IP_Range_List()
        try:
            self.banned_IPs.read_rangelist(f)
            self.banned_ip_mtime = os.path.getmtime(f)
        except (IOError, OSError):
            print '**warning** unable to read banned_IP list'
+
+
def delete_peer(self, infohash, peerid):
    """Remove one peer from every tracking structure for a torrent:
    the seed count, the pre-encoded response caches, the activity
    timestamps, and the downloads record itself.
    """
    dls = self.downloads[infohash]
    peer = dls[peerid]
    if not peer['left']:
        self.seedcount[infohash] -= 1
    # Only peers that passed a NAT check (nat == 0) were published in
    # becache; the -1 default keeps never-checked peers out of this
    # branch.
    if not peer.get('nat',-1):
        l = self.becache[infohash]
        # Each cache is a [downloaders, seeds] pair; y selects the
        # half this peer was filed under.
        y = not peer['left']
        for x in l:
            if x[y].has_key(peerid):
                del x[y][peerid]
    del self.times[infohash][peerid]
    del dls[peerid]
+
def expire_downloaders(self):
    """Periodically expire peers that have not announced since the
    previous pass, then (unless keep_dead is set) drop torrents with
    no peers left that are not in the allowed list.

    Reschedules itself every timeout_downloaders_interval seconds.
    """
    # Python 2 .keys()/.items() return lists, so deleting entries
    # while looping here is safe.
    for x in self.times.keys():
        for myid, t in self.times[x].items():
            if t < self.prevtime:
                self.delete_peer(x,myid)
    self.prevtime = clock()
    if (self.keep_dead != 1):
        for key, value in self.downloads.items():
            if len(value) == 0 and (
                    self.allowed is None or not self.allowed.has_key(key) ):
                del self.times[key]
                del self.downloads[key]
                del self.seedcount[key]
    self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
+
+
def size_format(s):
    """Format a byte count as a human-readable string.

    Sizes below 1 KiB are shown in bytes, below 1 MiB in whole KiB,
    below 1 GiB in whole MiB; larger sizes are shown in GiB or TiB
    with two decimal places (truncated, not rounded).
    """
    _KiB = 1024
    _MiB = 1048576
    _GiB = 1073741824
    _TiB = 1099511627776
    if s < _KiB:
        return str(s) + 'B'
    if s < _MiB:
        return str(int(s / _KiB)) + 'KiB'
    if s < _GiB:
        return str(int(s / _MiB)) + 'MiB'
    if s < _TiB:
        # Truncate to two decimal places rather than rounding.
        return str(int((s / (_GiB * 1.0)) * 100.0) / 100.0) + 'GiB'
    return str(int((s / (_TiB * 1.0)) * 100.0) / 100.0) + 'TiB'
+
Modified: debtorrent/branches/http-listen/DebTorrent/RawServer.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/http-listen/DebTorrent/RawServer.py?rev=46&op=diff
==============================================================================
--- debtorrent/branches/http-listen/DebTorrent/RawServer.py (original)
+++ debtorrent/branches/http-listen/DebTorrent/RawServer.py Thu May 10 06:41:07 2007
@@ -194,5 +194,8 @@
if not kbint: # don't report here if it's a keyboard interrupt
self.errorfunc(data.getvalue())
def set_handler(self, handler, port = None):
    # Delegate to the SocketHandler: with port=None this sets the
    # default connection handler, otherwise it registers `handler`
    # for connections accepted on that specific listening port.
    self.sockethandler.set_handler(handler, port)
+
def shutdown(self):
self.sockethandler.shutdown()
Modified: debtorrent/branches/http-listen/DebTorrent/SocketHandler.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/http-listen/DebTorrent/SocketHandler.py?rev=46&op=diff
==============================================================================
--- debtorrent/branches/http-listen/DebTorrent/SocketHandler.py (original)
+++ debtorrent/branches/http-listen/DebTorrent/SocketHandler.py Thu May 10 06:41:07 2007
@@ -135,6 +135,9 @@
self.max_connects = 1000
self.port_forwarded = None
self.servers = {}
+ self.interfaces = []
+ self.ports = []
+ self.handlers = {}
def scan_for_timeouts(self):
t = clock() - self.timeout
@@ -149,8 +152,9 @@
def bind(self, port, bind = '', reuse = False, ipv6_socket_style = 1, upnp = 0):
port = int(port)
addrinfos = []
- self.servers = {}
- self.interfaces = []
+ # Don't reinitialize to allow multiple binds
+ newservers = {}
+ newinterfaces = []
# if bind != "" treat it as a comma separated list and bind to all
# addresses (can be ips or hostnames) else bind to default ipv6 and
# ipv4 address
@@ -178,34 +182,39 @@
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.setblocking(0)
server.bind(addrinfo[4])
- self.servers[server.fileno()] = server
+ newservers[server.fileno()] = server
if bind:
- self.interfaces.append(server.getsockname()[0])
+ newinterfaces.append(server.getsockname()[0])
server.listen(64)
self.poll.register(server, POLLIN)
except socket.error, e:
- for server in self.servers.values():
+ for server in newservers.values():
try:
server.close()
except:
pass
- if self.ipv6_enable and ipv6_socket_style == 0 and self.servers:
+ if self.ipv6_enable and ipv6_socket_style == 0 and newservers:
raise socket.error('blocked port (may require ipv6_binds_v4 to be set)')
raise socket.error(str(e))
- if not self.servers:
+ if not newservers:
raise socket.error('unable to open server port')
if upnp:
if not UPnP_open_port(port):
- for server in self.servers.values():
+ for server in newservers.values():
try:
server.close()
except:
pass
- self.servers = None
- self.interfaces = None
+ newservers = None
+ newinterfaces = None
raise socket.error(UPnP_ERROR)
self.port_forwarded = port
- self.port = port
+ self.ports.append(port)
+ # Save the newly created items
+ for key,value in newservers.items():
+ self.servers[key] = value
+ for item in newinterfaces:
+ self.interfaces.append(item)
def find_and_bind(self, minport, maxport, bind = '', reuse = False,
ipv6_socket_style = 1, upnp = 0, randomizer = False):
@@ -231,8 +240,11 @@
raise socket.error(str(e))
- def set_handler(self, handler):
- self.handler = handler
def set_handler(self, handler, port = None):
    # Default handler (port is None) serves any port without its own
    # entry; per-port handlers override it in the accept path.
    if port is None:
        self.handler = handler
    else:
        self.handlers[port] = handler
def start_connection_raw(self, dns, socktype = socket.AF_INET, handler = None):
@@ -296,12 +308,14 @@
print "lost server socket"
elif len(self.single_sockets) < self.max_connects:
try:
+ port = s.getsockname()[1]
+ handler = self.handlers.get(port, self.handler)
newsock, addr = s.accept()
newsock.setblocking(0)
- nss = SingleSocket(self, newsock, self.handler)
+ nss = SingleSocket(self, newsock, handler)
self.single_sockets[newsock.fileno()] = nss
self.poll.register(newsock, POLLIN)
- self.handler.external_connection_made(nss)
+ handler.external_connection_made(nss)
except socket.error:
self._sleep()
else:
@@ -358,7 +372,7 @@
def get_stats(self):
return { 'interfaces': self.interfaces,
- 'port': self.port,
+ 'port': self.ports,
'upnp': self.port_forwarded is not None }
Modified: debtorrent/branches/http-listen/DebTorrent/download_bt1.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/http-listen/DebTorrent/download_bt1.py?rev=46&op=diff
==============================================================================
--- debtorrent/branches/http-listen/DebTorrent/download_bt1.py (original)
+++ debtorrent/branches/http-listen/DebTorrent/download_bt1.py Thu May 10 06:41:07 2007
@@ -73,6 +73,9 @@
('maxport', 60000, 'maximum port to listen on'),
('random_port', 1, 'whether to choose randomly inside the port range ' +
'instead of counting up linearly'),
+ ('port', 9988, 'port to listen for apt on'),
+ ('min_time_between_log_flushes', 3.0,
+ 'minimum time it must have been since the last flush to do another one'),
('responsefile', '',
'file the server response was stored in, alternative to url'),
('url', '',
Modified: debtorrent/branches/http-listen/DebTorrent/launchmanycore.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/http-listen/DebTorrent/launchmanycore.py?rev=46&op=diff
==============================================================================
--- debtorrent/branches/http-listen/DebTorrent/launchmanycore.py (original)
+++ debtorrent/branches/http-listen/DebTorrent/launchmanycore.py Thu May 10 06:41:07 2007
@@ -30,6 +30,8 @@
from __init__ import createPeerID, mapbase64, version
from cStringIO import StringIO
from traceback import print_exc
+from DebTorrent.BT1.AptListener import AptListener
+from DebTorrent.HTTPHandler import HTTPHandler
try:
True
@@ -188,6 +190,13 @@
self.failed("Couldn't listen - " + str(e))
return
+ self.aptlistener = AptListener(config, self.rawserver)
+ self.rawserver.bind(config['port'], config['bind'],
+ reuse = True, ipv6_socket_style = config['ipv6_binds_v4'])
+ self.rawserver.set_handler(HTTPHandler(self.aptlistener.get,
+ config['min_time_between_log_flushes']),
+ config['port'])
+
self.ratelimiter = RateLimiter(self.rawserver.add_task,
config['upload_unit_size'])
self.ratelimiter.set_upload_rate(config['max_upload_rate'])
Modified: debtorrent/branches/http-listen/btdownloadheadless.py
URL: http://svn.debian.org/wsvn/debtorrent/debtorrent/branches/http-listen/btdownloadheadless.py?rev=46&op=diff
==============================================================================
--- debtorrent/branches/http-listen/btdownloadheadless.py (original)
+++ debtorrent/branches/http-listen/btdownloadheadless.py Thu May 10 06:41:07 2007
@@ -30,6 +30,8 @@
from DebTorrent.clock import clock
from DebTorrent import createPeerID, version
from DebTorrent.ConfigDir import ConfigDir
+from DebTorrent.BT1.AptListener import AptListener
+from DebTorrent.HTTPHandler import HTTPHandler
assert sys.version >= '2', "Install Python 2.0 or greater"
try:
@@ -188,6 +190,13 @@
h.failed()
return
+ aptlistener = AptListener(config, rawserver)
+ rawserver.bind(config['port'], config['bind'],
+ reuse = True, ipv6_socket_style = config['ipv6_binds_v4'])
+ rawserver.set_handler(HTTPHandler(aptlistener.get,
+ config['min_time_between_log_flushes']),
+ config['port'])
+
response = get_response(config['responsefile'], config['url'], h.error)
if not response:
break
More information about the Debtorrent-commits
mailing list