[apt-proxy-devel] r613 - in trunk: . apt_proxy apt_proxy/test
debian/po doc doc/po
Chris Halls
halls at costa.debian.org
Thu Aug 3 23:54:49 UTC 2006
Author: halls
Date: Thu Aug 3 23:54:46 2006
New Revision: 613
Added:
trunk/apt_proxy/cache.py
trunk/apt_proxy/fetchers.py
trunk/apt_proxy/test/test_cache.py
trunk/apt_proxy/test/test_fetchers.py
trunk/apt_proxy/test/test_requests.py
Removed:
trunk/debian/TODO
Modified:
trunk/apt_proxy/apt_proxy.py
trunk/apt_proxy/apt_proxy_conf.py
trunk/apt_proxy/misc.py
trunk/apt_proxy/packages.py
trunk/apt_proxy/test/test_apt_proxy.py
trunk/apt_proxy/test/test_config.py
trunk/apt_proxy/test/test_packages.py
trunk/debian/changelog
trunk/debian/control
trunk/debian/po/cs.po
trunk/debian/po/da.po
trunk/debian/po/fr.po
trunk/debian/po/nl.po
trunk/debian/po/vi.po
trunk/debian/postinst
trunk/debian/rules
trunk/doc/TODO
trunk/doc/apt-proxy-import.8.inc
trunk/doc/apt-proxy.8
trunk/doc/apt-proxy.conf
trunk/doc/apt-proxy.conf.5
trunk/doc/po/apt-proxy.pot
trunk/doc/po/fr.po
trunk/doc/po4a.cfg
trunk/runtests
Log:
Merge branch people/halls/rework to trunk
Modified: trunk/apt_proxy/apt_proxy.py
==============================================================================
--- trunk/apt_proxy/apt_proxy.py (original)
+++ trunk/apt_proxy/apt_proxy.py Thu Aug 3 23:54:46 2006
@@ -14,21 +14,19 @@
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-from twisted.internet import reactor, defer, abstract, protocol
-from twisted.protocols import ftp, basic
-
import os, stat, signal, fcntl, exceptions
from os.path import dirname, basename
-import tempfile
-import glob
-import re
-import urlparse
-import time
-import string
-import packages
+import tempfile, glob, re, urlparse, time
+from twisted.internet import reactor
from twisted.python.failure import Failure
+from twisted.internet import error, protocol
+from twisted.web import http
+
import memleak
-from twisted.internet import error
+import fetchers, cache, packages
+from misc import log, MirrorRecycler
+import twisted_compat
+
#from posixfile import SEEK_SET, SEEK_CUR, SEEK_END
#since posixfile is considered obsolete I'll define the SEEK_* constants
#myself.
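For reference, the whence constants defined here in place of posixfile's take
the standard POSIX values:

    SEEK_SET, SEEK_CUR, SEEK_END = 0, 1, 2  # absolute, relative, from end of file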
@@ -38,1173 +36,8 @@
from types import *
-#sibling imports
-import misc
-log = misc.log
-
-from twisted_compat import compat
-from twisted_compat import http
-
status_dir = '.apt-proxy'
-class FileType:
- """
- This is just a way to distinguish between different filetypes.
-
- self.regex: regular expression that files of this type should
- match. It could probably be replaced with something simpler,
- but... oh well, it works.
-
- self.contype: mime string for the content-type http header.
-
- mutable: do the contents of this file ever change? Files such as
- .deb and .dsc are never changed once they are created.
-
- """
- def __init__ (self, regex, contype, mutable):
- self.regex = regex
- self.contype = contype
- self.mutable = mutable
-
- def check (self, name):
- "Returns true if name is of this filetype"
- if self.regex.search(name):
- return 1
- else:
- return 0
-
-# Set up the list of filetypes that we are prepared to deal with.
-# If it is not in this list, then we will ignore the file and
-# return an error.
-filetypes = (
- FileType(re.compile(r"\.deb$"), "application/dpkg", 0),
- FileType(re.compile(r"\.udeb$"), "application/dpkg", 0),
- FileType(re.compile(r"\.tar\.gz$"), "application/x-gtar", 0),
- FileType(re.compile(r"\.dsc$"),"text/plain", 0),
- FileType(re.compile(r"\.diff\.gz$"), "application/x-gzip", 0),
- FileType(re.compile(r"\.gz$"), "application/x-gzip", 1),
- FileType(re.compile(r"\.bin$"), "application/octet-stream", 0),
- FileType(re.compile(r"\.tgz$"), "application/x-gtar", 0),
- FileType(re.compile(r"\.txt$"), "application/plain-text", 1),
- FileType(re.compile(r"\.html$"), "application/text-html", 1),
-
- FileType(re.compile(r"/(Packages|Release(\.gpg)?|Sources|Contents-.*)"
- r"(\.(gz|bz2))?$"),
- "text/plain", 1),
-
- FileType(re.compile(r"\.rpm$"), "application/rpm", 0),
-
- FileType(re.compile(r"/(pkglist|release|srclist)(\.(\w|-)+)?"
- r"(\.(gz|bz2))?$"),
- "text/plain", 1),
- )
-
-class FileVerifier(protocol.ProcessProtocol):
- """
- Verifies the integrity of a file by running an external
- command.
-
- self.deferred: a deferred that will be triggered when the command
- completes, or if a timeout occurs.
-
- Sample:
-
- verifier = FileVerifier(self)
- verifier.deferred.addCallbacks(callback_if_ok, callback_if_fail)
-
- then either callback_if_ok or callback_if_fail will be called
- when the subprocess finishes execution.
-
- Check out twisted.internet.defer.Deferred for how to use self.deferred
-
- """
- def __init__(self, request):
- self.factory = request.factory
- self.deferred = defer.Deferred() # Deferred that passes status back
- self.path = request.local_file
-
- if re.search(r"\.deb$", self.path):
- exe = '/usr/bin/dpkg'
- args = (exe, '--fsys-tarfile', self.path)
- elif re.search(r"\.gz$", self.path):
- exe = '/bin/gunzip'
- args = (exe, '-t', '-v', self.path)
- elif re.search(r"\.bz2$", self.path):
- exe = '/usr/bin/bunzip2'
- args = (exe, '--test', self.path)
- else:
- # Unknown file, just check it is not 0 size
- try:
- filesize = os.stat(self.path)[stat.ST_SIZE]
- except:
- filesize = 0
-
- if(os.stat(self.path)[stat.ST_SIZE]) < 1:
- log.debug('Verification failed for ' + self.path)
- self.failed()
- else:
- log.debug('Verification skipped for ' + self.path)
- self.deferred.callback(None)
- return
-
- log.debug("starting verification: " + exe + " " + str(args))
- self.nullhandle = open("/dev/null", "w")
- self.process = reactor.spawnProcess(self, exe, args, childFDs = { 0:"w", 1:self.nullhandle.fileno(), 2:"r" })
- self.laterID = reactor.callLater(self.factory.config.timeout, self.timedout)
-
- def connectionMade(self):
- self.data = ''
-
- def outReceived(self, data):
- #we only care about errors
- pass
-
- def errReceived(self, data):
- self.data = self.data + data
-
- def failed(self):
- log.debug("verification failed: %s"%(self.path), 'verify', 1)
- os.unlink(self.path)
- self.deferred.errback(None)
-
- def timedout(self):
- """
- this should not happen, but if we timeout, we pretend that the
- operation failed.
- """
- self.laterID=None
- log.debug("Process Timedout:",'verify')
- self.failed()
-
- def processEnded(self, reason=None):
- """
- This gets called automatically when the process finishes; we check
- the status and report through the Deferred.
- """
- __pychecker__ = 'unusednames=reason'
- #log.debug("Process Status: %d" %(self.process.status),'verify')
- #log.debug(self.data, 'verify')
- if self.laterID:
- self.laterID.cancel()
- if self.process.status == 0:
- self.deferred.callback(None)
- else:
- self.failed()
-
-def findFileType(name):
- "Look for the FileType of 'name'"
- for type in filetypes:
- if type.check(name):
- return type
- return None
-
-class TempFile (file):
- def __init__(self, mode='w+b', bufsize=-1):
- (fd, name) = tempfile.mkstemp('.apt-proxy')
- os.close(fd)
- file.__init__(self, name, mode, bufsize)
- os.unlink(name)
- def append(self, data):
- self.seek(0, SEEK_END)
- self.write(data)
- def size(self):
- return self.tell()
- def read_from(self, size=-1, start=None):
- if start != None:
- self.seek(start, SEEK_SET)
- data = file.read(self, size)
- return data
-
-
-class Fetcher:
- """
- This is the base class for all Fetcher*, it tries to hold as much
- common code as possible.
-
- Subclasses of this class are the ones responsible for contacting
- the backend servers and fetching the actual data.
- """
- gzip_convert = re.compile(r"/Packages$")
- post_convert = re.compile(r"/Packages.gz$")
- status_code = http.OK
- status_message = None
- requests = None
- request = None
- length = None
- transport = None
-
- def insert_request(self, request):
- """
- Request should be served through this Fetcher because it asked for
- the same uri that we are waiting for.
-
- We also have to get it up to date, give it all received data, send it
- the appropriate headers and set the response code.
- """
- if request in self.requests:
- raise RuntimeError, \
- 'this request is already assigned to this Fetcher'
- self.requests.append(request)
- request.apFetcher = self
- if (self.request):
- self.update_request(request)
-
- def update_request(self, request):
- """
- get a new request up to date
- """
- request.local_mtime = self.request.local_mtime
- request.local_size = self.request.local_size
- if(self.status_code != None):
- request.setResponseCode(self.status_code, self.status_message)
- for name, value in self.request.headers.items():
- request.setHeader(name, value)
- if self.transfered.size() != 0:
- request.write(self.transfered.read_from(start=0))
-
- def remove_request(self, request):
- """
- Request should NOT be served through this Fetcher, the client
- probably closed the connection.
-
- If this is our last request, we may also close the connection with the
- server depending on the configuration.
-
- We keep the last request for reference even if the client closed the
- connection.
- """
- self.requests.remove(request)
- if len(self.requests) == 0:
- log.debug("Last request removed",'Fetcher')
- if not self.factory.config.complete_clientless_downloads:
- if self.transport:
- log.debug(
- "telling the transport to loseConnection",'Fetcher')
- try:
- self.transport.loseConnection()
- except KeyError:
- # Rsync fetcher already loses connection for us
- pass
- if hasattr(self, 'loseConnection'):
- self.loseConnection()
- else:
- self.request = self.requests[0]
- request.apFetcher = None
-
- def transfer_requests(self, fetcher):
- "Transfer all requests from self to fetcher"
- for req in self.requests:
- self.remove_request(req)
- fetcher.insert_request(req)
-
- def setResponseCode(self, code, message=None):
- "Set response code for all requests"
- #log.debug('Response code: %d - %s' % (code, message),'Fetcher')
- self.status_code = code
- self.status_message = message
- for req in self.requests:
- req.setResponseCode(code, message)
-
- def setResponseHeader(self, name, value):
- "set 'value' for header 'name' on all requests"
- for req in self.requests:
- req.setHeader(name, value)
-
- def __init__(self, request=None):
- self.requests = []
- self.transfered = TempFile()
- if(request):
- self.activate(request)
-
- def activate(self, request):
- log.debug(str(request.backend) + request.uri, 'Fetcher.activate')
- self.local_file = request.local_file
- self.local_mtime = request.local_mtime
- self.factory = request.factory
- self.request = request
- request.content.read()
-
- for req in self.requests:
- self.update_request(req)
- self.requests.append(request)
-
- request.apFetcher = self
- if self.factory.runningFetchers.has_key(request.uri):
- raise RuntimeError, 'There already is a running fetcher'
- self.factory.runningFetchers[request.uri]=self
-
- def apDataReceived(self, data):
- """
- Should be called from the subclasses when data is available for
- streaming.
-
- Keeps all transferred data in 'self.transfered' for requests which arrive
- later and to write it in the cache at the end.
-
- Note: self.length if != None is the amount of data pending to be
- received.
- """
- if self.length != None:
- self.transfered.append(data[:self.length])
- for req in self.requests:
- req.write(data[:self.length])
- else:
- self.transfered.append(data)
- for req in self.requests:
- req.write(data)
-
- def apDataEnd(self, data, saveData=True):
- """
- Called by subclasses when the data transfer is over.
-
- -caches the received data if everything went well (if saveData=True)
- -takes care of mtime and atime
- -finishes connection with server and the requests
-
- """
- import shutil
- log.debug("Finished receiving data, status:%d saveData:%d" %(self.status_code, saveData), 'Fetcher');
- if (self.status_code == http.OK):
- if saveData:
- dir = dirname(self.local_file)
- if(not os.path.exists(dir)):
- os.makedirs(dir)
- f = open(self.local_file, "w")
- fcntl.lockf(f.fileno(), fcntl.LOCK_EX)
- f.truncate(0)
- if type(data) is StringType:
- f.write(data)
- else:
- data.seek(0, SEEK_SET)
- shutil.copyfileobj(data, f)
- f.close()
- if self.local_mtime != None:
- os.utime(self.local_file, (time.time(), self.local_mtime))
- else:
- log.debug("no local time: "+self.local_file,'Fetcher')
- os.utime(self.local_file, (time.time(), 0))
-
- self.factory.file_served(self.request.uri)
-
- #self.request.backend.get_packages_db().packages_file(self.request.uri)
-
- if self.transport:
- try:
- self.transport.loseConnection()
- except exceptions.KeyError:
- # Couldn't close connection - already closed?
- log.debug("transport.loseConnection() - "
- "connection already closed", 'Fetcher')
- pass
-
- for req in self.requests:
- req.finish()
-
- self.transfered.close()
- self.apEnd()
-
- def apEnd(self):
- """
- Called by subclasses when apDataEnd does too many things.
-
- Lets everyone know that we are not the active Fetcher for our uri.
- """
- try:
- del self.factory.runningFetchers[self.request.uri]
- except exceptions.KeyError:
- log.debug("We are not on runningFetchers!!!",'Fetcher')
- log.debug("Class is not in runningFetchers: "+str(self.__class__),
- 'Fetcher')
- if self.request:
- log.debug(' URI:' + self.request.uri, 'Fetcher')
- log.debug('Running fetchers: '
- +str(self.factory.runningFetchers),'Fetcher')
- #raise exceptions.KeyError
- for req in self.requests[:]:
- self.remove_request(req)
-
- import gc
- #Cleanup circular references
- reactor.callLater(5, gc.collect)
-
- def apEndCached(self):
- """
- A backend has indicated that this file has not changed,
- so serve the file from the disk cache
- """
- self.setResponseCode(http.OK)
- self.apEndTransfer(FetcherCachedFile)
-
- def apEndTransfer(self, fetcher_class):
- """
- Remove this Fetcher and transfer all its requests to a new instance of
- 'fetcher_class'.
- """
- #Consider something like this:
- #req = dummyFetcher.fix_ref_request()
- #fetcher = fetcher_class()
- #dummyFetcher.transfer_requests(fetcher)
- #dummyFetcher.apEnd()
- #fetcher.activate(req)
-
- #self.setResponseCode(http.OK)
- requests = self.requests[:]
- self.apEnd() # Remove requests from this fetcher
- fetcher = None
- for req in requests:
- if (fetcher_class != FetcherCachedFile or req.serve_if_cached):
- running = req.factory.runningFetchers
- if (running.has_key(req.uri)):
- #If we have an active Fetcher just use that
- log.debug("have active Fetcher",'Fetcher')
- running[req.uri].insert_request(req)
- fetcher = running[req.uri]
- else:
- fetcher = fetcher_class(req)
- else:
- req.finish()
- return fetcher
-
- def connectionFailed(self, reason=None):
- """
- Tell our requests that the connection with the server failed.
- """
- msg = '[%s] Connection Failed: %s/%s'%(
- self.request.backend.base,
- self.request.backendServer.path, self.request.backend_uri)
-
- if reason:
- msg = '%s (%s)'%(msg, reason.getErrorMessage())
- log.debug("Connection Failed: "+str(reason), 'Fetcher')
- log.err(msg)
-
- # Look for alternative fetchers
- if not self.request.activateNextBackendServer(self):
- # No more backends, send error response back to client
- if reason.check(error.ConnectError):
- self.setResponseCode(http.SERVICE_UNAVAILABLE, "Connect Error")
- else:
- self.setResponseCode(http.SERVICE_UNAVAILABLE)
- self.apDataReceived("")
- self.apDataEnd(self.transfered, False)
- #Because of a bug in tcp.Client we may be called twice,
- #Make sure that next time nothing will happen
- #FIXME: This hack is probably no longer pertinent.
- self.connectionFailed = lambda : log.debug('connectionFailed(2)',
- 'Fetcher','9')
-
-
-class FetcherDummy(Fetcher):
- """
- """
- gzip_convert = re.compile(r"^Nothing should match this$")
- post_convert = re.compile(r"^Nothing should match this$")
- status_code = http.INTERNAL_SERVER_ERROR
- status_message = None
-
- def insert_request(self, request):
- """
- """
- if request in self.requests:
- raise RuntimeError, \
- 'this request is already assigned to this Fetcher'
- self.requests.append(request)
- request.apFetcher = self
-
- def remove_request(self, request):
- """
- """
- #make sure that it has updated values, since the requests
- #may be cached and we need them to serve it.
- request.local_mtime = self.request.local_mtime
- request.local_size = self.request.local_size
-
- self.requests.remove(request)
- request.apFetcher = None
-
- def fix_ref_request(self):
- if self.requests != []:
- if self.request not in self.requests:
- request = self.requests[0]
- request.local_mtime = self.request.local_mtime
- request.local_size = self.request.local_size
- self.request = request
- self.remove_request(self.request)
- else:
- self.request = None
-
- return self.request
-
-class FetcherFile(Fetcher):
-
- def activate(self, request):
- Fetcher.activate(self, request)
- log.debug("FetcherFile.activate(): uri='%s' server='%s'" % (request.uri, request.backendServer.uri))
- if not request.apFetcher:
- log.debug("no request.apFetcher")
- return
-
- self.factory.file_served(request.uri)
-
- # start the transfer
- self.local_file = request.backendServer.uri[len("file:"):]+ request.uri
- if not os.path.exists(self.local_file):
- log.debug("not found: %s" % self.local_file)
- request.setResponseCode(http.NOT_FOUND)
- request.write("")
- request.finish()
- self.remove_request(request)
- Fetcher.apEnd(self)
- return
- self.local_size = os.stat(self.local_file)[stat.ST_SIZE]
-
- log.debug("Serving local file: " + self.local_file + " size:" + str(self.local_size), 'FetcherCachedFile')
- file = open(self.local_file,'rb')
- fcntl.lockf(file.fileno(), fcntl.LOCK_SH)
-
- request.setHeader("Content-Length", self.local_size)
- #request.setHeader("Last-modified",
- # http.datetimeToString(request.local_mtime))
- basic.FileSender().beginFileTransfer(file, request) \
- .addBoth(self.file_transfer_complete, request) \
- .addBoth(lambda r: file.close())
-
- # A file transfer has completed
- def file_transfer_complete(self, result, request):
- log.debug("transfer complete", 'FetcherCachedFile')
- request.finish()
- # Remove this client from request list
- self.remove_request(request)
- if len(self.requests) == 0:
- Fetcher.apEnd(self)
-
-class FetcherHttp(Fetcher, http.HTTPClient):
-
- forward_headers = [
- 'last-modified',
- 'content-length'
- ]
- log_headers = None
-
- proxy_host = None
- proxy_port = None
-
- def activate(self, request):
- Fetcher.activate(self, request)
-
- if not self.factory.config.http_proxy is '':
- (self.proxy_host, self.proxy_port) = request.factory.config.http_proxy.split(':')
-
- if not request.apFetcher:
- return
-
- class ClientFactory(protocol.ClientFactory):
- "Dummy ClientFactory to comply with current twisted API"
- #FIXME: Double check this, haggai thinks it is to blame for the
- #hangs.
- def __init__(self, instance):
- self.instance = instance
- def buildProtocol(self, addr):
- return self.instance
- def clientConnectionFailed(self, connector, reason):
- self.instance.connectionFailed(reason)
- def clientConnectionLost(self, connector, reason):
- log.debug("XXX clientConnectionLost", "http-client")
-
- if not self.proxy_host:
- reactor.connectTCP(request.backendServer.host, request.backendServer.port,
- ClientFactory(self), request.backend.config.timeout)
- else:
- reactor.connectTCP(self.proxy_host, int(self.proxy_port),
- ClientFactory(self), request.backend.config.timeout)
- def connectionMade(self):
- if not self.proxy_host:
- self.sendCommand(self.request.method, self.request.backendServer.path
- + "/" + self.request.backend_uri)
- else:
- self.sendCommand(self.request.method, "http://"
- + self.request.backendServer.host + ":" + str(self.request.backendServer.port)
- + "/" + self.request.backendServer.path
- + "/" + self.request.backend_uri)
-
- self.sendHeader('host', self.request.backendServer.host)
-
- if self.local_mtime != None:
- datetime = http.datetimeToString(self.local_mtime)
- self.sendHeader('if-modified-since', datetime)
-
- self.endHeaders()
-
- def handleStatus(self, version, code, message):
- __pychecker__ = 'unusednames=version,message'
- log.debug('handleStatus %s - %s' % (code, message), 'http_client')
- self.status_code = int(code)
-
- # Keep a record of the server response even if overridden later by setResponseCode
- self.http_status = self.status_code
-
- self.setResponseCode(self.status_code)
-
- def handleHeader(self, key, value):
-
- log.debug("Received: " + key + " " + str(value))
- key = string.lower(key)
-
- if key == 'last-modified':
- self.local_mtime = http.stringToDatetime(value)
-
- if key in self.forward_headers:
- self.setResponseHeader(key, value)
-
- def handleEndHeaders(self):
- if self.http_status == http.NOT_MODIFIED:
- log.debug("NOT_MODIFIED " + str(self.status_code),'http_client')
- self.apEndCached()
-
- def rawDataReceived(self, data):
- self.apDataReceived(data)
-
- def handleResponse(self, buffer):
- if self.length == 0:
- self.setResponseCode(http.NOT_FOUND)
- # print "length: " + str(self.length), "response:", self.status_code
- if self.http_status == http.NOT_MODIFIED:
- self.apDataEnd(self.transfered, False)
- else:
- self.apDataEnd(self.transfered, True)
-
- def lineReceived(self, line):
- """
- Log the line and hand it over to the appropriate base class.
-
- The location header gave me trouble at some point, so I filter it just
- in case.
-
- Note: when running a class method directly and not from an object you
- have to give the 'self' parameter manually.
- """
- #log.debug(line,'http_client')
- if self.log_headers == None:
- self.log_headers = line
- else:
- self.log_headers += ", " + line;
- if not re.search('^Location:', line):
- http.HTTPClient.lineReceived(self, line)
-
- def sendCommand(self, command, path):
- "log the line and handle it to the base class."
- log.debug(command + ":" + path,'http_client')
- http.HTTPClient.sendCommand(self, command, path)
-
- def endHeaders(self):
- "log and handle to the base class."
- if self.log_headers != None:
- log.debug(" Headers: " + self.log_headers, 'http_client')
- self.log_headers = None;
- http.HTTPClient.endHeaders(self)
-
- def sendHeader(self, name, value):
- "log and handle to the base class."
- log.debug(name + ":" + value,'http_client')
- http.HTTPClient.sendHeader(self, name, value)
-
-class FetcherFtp(Fetcher, protocol.Protocol):
- """
- This is the sequence here:
-
- -Start and connect the FTPClient
- -Ask for mtime
- -Ask for size
- -if couldn't get the size
- -try to get it by listing
- -get all that juicy data
-
- NOTE: Twisted's FTPClient code uses its own timeouts here and there,
- so the timeout specified for the backend may not always be used
- """
- def activate (self, request):
- Fetcher.activate(self, request)
- if not request.apFetcher:
- return
-
- self.passive_ftp = self.request.backend.config.passive_ftp
-
- self.remote_file = (self.request.backendServer.path + "/"
- + self.request.backend_uri)
-
- from twisted.internet.protocol import ClientCreator
-
- if not request.backendServer.username:
- creator = ClientCreator(reactor, ftp.FTPClient, passive=0)
- else:
- creator = ClientCreator(reactor, ftp.FTPClient, request.backendServer.username,
- request.backendServer.password, passive=0)
- d = creator.connectTCP(request.backendServer.host, request.backendServer.port,
- request.backend.config.timeout)
- d.addCallback(self.controlConnectionMade)
- d.addErrback(self.connectionFailed)
-
- def controlConnectionMade(self, ftpclient):
- self.ftpclient = ftpclient
-
- if(self.passive_ftp):
- log.debug('Got control connection, using passive ftp', 'ftp_client')
- self.ftpclient.passive = 1
- else:
- log.debug('Got control connection, using active ftp', 'ftp_client')
- self.ftpclient.passive = 0
-
- if log.isEnabled('ftp_client'):
- self.ftpclient.debug = 1
-
- self.ftpFetchMtime()
-
- def ftpFinish(self, code, message=None):
- "Finish the transfer with code 'code'"
- self.ftpclient.quit()
- self.setResponseCode(code, message)
- self.apDataReceived("")
- self.apDataEnd(self.transfered)
-
- def ftpFinishCached(self):
- "Finish the transfer giving the requests the cached file."
- self.ftpclient.quit()
- self.apEndCached()
-
- def ftpFetchMtime(self):
- "Get the modification time from the server."
- def apFtpMtimeFinish(msgs, fetcher, fail):
- """
- Got an answer to the mtime request.
-
- Someone should check that this is timezone independent.
- """
- code = None
- if not fail:
- code, msg = msgs[0].split()
- mtime = None
- if code == '213':
- time_tuple=time.strptime(msg[:14], "%Y%m%d%H%M%S")
- #replace daylight saving flag with -1 (current)
- time_tuple = time_tuple[:8] + (-1,)
- #correct the result to GMT
- mtime = time.mktime(time_tuple) - time.altzone
- if (fetcher.local_mtime and mtime
- and fetcher.local_mtime >= mtime):
- fetcher.ftpFinishCached()
- else:
- fetcher.local_mtime = mtime
- fetcher.ftpFetchSize()
-
- d = self.ftpclient.queueStringCommand('MDTM ' + self.remote_file)
- d.addCallbacks(apFtpMtimeFinish, apFtpMtimeFinish,
- (self, 0), None, (self, 1), None)
-
- def ftpFetchSize(self):
- "Get the size of the file from the server"
- def apFtpSizeFinish(msgs, fetcher, fail):
- code = None
- if not fail:
- code, msg = msgs[0].split()
- if code != '213':
- log.debug("SIZE FAILED",'ftp_client')
- fetcher.ftpFetchList()
- else:
- fetcher.setResponseHeader('content-length', msg)
- fetcher.ftpFetchFile()
-
- d = self.ftpclient.queueStringCommand('SIZE ' + self.remote_file)
- d.addCallbacks(apFtpSizeFinish, apFtpSizeFinish,
- (self, 0), None, (self, 1), None)
-
- def ftpFetchList(self):
- "If ftpFetchSize didn't work try to get the size with a list command."
- def apFtpListFinish(msg, filelist, fetcher, fail):
- __pychecker__ = 'unusednames=msg'
- if fail:
- fetcher.ftpFinish(http.INTERNAL_SERVER_ERROR)
- return
- if len(filelist.files)== 0:
- fetcher.ftpFinish(http.NOT_FOUND)
- return
- file = filelist.files[0]
- fetcher.setResponseHeader('content-length', file['size'])
- fetcher.ftpFetchFile()
- filelist = ftp.FTPFileListProtocol()
- d = self.ftpclient.list(self.remote_file, filelist)
- d.addCallbacks(apFtpListFinish, apFtpListFinish,
- (filelist, self, 0), None,
- (filelist, self, 1), None)
-
- def ftpFetchFile(self):
- "And finally, we ask for the file."
- def apFtpFetchFinish(msg, code, status, fetcher):
- __pychecker__ = 'unusednames=msg,status'
- fetcher.ftpFinish(code)
- log.debug('ftpFetchFile: ' + self.remote_file, 'ftp_client')
- d = self.ftpclient.retrieveFile(self.remote_file, self)
- d.addCallbacks(apFtpFetchFinish, apFtpFetchFinish,
- (http.OK, "good", self), None,
- (http.NOT_FOUND, "fail", self), None)
-
- def dataReceived(self, data):
- self.setResponseCode(http.OK)
- self.apDataReceived(data)
-
- def connectionLost(self, reason=None):
- """
- Maybe we should do some recovery here, I don't know, but the Deferred
- should be enough.
- """
- log.debug("lost connection: %s"%(reason),'ftp_client')
-
-class FetcherGzip(Fetcher, protocol.ProcessProtocol):
- """
- This is a fake Fetcher: it uses the real Fetcher from the request's
- backend via LoopbackRequest to get the data and gzips or gunzips as
- needed.
-
- NOTE: We use the serve_cached=0 parameter to Request.fetch so if
- it is cached it doesn't get uselessly read; we just get it from the cache.
- """
- post_convert = re.compile(r"^Should not match anything$")
- gzip_convert = post_convert
-
- exe = '/bin/gzip'
- def activate(self, request, postconverting=0):
- log.debug("FetcherGzip request:" + str(request.uri) + " postconvert:" + str(postconverting), 'gzip')
- Fetcher.activate(self, request)
- if not request.apFetcher:
- return
-
- self.args = (self.exe, '-c', '-9', '-n')
- if(log.isEnabled('gzip',9)):
- self.args += ('-v',)
-
- if request.uri[-3:] == '.gz':
- host_uri = request.uri[:-3]
- else:
- host_uri = request.uri+'.gz'
- self.args += ('-d',)
- self.host_file = self.factory.config.cache_dir + host_uri
- self.args += (self.host_file,)
-
- running = self.factory.runningFetchers
- if not postconverting or running.has_key(host_uri):
- #Make sure that the file is there
- loop = LoopbackRequest(request, self.host_transfer_done)
- loop.uri = host_uri
- loop.local_file = self.host_file
- loop.process()
- self.loop_req = loop
- loop.serve_if_cached=0
- if running.has_key(host_uri):
- #the file is on its way, wait for it.
- running[host_uri].insert_request(loop)
- else:
- #we are not postconverting, so we need to fetch the host file.
- loop.fetch(serve_cached=0)
- else:
- #The file should be there already.
- self.loop_req = None
- self.host_transfer_done()
-
- def host_transfer_done(self):
- """
- Called by our LoopbackRequest when the real Fetcher calls
- finish() on it.
-
- If everything went well, check mtimes and only do the work if needed.
-
- If possible, arrange things so the target file gets the same mtime as
- the host file.
- """
- log.debug('transfer done', 'gzip')
- if self.loop_req and self.loop_req.code != http.OK:
- self.setResponseCode(self.loop_req.code,
- self.loop_req.code_message)
- self.apDataReceived("")
- self.apDataEnd("")
- return
-
- if os.path.exists(self.host_file):
- self.local_mtime = os.stat(self.host_file)[stat.ST_MTIME]
- old_mtime = None
- if os.path.exists(self.local_file):
- old_mtime = os.stat(self.local_file)[stat.ST_MTIME]
- if self.local_mtime == old_mtime:
- self.apEndCached()
- else:
- log.debug("Starting process: " + self.exe + " " + str(self.args), 'gzip')
- self.process = reactor.spawnProcess(self, self.exe, self.args)
-
- def outReceived(self, data):
- self.setResponseCode(http.OK)
- self.apDataReceived(data)
-
- def errReceived(self, data):
- log.debug('gzip: ' + data,'gzip')
-
- def loseConnection(self):
- """
- This is a bad workaround for Process.loseConnection not doing its
- job right.
- The problem only happens when we try to finish the process
- while decompressing.
- """
- if hasattr(self, 'process') and self.process.pid:
- try:
- os.kill(self.process.pid, signal.SIGTERM)
- self.process.connectionLost()
- except exceptions.OSError, Error:
- import errno
- (Errno, Errstr) = Error
- if Errno != errno.ESRCH:
- log.debug('Passing OSError exception '+Errstr)
- raise
- else:
- log.debug('Threw away exception OSError no such process')
-
- def processEnded(self, reason=None):
- __pychecker__ = 'unusednames=reason'
- log.debug("Status: %d" %(self.process.status),'gzip')
- if self.process.status != 0:
- self.setResponseCode(http.NOT_FOUND)
-
- self.apDataReceived("")
- self.apDataEnd(self.transfered)
-
-class FetcherRsync(Fetcher, protocol.ProcessProtocol):
- """
- I frequently am not called directly, Request.fetch makes the
- arrangement for FetcherGzip to use us and gzip the result if needed.
- """
- post_convert = re.compile(r"^Should not match anything$")
- gzip_convert = re.compile(r"/Packages.gz$")
-
- "Temporary filename that rsync streams to"
- rsyncTempFile = None
-
- "Number of bytes sent to client already"
- bytes_sent = 0
-
- def activate (self, request):
- Fetcher.activate(self, request)
- if not request.apFetcher:
- return
-
- # Change /path/to/FILE -> /path/to/.FILE.* to match rsync tempfile
- self.globpattern = re.sub(r'/([^/]*)$', r'/.\1.*', self.local_file)
-
- for file in glob.glob(self.globpattern):
- log.msg('Deleting stale tempfile:' + file)
- os.unlink(file)
-
- uri = 'rsync://'+request.backendServer.host\
- +request.backendServer.path+'/'+request.backend_uri
- self.local_dir=re.sub(r"/[^/]*$", "", self.local_file)+'/'
-
- exe = '/usr/bin/rsync'
- if(log.isEnabled('rsync',9)):
- args = (exe, '--partial', '--progress', '--verbose', '--times',
- '--timeout', "%d"%(request.backend.config.timeout),
- uri, '.',)
- else:
- args = (exe, '--quiet', '--times', uri, '.',
- '--timeout', "%d"%(request.backend.config.timeout),
- )
- if(not os.path.exists(self.local_dir)):
- os.makedirs(self.local_dir)
- self.process = reactor.spawnProcess(self, exe, args, None,
- self.local_dir)
-
- def findRsyncTempFile(self):
- """
- Look for temporary file created by rsync during streaming
- """
- files = glob.glob(self.globpattern)
-
- if len(files)==1:
- self.rsyncTempFile = files[0]
- log.debug('tempfile: ' + self.rsyncTempFile, 'rsync_client')
- elif not files:
- # No file created yet
- pass
- else:
- log.err('found more than one tempfile, abort rsync')
- self.transport.loseConnection()
-
- def connectionMade(self):
- pass
-
- "Data received from rsync process to stdout"
- def outReceived(self, data):
- for s in string.split(data, '\n'):
- if len(s):
- log.debug('rsync: ' + s, 'rsync_client')
- #self.apDataReceived(data)
- if not self.rsyncTempFile:
- self.findRsyncTempFile()
- # Got tempfile?
- if self.rsyncTempFile:
- self.setResponseCode(http.OK)
- if self.rsyncTempFile:
- self.sendData()
-
-
- "Data received from rsync process to stderr"
- def errReceived(self, data):
- for s in string.split(data, '\n'):
- if len(s):
- log.err('rsync error: ' + s, 'rsync_client')
-
- def sendData(self):
- f = None
- if self.rsyncTempFile:
- try:
- f = open(self.rsyncTempFile, 'rb')
- except IOError:
- return
- else:
- # Tempfile has gone, stream main file
- #log.debug("sendData open dest " + str(self.bytes_sent))
- f = open(self.local_file, 'rb')
-
- if f:
- f.seek(self.bytes_sent)
- data = f.read(abstract.FileDescriptor.bufferSize)
- #log.debug("sendData got " + str(len(data)))
- f.close()
- if data:
- self.apDataReceived(data)
- self.bytes_sent = self.bytes_sent + len(data)
- reactor.callLater(0, self.sendData)
- elif not self.rsyncTempFile:
- # Finished reading final file
- #self.transport = None
- log.debug("sendData complete")
- # Tell clients, but data is already saved by rsync so don't
- # write file again
- self.apDataEnd(self.transfered, False)
-
-
- def processEnded(self, status_object):
- __pychecker__ = 'unusednames=reason'
- log.debug("Status: %d" %(status_object.value.exitCode)
- ,'rsync_client')
- self.rsyncTempFile = None
-
- # Success?
- exitcode = status_object.value.exitCode
-
- if exitcode == 0:
- # File received. Send to clients.
- self.local_mtime = os.stat(self.local_file)[stat.ST_MTIME]
- reactor.callLater(0, self.sendData)
- else:
- if exitcode == 10:
- # Host not found
- self.setResponseCode(http.INTERNAL_SERVER_ERROR)
- else:
- self.setResponseCode(http.NOT_FOUND)
-
- if not os.path.exists(self.local_file):
- try:
- os.removedirs(self.local_dir)
- except:
- pass
- self.apDataReceived("")
- self.apDataEnd(self.transfered)
-
- def loseConnection(self):
- "Kill rsync process"
- if self.transport:
- if self.transport.pid:
- log.debug("killing rsync child" +
- str(self.transport.pid), 'rsync_client')
- os.kill(self.transport.pid, signal.SIGTERM)
- #self.transport.loseConnection()
-
-
-
-class FetcherCachedFile(Fetcher):
- """
- Sends the cached file or tells the client that the file was not
- 'modified-since' if appropriate.
- """
- post_convert = re.compile(r"/Packages.gz$")
- gzip_convert = re.compile(r"^Should not match anything$")
-
- request = None
- def if_modified(self, request):
- """
- Check if the file was 'modified-since' and tell the client if it
- wasn't.
- """
- if_modified_since = request.getHeader('if-modified-since')
- if if_modified_since != None:
- if_modified_since = http.stringToDatetime(
- if_modified_since)
-
- if request.local_mtime <= if_modified_since:
- request.setResponseCode(http.NOT_MODIFIED)
- request.setHeader("Content-Length", 0)
- request.write("")
- request.finish()
- self.remove_request(request)
-
- def insert_request(self, request):
- if not request.serve_if_cached:
- request.finish()
- return
- Fetcher.insert_request(self, request)
-
- log.debug("Serving from cache for additional client: " + self.local_file + " size:" + str(self.size))
- self.start_transfer(request)
-
- def activate(self, request):
- Fetcher.activate(self, request)
- if not request.apFetcher:
- return
- self.factory.file_served(request.uri)
- self.size = request.local_size
-
- self.start_transfer(request)
-
- def start_transfer(self, request):
- self.if_modified(request)
-
- if len(self.requests) == 0:
- #we had a single request and didn't have to send it
- self.apEnd()
- return
-
- if self.size:
- log.debug("Serving from cache: " + self.local_file + " size:" + str(self.size), 'FetcherCachedFile')
- file = open(self.local_file,'rb')
- fcntl.lockf(file.fileno(), fcntl.LOCK_SH)
-
- request.setHeader("Content-Length", request.local_size)
- request.setHeader("Last-modified",
- http.datetimeToString(request.local_mtime))
- basic.FileSender().beginFileTransfer(file, request) \
- .addBoth(self.file_transfer_complete, request) \
- .addBoth(lambda r: file.close())
-# .addBoth(lambda r: request.transport.loseConnection())
- else:
- log.debug("Zero length file! " + self.local_file, 'FetcherCachedFile')
- self.file_transfer_complete(None, request)
- request.finish()
-
- # A file transfer has completed
- def file_transfer_complete(self, result, request):
- log.debug("transfer complete", 'FetcherCachedFile')
- request.finish()
- # Remove this client from request list
- self.remove_request(request)
- if len(self.requests) == 0:
- Fetcher.apEnd(self)
-
class Backend:
"""
A backend repository. There is one Backend for each [...] section
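(A Backend corresponds to one named section of apt-proxy.conf. An illustrative
section, with placeholder mirror URLs:

    [debian]
    ;; Servers to fetch from, in order of preference
    backends =
        http://ftp.us.debian.org/debian
        ftp://ftp.de.debian.org/debian
)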
@@ -1214,25 +47,40 @@
"Sequence of BackendServers, in order of preference"
uris = []
+ "Hash of active cache entries"
+ entries = {}
+
"Packages database for this backend"
packages = None
- base = None
+ name = None
+
+ downloadQueuePerClient = True # Set to true if a download queue should be created per client
def __init__(self, factory, config):
+ log.debug("Creating Backend: " + config.name)
self.factory = factory
self.config = config # apBackendConfig configuration information
self.base = config.name # Name of backend
- self.uris=[]
+ self.uris = [] # Sequence of BackendServers, in order of preference
+
+ if self.downloadQueuePerClient:
+ self.queue = fetchers.DownloadQueuePerClient()
+ else:
+ self.queue = fetchers.DownloadQueue()
+
+ self.entries = {} # Hash of active cache entries
+ self.packages = None # Packages database for this backend
for uri in config.backends:
self.addURI(uri)
+
#self.get_packages_db().load()
def addURI(self, uri):
newBackend = BackendServer(self, uri)
self.uris.append(newBackend)
- def get_first_server(self):
+ def get_first_server(self):
"Provide first BackendServer for this Backend"
return self.uris[0]
@@ -1246,6 +94,25 @@
def __str__(self):
return '('+self.base+')'+' servers:'+str(len(self.uris))
+ def get_cache_entry(self, path):
+ """
+ Return CacheEntry for given path
+ a new object is created if it does not already exist
+ """
+ if self.entries.has_key(path):
+ log.debug("Cache entry exists: %s, %s entries" %(path,len(self.entries)))
+ return self.entries[path]
+ else:
+ log.debug("New Cache entry: "+path)
+ e = cache.CacheEntry(self, path)
+ self.entries[path] = e
+ return e
+ def entry_done(self, entry):
+ "A cache entry is finished and clients are disconnected"
+ #if self.entries.has_key(entry.path):
+ log.debug("entry_done: %s" %(entry.path), 'Backend')
+ del self.entries[entry.path]
+
def get_packages_db(self):
"Return packages parser object for the backend, creating one if necessary"
if self.packages == None:
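(In sketch form, the entry table makes every concurrent request for the same
path share one CacheEntry; names as in this diff:

    entry = backend.get_cache_entry('dists/stable/Release')
    entry is backend.get_cache_entry('dists/stable/Release')  # True: entry is shared
    entry.add_request(request)  # a second client attaches to the same entry
)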
@@ -1255,12 +122,22 @@
def get_path(self, path):
"""
'path' is the original uri of the request.
-
+
We return the path to be appended to the backend path to
request the file from the backend server
"""
return path[len(self.base)+2:]
-
+
+ def file_served(self, entry):
+ "A cache entry has served a file in this backend"
+ self.get_packages_db().file_updated(entry)
+
+ def start_download(self, entry):
+ """
+ A CacheEntry has requested that a file should be downloaded from the backend
+ """
+ self.queue.addFile(entry)
+
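(The queue classes live in the new fetchers module and are not shown in this
diff; a minimal sketch of the serialising idea behind addFile, hypothetical
implementation:

    class DownloadQueue:
        def __init__(self):
            self.queue = []              # CacheEntries waiting for a fetcher
        def addFile(self, entry):
            self.queue.append(entry)
            if len(self.queue) == 1:     # nothing in flight yet
                self.startNextDownload() # hypothetical: begin fetching entry
)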
class BackendServer:
"""
A repository server. A BackendServer is created for each URI defined in 'backends'
@@ -1271,10 +148,10 @@
uri = None # URI of server
fetchers = {
- 'http' : FetcherHttp,
- 'ftp' : FetcherFtp,
- 'rsync': FetcherRsync,
- 'file' : FetcherFile,
+ 'http' : fetchers.HttpFetcher,
+ 'ftp' : fetchers.FtpFetcher,
+ 'rsync': fetchers.RsyncFetcher,
+ 'file' : fetchers.FileFetcher,
}
ports = {
'http' : 80,
@@ -1289,7 +166,7 @@
log.debug("Created new BackendServer: " + uri)
# hack because urlparse doesn't support rsync
- if uri[0:5] == 'rsync':
+ if uri[0:6] == 'rsync:':
uri = 'http'+uri[5:]
is_rsync=1
else:
@@ -1297,6 +174,8 @@
self.scheme, netloc, self.path, parameters, \
query, fragment = urlparse.urlparse(uri)
+ if is_rsync:
+ self.scheme = 'rsync'
if '@' in netloc:
auth = netloc[:netloc.rindex('@')]
@@ -1304,13 +183,12 @@
self.username, self.password = auth.split(':')
else:
self.username = None
+ self.password = None
if ':' in netloc:
self.host, self.port = netloc.split(':')
else:
self.host = netloc
self.port = self.ports[self.scheme]
- if is_rsync:
- self.scheme = 'rsync'
self.fetcher = self.fetchers[self.scheme]
try:
self.port = int(self.port)
@@ -1320,21 +198,23 @@
def __str__(self):
return ('(' + self.backend.base + ') ' + self.scheme + '://' +
self.host + ':' + str(self.port))
-
+
class Request(http.Request):
"""
Each new request from connected clients generates a new instance of this
class, and process() is called.
"""
- local_mtime = None
+ if_modified_since = None
local_size = None
serve_if_cached = 1
apFetcher = None
uriIndex = 0 # Index of backend URI
backend = None # Backend for this request
backendServer = None # Current server to be tried
+ cacheEntry = None # Cache entry for file requested
def __init__(self, channel, queued):
+ log.debug("New Request, queued=%s" % (queued),'Request');
self.factory=channel.factory
http.Request.__init__(self, channel, queued)
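(Pieced together from the hunks below, the reworked request lifecycle is
roughly:

    # 1. Request.process()         -> look up Backend, then its CacheEntry
    # 2. CacheEntry.add_request()  -> serve from cache or backend.start_download()
    # 3. Request.start_streaming() -> send 200 plus headers, or finish with 304
    # 4. Request.finish()          -> detach the request from its CacheEntry
)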
@@ -1342,298 +222,118 @@
"""
Each new request begins processing here
"""
- log.debug("Request: " + self.method + " " + self.uri);
- # Clean up URL
- self.uri = self.simplify_path(self.uri)
+ self.uri = self.clean_path(self.uri)
- self.local_file = self.factory.config.cache_dir + self.uri
- backendName = self.uri[1:].split('/')[0]
- log.debug("Request: %s %s backend=%s local_file=%s"%(self.method, self.uri, backendName, self.local_file))
+ if_modified_since = self.getHeader('if-modified-since')
+ if if_modified_since != None:
+ self.if_modified_since = http.stringToDatetime(
+ if_modified_since)
+
+ if self.uri[0] != '/':
+ log.debug("Request must include at least one '/'")
+ self.finishCode(http.FORBIDDEN, "Request must include at least one '/'")
+ return
- if self.factory.config.disable_pipelining:
- self.setHeader('Connection','close')
- self.channel.persistent = 0
+ backendName = self.uri[1:].split('/')[0]
+ log.debug("Request: %s %s backend=%s uri=%s"
+ % (self.method, self.uri, backendName, self.uri),'Request')
if self.method != 'GET':
#we currently only support GET
- log.debug("abort - method not implemented")
+ log.debug("abort - method not implemented", 'Request')
self.finishCode(http.NOT_IMPLEMENTED)
return
if re.search('/\.\./', self.uri):
- log.debug("/../ in simplified uri ("+self.uri+")")
+ log.debug("/../ in simplified uri ("+self.uri+")", 'Request')
self.finishCode(http.FORBIDDEN)
return
self.backend = self.factory.getBackend(backendName)
if self.backend is None:
- if not self.factory.config.dynamic_backends:
- log.debug("abort - non existent Backend")
- self.finishCode(http.NOT_FOUND, "NON-EXISTENT BACKEND")
- return
-
- # We are using dynamic backends so we will use the name as
- # the hostname to get the files.
- backendName = self.uri[1:].split('/')[0]
- backendServer = "http://" + backendName
- log.debug("Adding " + backendName + " backend dynamicaly")
- backendConfig = self.factory.config.addBackend(None, backendName, (backendServer,))
- self.backend = Backend(self.factory, backendConfig)
- self.backend_uri = self.backend.get_path(self.uri)
+ self.finishCode(http.NOT_FOUND, "NON-EXISTENT BACKEND")
+ return None
log.debug("backend: %s %s" % (self.backend.base, self.backend.uris))
- self.backendServer = self.backend.get_first_server()
- self.filetype = findFileType(self.uri)
- if not self.filetype:
- log.debug("abort - unknown extension")
- self.finishCode(http.NOT_FOUND)
+ backend_path = self.uri.split('/',2)[2]
+ self.cacheEntry = self.backend.get_cache_entry(backend_path)
+
+ if not self.cacheEntry.filetype:
+ log.debug("abort - unknown extension for file %s" % (backend_path), 'Request')
+ self.finishCode(http.FORBIDDEN, 'File not found - unknown extension')
return
- self.setHeader('content-type', self.filetype.contype)
+ self.setHeader('content-type', self.cacheEntry.filetype.contype)
- if os.path.isdir(self.local_file):
- log.debug("abort - Directory listing not allowed")
- self.finishCode(http.FORBIDDEN)
+ if os.path.isdir(self.cacheEntry.file_path):
+ log.debug("abort - Directory listing not allowed", 'Request')
+ self.finishCode(http.FORBIDDEN, 'Directory listing not permitted')
return
- self.fetch()
+ self.cacheEntry.add_request(self)
+
+ def clean_path(self, uri):
+ # Clean up URL given
+ scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)
+ return os.path.normpath(path)
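(clean_path leans on os.path.normpath for the collapsing that the removed
simplify_path did by hand, e.g.:

    >>> os.path.normpath('/debian//dists/./stable/../sid/Release')
    '/debian/dists/sid/Release'
)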
- def fetch(self, serve_cached=1):
+
+ def start_streaming(self, size, mtime):
"""
- Serve 'self' from cache or through the appropriate Fetcher
- depending on the asociated backend.
-
- Use post_convert and gzip_convert regular expresions of the Fetcher
- to gzip/gunzip file before and after download.
-
- 'serve_cached': this is somewhat of a hack only useful for
- LoopbackRequests (See LoopbackRequest class for more information).
+ Prepare client to stream file
+ Return false if streaming is not necessary (i.e. cache hit)
"""
- def fetch_real(result, dummyFetcher, cached, running):
- """
- This is called after verifying if the file is properly cached.
-
- If 'cached' the requested file is properly cached.
- If not 'cached' the requested file was not there, didn't pass the
- integrity check or may be outdated.
- """
- __pychecker__ = 'unusednames=result'
-
- if len(dummyFetcher.requests)==0:
- #The requests are gone; the clients probably closed the
- #connection
- log.debug("THE REQUESTS ARE GONE (Clients closed connection)",
- 'fetch')
- dummyFetcher.apEnd()
- return
-
-
- req = dummyFetcher.request
-
- log.debug("cached: %s" % cached)
-
- if cached:
- msg = ("Using cached copy of %s"
- %(dummyFetcher.request.local_file))
- fetcher_class = FetcherCachedFile
- else:
- msg = ("Consulting server about %s"
- %(dummyFetcher.request.local_file))
- fetcher_class = req.backendServer.fetcher
-
- if fetcher_class.gzip_convert.search(req.uri):
- msg = ("Using gzip/gunzip to get %s"
- %(dummyFetcher.request.local_file))
- fetcher_class = FetcherGzip
-
- log.debug(msg, 'fetch_real')
- fetcher = dummyFetcher.apEndTransfer(fetcher_class)
- if (fetcher and fetcher.post_convert.search(req.uri)
- and not running.has_key(req.uri[:-3])):
- log.debug("post converting: "+req.uri,'convert')
- loop = LoopbackRequest(req)
- loop.uri = req.uri[:-3]
- loop.local_file = req.local_file[:-3]
- loop.process()
- loop.serve_if_cached=0
- #FetcherGzip will attach as a request of the
- #original Fetcher, effectively waiting for the
- #original file if needed
- gzip = FetcherGzip()
- gzip.activate(loop, postconverting=1)
-
- self.serve_if_cached = serve_cached
- running = self.factory.runningFetchers
- if (running.has_key(self.uri)):
- #If we have an active fetcher just use that
- log.debug("have active fetcher: "+self.uri,'client')
- running[self.uri].insert_request(self)
- return running[self.uri]
+ if self.if_modified_since is None or self.if_modified_since < mtime:
+ log.debug("start_streaming size=%s mtime=%s if_modified_since=%s" % (size, mtime, self.if_modified_since) , 'Request')
+ self.setResponseCode(http.OK, 'Streaming file')
+ if mtime is not None:
+ self.setHeader('last-modified', http.datetimeToString(mtime))
+ if size is not None:
+ self.setHeader('content-length', size)
+ return True
else:
- #we make a FetcherDummy instance to hold other requests for the
- #same file while the check is in process. We will transfer all
- #the requests to a real fetcher when the check is done.
- dummyFetcher = FetcherDummy(self)
- #Standard Deferred practice
- d = self.check_cached()
- d.addCallbacks(fetch_real, fetch_real,
- (dummyFetcher, 1, running,), None,
- (dummyFetcher, 0, running,), None)
- return None
-
- def simplify_path(self, old_path):
- """
- replace //+ with /
- replace /directory/../ with /
- More than three occurrences of /../ together will not be
- properly handled
-
- NOTE: os.path.normpath could probably be used here.
- """
- path = re.sub(r"//+", "/", old_path)
- path = re.sub(r"/\./+", "/", path)
- new_path = re.sub(r"/[^/]+/\.\./", "/", path)
- while (new_path != path):
- path = new_path
- new_path = re.sub(r"/[^/]+/\.\./", "/", path)
- if (new_path != old_path):
- log.debug("simplified path from " + old_path +
- " to " + new_path,'simplify_path')
- return path
+ log.debug("file not modified: mtime=%s if_modified_since=%s" % (mtime, self.if_modified_since) , 'Request')
+ self.setHeader("content-length", 0)
+ self.finishCode(http.NOT_MODIFIED, 'File is up to date')
+ return False
def finishCode(self, responseCode, message=None):
- "Finish the request with an status code"
+ "Finish the request with a status code and no streamed data"
+ log.debug("finishCode: %s, %s" % (responseCode, message), 'Request')
self.setResponseCode(responseCode, message)
self.write("")
self.finish()
def finish(self):
+ "Finish request after streaming"
+ log.debug("finish. Queued: %s" % (self.queued) , 'Request')
http.Request.finish(self)
- if self.factory.config.disable_pipelining:
- if hasattr(self.transport, 'loseConnection'):
- self.transport.loseConnection()
-
- def check_cached(self):
- """
- Check the existence and integrity of the requested file and
- return a Deferred to be triggered when we find out.
- """
- def file_ok(result, deferred, self):
- """
- called if FileVerifier has determined that the file is cached and
- in good shape.
-
- Note that the file may still be too old or not fresh
- enough.
- """
- __pychecker__ = 'unusednames=result'
- stat_tuple = os.stat(self.local_file)
-
- self.local_mtime = stat_tuple[stat.ST_MTIME]
- self.local_size = stat_tuple[stat.ST_SIZE]
- log.debug("Modification time:" +
- time.asctime(time.localtime(self.local_mtime)),
- "file_ok")
- update_times = self.factory.update_times
-
- if update_times.has_key(self.uri):
- last_access = update_times[self.uri]
- log.debug("last_access from db: " +
- time.asctime(time.localtime(last_access)),
- "file_ok")
- else:
- last_access = self.local_mtime
-
- cur_time = time.time()
- min_time = cur_time - self.factory.config.min_refresh_delay
+ if self.cacheEntry:
+ self.cacheEntry.remove_request(self)
+ self.cacheEntry = None
- if not self.filetype.mutable:
- log.debug("file is immutable: "+self.local_file, 'file_ok')
- deferred.callback(None)
- elif last_access < min_time:
- log.debug("file is too old: "+self.local_file, 'file_ok')
- update_times[self.uri] = cur_time
- deferred.errback()
- else:
- log.debug("file is ok: "+self.local_file, 'file_ok')
- deferred.callback(None)
-
- log.debug("check_cached: "+self.local_file, 'file_ok')
- deferred = defer.Deferred()
- if os.path.exists(self.local_file):
- verifier = FileVerifier(self)
- verifier.deferred.addCallbacks(file_ok, deferred.errback,
- (deferred, self), None,
- None, None)
- else:
- deferred.errback()
- return deferred
-
def connectionLost(self, reason=None):
"""
The connection with the client was lost, remove this request from its
Fetcher.
"""
- __pychecker__ = 'unusednames=reason'
- #If it is waiting for a file verification it may not have an
- #apFetcher assigned
- if self.apFetcher:
- self.apFetcher.remove_request(self)
- self.finish()
+ log.debug("connectionLost" , 'Request')
+ if self.cacheEntry:
+ self.cacheEntry.remove_request(self)
+ #self.finish()
- def activateNextBackendServer(self, fetcher):
+ def getFileno(self):
"""
- The attempt to retrieve a file from the BackendServer failed.
- Look for the next possible BackendServer and transfer requests to that
- Returns true if another BackendServer was found
- """
- self.backendServer = self.backend.get_next_server(self.backendServer)
- if(self.backendServer == None):
- log.debug("no more Backends", "fetcher")
- return False
-
- fetcher_class = self.backendServer.fetcher
- log.debug('Trying next backendServer', 'fetcher')
- fetcher.apEndTransfer(fetcher_class)
-
- return True
-
-
-class LoopbackRequest(Request):
- """
- This is just a fake Request so a Fetcher can attach to another
- Fetcher and be notified when the transaction is completed.
-
- Look at FetcherGzip for a sample.
- """
- __pychecker__ = 'no-callinit'
- import cStringIO
- local_mtime = None
- headers = {}
- content = cStringIO.StringIO()
-
- def __init__(self, other_req, finish=None):
-
- self.finish_cb = finish
- http.Request.__init__(self, None, 1)
- self.backend = other_req.backend
- self.factory = other_req.factory
- self.filetype = other_req.filetype
- self.method = other_req.method
- self.clientproto = other_req.clientproto
- def process(self):
- self.backend_uri = self.backend.get_path(self.uri)
- def write(self, data):
- "We don't care for the data, just want to know then it is served."
- pass
- def finish(self):
- "If he wanted to know, tell daddy that we are served."
- if self.finish_cb:
- self.finish_cb()
- self.transport = None
- pass
+ Get identifier which is unique per apt client
+ """
+ try:
+ fileno = self.channel.transport.fileno()
+ except:
+ fileno = -1
+ log.msg("could not get transport's file descriptor", 'Request')
+ return fileno
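(The conditional-GET decision in start_streaming above reduces to a single
comparison; a standalone sketch using the twisted.web.http helpers this module
already imports:

    from twisted.web import http

    def wants_body(request, mtime):
        # Stream unless the client's cached copy is at least as new as ours
        ims = request.getHeader('if-modified-since')
        return ims is None or http.stringToDatetime(ims) < mtime
)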
class Channel(http.HTTPChannel):
"""
@@ -1647,7 +347,7 @@
def headerReceived(self, line):
"log and pass over to the base class"
- #log.debug("Header: " + line)
+ log.debug("Header: " + line)
if self.log_headers == None:
self.log_headers = line
else:
@@ -1663,13 +363,16 @@
def connectionLost(self, reason=None):
"If the connection is lost, notify all my requests"
__pychecker__ = 'unusednames=reason'
- for req in self.requests:
- req.connectionLost()
- log.debug("Client connection closed")
+ log.debug("Client connection closed", 'Channel')
+ http.HTTPChannel.connectionLost(self, reason)
if log.isEnabled('memleak'):
memleak.print_top_10()
#reactor.stop() # use for shutting down apt-proxy when a client disconnects
+ #def requestDone(self, request):
+ #log.debug("========Request Done=========", 'Channel')
+ #http.HTTPChannel.requestDone(self, request)
+
class Factory(protocol.ServerFactory):
"""
This is the center of apt-proxy, it holds all configuration and global data
@@ -1691,81 +394,55 @@
self.packages: all versions of a certain package name.
"""
- databases=('update_times', 'access_times', 'packages')
+
+
+
+ def __init__ (self, config):
+ self.runningFetchers = {}
+ self.backends = {}
+ self.config = config
+ self.periodicCallback = None
+ self.databases = databaseManager(self)
+ self.recycler = None
+
+ def __del__(self):
+ pass
+ #self.closeDatabases()
def periodic(self):
"Called periodically as configured mainly to do mirror maintanace."
log.debug("Doing periodic cleaning up")
+ self.periodicCallback = None
self.clean_old_files()
self.recycler.start()
log.debug("Periodic cleaning done")
- if (self.config.cleanup_freq != None):
- reactor.callLater(self.config.cleanup_freq, self.periodic)
- def __del__(self):
- for f in self.databases:
- try:
- if hasattr(self, f):
- getattr(self, f).close()
- except Exception:
- pass
- def __init__ (self, config):
- self.runningFetchers = {}
- self.backends = []
- self.config = config
+ self.startPeriodic()
+
+ def startPeriodic(self):
+ if (self.config.cleanup_freq != None and self.periodicCallback is None):
+ log.debug("Will do periodic cleaup in %s sec" % (self.config.cleanup_freq))
+ self.periodicCallback = reactor.callLater(self.config.cleanup_freq, self.periodic)
+
+ def stopPeriodic(self):
+ if self.periodicCallback is not None:
+ self.periodicCallback.cancel()
+ self.periodicCallback = None
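(startPeriodic and stopPeriodic are the usual reactor.callLater bookkeeping:
keep the IDelayedCall handle, and cancel it only while it is still pending.
do_cleanup below is a stand-in callable:

    from twisted.internet import reactor

    def do_cleanup():
        pass  # stand-in for Factory.periodic

    call = reactor.callLater(60, do_cleanup)   # returns an IDelayedCall
    if call is not None:
        call.cancel()                          # only valid while still pending
        call = None
)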
def __getattr__ (self, name):
- def open_shelve(dbname):
- from bsddb3 import db,dbshelve
-
- shelve = dbshelve.DBShelf()
- db_dir = self.config.cache_dir+'/'+status_dir+'/db'
- if not os.path.exists(db_dir):
- os.makedirs(db_dir)
-
- filename = db_dir + '/' + dbname + '.db'
- if os.path.exists(filename):
- try:
- log.debug('Verifying database: ' + filename)
- shelve.verify(filename)
- except:
- os.rename(filename, filename+'.error')
- log.msg(filename+' could not be opened, moved to '+filename+'.error','db', 1)
- log.msg('Recreating '+ filename,'db', 1)
- try:
- log.debug('Opening database ' + filename)
- shelve = dbshelve.open(filename)
-
- # Handle upgrade to new format included on 1.9.20.
- except db.DBInvalidArgError:
- log.msg('Upgrading from previous database format: %s' % filename + '.previous')
- import bsddb.dbshelve
- os.rename(filename, filename + '.previous')
- previous_shelve = bsddb.dbshelve.open(filename + '.previous')
- shelve = dbshelve.open(filename)
-
- for k in previous_shelve.keys():
- shelve[k] = previous_shelve[k]
- log.msg('Upgrade complete')
-
- return shelve
-
- if name == 'update_times':
- self.update_times = open_shelve('update')
- return self.update_times
- elif name == 'access_times':
- self.access_times = open_shelve('access')
- return self.access_times
- elif name == 'packages':
- self.packages = open_shelve('packages')
- return self.packages
+ # Auto open database if requested
+ if name in self.databases.table_names:
+ db = self.databases.get(name)
+ setattr(self, name, db)
+ return db
else:
raise AttributeError(name)
def startFactory(self):
#start periodic updates
self.configurationChanged()
- self.recycler = misc.MirrorRecycler(self, 1)
- self.recycler.start()
+ self.dumpdbs()
+ self.recycler = MirrorRecycler(self, 1)
+ #self.recycler.start()
def configurationChanged(self, oldconfig = None):
"""
@@ -1779,7 +456,7 @@
setattr(self.config, param, getattr(oldconfig, param))
if self.config.cleanup_freq != None and (oldconfig is None or oldconfig.cleanup_freq == None):
- reactor.callLater(self.config.cleanup_freq, self.periodic)
+ self.startPeriodic()
self.createBackends()
def createBackends(self):
@@ -1796,7 +473,18 @@
"""
if self.backends.has_key(name):
return self.backends[name]
- return None
+
+ if not self.config.dynamic_backends:
+ return None
+
+ # We are using dynamic backends so we will use the name as
+ # the hostname to get the files.
+ backendServer = "http://" + name
+ log.debug("Adding dynamic backend:" + name)
+ backendConfig = self.config.addBackend(None, name, (backendServer,))
+ backend = Backend(self, backendConfig)
+ self.backends[name] = backend
+ return backend
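With dynamic_backends enabled, the first request for an unknown backend name
now creates the backend on the fly, using the name as the hostname. A minimal
self-contained sketch of that lookup logic (a plain dict and string stand in
for the real Factory and Backend classes; the hostname is hypothetical):

    backends = {}
    def get_backend(name, dynamic_backends=True):
        # Return a cached backend, or create one named after the host
        if name in backends:
            return backends[name]
        if not dynamic_backends:
            return None
        backends[name] = "http://" + name   # stands in for Backend(...)
        return backends[name]

    assert get_backend('ftp.debian.org') == 'http://ftp.debian.org'
    assert get_backend('ftp.debian.org') is backends['ftp.debian.org']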
def clean_versions(self, packages):
"""
@@ -1825,6 +513,8 @@
from packages import AptDpkgInfo, get_mirror_versions
for uri in packages[:]:
if not os.path.exists(cache_dir +'/'+ uri):
+ log.debug("clean_versions: file %s no longer exists"%(uri),
+ 'versions')
packages.remove(uri)
else:
try:
@@ -1833,17 +523,17 @@
package_name = info['Package']
except SystemError:
log.msg("Found problems with %s, aborted cleaning"%(uri),
- 'max_versions')
+ 'versions')
return
- if len(info):
+ if len(cached_packages) > 0:
import apt_pkg
cached_packages.sort(reverse_compare)
- log.debug(str(cached_packages), 'max_versions')
+ log.debug(str(cached_packages), 'versions')
current_packages = get_mirror_versions(self, package_name)
current_packages.sort(reverse_compare)
- log.debug("Current Versions: " + str(current_packages), 'max_versions')
+ log.debug("Current Versions: " + str(current_packages), 'versions')
version_count = 0
@@ -1865,6 +555,7 @@
if version_count > self.config.max_versions:
log.msg("Deleting " + cache_dir +'/'+ cached_packages[0][1], 'max_versions')
os.unlink(cache_dir +'/'+ cached_packages[0][1])
+ packages.remove(cached_packages[0][1])
del cached_packages[0]
def clean_old_files(self):
@@ -1896,30 +587,52 @@
log.debug("old_file: non-existent "+file)
del self.update_times[file]
- def file_served(self, uri):
- "Update the databases, this file has just been served."
- self.access_times[uri]=time.time()
- if re.search("\.deb$", uri):
- package = re.sub("^.*/", "", uri)
+ def file_served(self, cache_path):
+ """
+ Update the databases, this file has just been served.
+ @param cache_path: path of file within cache e.g. debian/dists/stable/Release.gpg
+ """
+ log.debug("File served: %s" % (cache_path))
+ path = os.sep + cache_path # Backwards compat
+ #path = cache_path
+ self.access_times[path]=time.time()
+ if re.search("\.deb$", path):
+ package = re.sub("^.*/", "", path)
package = re.sub("_.*$", "", package)
if not self.packages.has_key(package):
- packages = [uri]
- self.packages[package] = packages
+ packages = [path]
else:
packages = self.packages[package]
- if not uri in packages:
- packages.append(uri)
+ if not path in packages:
+ packages.append(path)
self.clean_versions(packages)
- self.packages[package] = packages
+ self.packages[package] = packages
self.dumpdbs()
+ def closeDatabases(self):
+ for db in self.databases.table_names:
+ if getattr(self.databases, db) is not None:
+ log.debug("closing " + db, 'db')
+ getattr(self,db).close()
+ delattr(self,db)
+ setattr(self.databases, db, None)
+
def stopFactory(self):
+ log.debug('Main factory stop', 'factory')
import packages
- self.dumpdbs()
- self.update_times.close()
- self.access_times.close()
- self.packages.close()
+ # self.dumpdbs()
+
+ # Stop all DownloadQueues and their fetchers
+ for b in self.backends.values():
+ b.queue.stop()
+ b.queue = None
+ self.backends = {}
packages.cleanup(self)
+ if self.recycler is not None:
+ self.recycler.stop()
+ self.recycler = None
+ self.stopPeriodic()
+ #self.closeDatabases()
def dumpdbs (self):
def dump_update(key, value):
@@ -1960,3 +673,56 @@
def debug(self, message):
log.debug(message)
+
+class databaseManager:
+ update_times = None
+ access_times = None
+ packages = None
+ table_names=['update_times', 'access_times', 'packages']
+ database_files=['update', 'access', 'packages']
+
+ def __init__(self, factory):
+ self.factory = factory
+
+ def get(self, name):
+ idx = self.table_names.index(name)
+ db = getattr(self,name)
+ if db is None:
+ db = self.open_shelve(self.database_files[idx])
+ setattr(self, name, db)
+ return db
+
+ def open_shelve(self, dbname):
+ from bsddb import db,dbshelve
+
+ shelve = dbshelve.DBShelf()
+ db_dir = self.factory.config.cache_dir+'/'+status_dir+'/db'
+ if not os.path.exists(db_dir):
+ os.makedirs(db_dir)
+
+ filename = db_dir + '/' + dbname + '.db'
+ if os.path.exists(filename):
+ try:
+ log.debug('Verifying database: ' + filename)
+ shelve.verify(filename)
+ except:
+ os.rename(filename, filename+'.error')
+ log.msg(filename+' could not be opened, moved to '+filename+'.error','db', 1)
+ log.msg('Recreating '+ filename,'db', 1)
+ try:
+ log.debug('Opening database ' + filename)
+ shelve = dbshelve.open(filename)
+
+ # Handle upgrade to new format included on 1.9.20.
+ except db.DBInvalidArgError:
+ log.msg('Upgrading from previous database format: %s' % filename + '.previous')
+ import bsddb.dbshelve
+ os.rename(filename, filename + '.previous')
+ previous_shelve = bsddb.dbshelve.open(filename + '.previous')
+ shelve = dbshelve.open(filename)
+
+ for k in previous_shelve.keys():
+ shelve[k] = previous_shelve[k]
+ log.msg('Upgrade complete')
+
+ return shelve
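The net effect of Factory.__getattr__ plus databaseManager.get is that a shelf
is only opened the first time Factory.update_times, access_times or packages
is touched; the opened shelf is then cached as an instance attribute so
__getattr__ is not consulted again. A self-contained sketch of the pattern
(a dict stands in for the bsddb shelf):

    class LazyDBs(object):
        table_names = ['update_times', 'access_times', 'packages']
        def __getattr__(self, name):
            # Only reached when normal attribute lookup fails
            if name in self.table_names:
                db = {}                  # real code: dbshelve.open(...)
                setattr(self, name, db)  # cache; next access skips __getattr__
                return db
            raise AttributeError(name)

    f = LazyDBs()
    f.update_times['/debian/dists/stable/Release'] = 1154649286.0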
Modified: trunk/apt_proxy/apt_proxy_conf.py
==============================================================================
--- trunk/apt_proxy/apt_proxy_conf.py (original)
+++ trunk/apt_proxy/apt_proxy_conf.py Thu Aug 3 23:54:46 2006
@@ -49,6 +49,8 @@
def gettime(self, section, option):
mult = 1
value = self.get(section, option)
+ if len(value) == 0:
+ raise ConfigError("Configuration parse error: empty value for [%s] %s" % (section, option))
suffix = value[-1].lower()
if suffix in self.time_multipliers.keys():
mult = self.time_multipliers[suffix]
@@ -58,6 +60,13 @@
return self.get(section,option)
def getstringlist(self, section, option):
return self.get(section,option).split()
+ def getproxyspec(self, section, option):
+ "Get http proxy info from string"
+ p = ProxyConfig(self.get(section,option))
+ if p.host is not None:
+ return p
+ else:
+ return None
class apConfig:
"""
@@ -77,7 +86,7 @@
['address', '', 'string'],
['port', 9999, 'int'],
['min_refresh_delay', 30, 'time'],
- ['complete_clientless_downloads', '0', 'boolean'],
+ ['complete_clientless_downloads', False, 'boolean'],
['telnet_port', 0, 'int'],
['telnet_user', '', 'string'],
['telnet_pass', '', 'string'],
@@ -87,11 +96,11 @@
['max_versions', 3, '*int'],
['max_age', 10, '*time'],
['import_dir', '/var/cache/apt-proxy/import', 'string'],
- ['disable_pipelining', '1', 'boolean'],
['passive_ftp', 'on', 'boolean'],
['dynamic_backends', 'on', 'boolean'],
- ['http_proxy', '' , 'string'],
- ['username', 'aptproxy', 'string']
+ ['http_proxy', None , 'proxyspec'],
+ ['username', 'aptproxy', 'string'],
+ ['bandwidth_limit', None, '*int']
]
"""
@@ -104,7 +113,9 @@
BACKEND_CONFIG_ITEMS = [
['timeout', None, 'time'],
['passive_ftp', None, 'boolean'],
- ['backends', '', 'stringlist']
+ ['backends', '', 'stringlist'],
+ ['http_proxy', None , 'proxyspec'],
+ ['bandwidth_limit', None, '*int']
]
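The new http_proxy and bandwidth_limit items can be set globally in [DEFAULT]
and overridden per backend. A hypothetical apt-proxy.conf fragment (all names
and values illustrative only; the proxyspec format is user:password@host:port,
with the credentials optional):

    [DEFAULT]
    ;; fetch through an upstream proxy, limit reads to 100000 bytes/sec
    http_proxy = john:secret@proxy.example.com:8080
    bandwidth_limit = 100000

    [debian]
    backends = http://ftp.us.debian.org/debian
    ;; tighter limit for this backend only
    bandwidth_limit = 50000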
DEFAULT_CONFIG_FILE = ['/etc/apt-proxy/apt-proxy-v2.conf',
@@ -160,9 +171,8 @@
filehandle.close()
return conf
- def setDebug(self, levels):
+ def setDebug(self):
"Set logger debug level"
- self.debug = levels
for domain in self.debug.split():
#print "domain:",domain
if domain.find(':') != -1:
@@ -180,7 +190,7 @@
self.debug=config.get(DEFAULTSECT, 'debug')
else:
self.debug='all:3'
- self.setDebug(self.debug)
+ self.setDebug()
# read default values
for name,default,getmethod in self.CONFIG_ITEMS:
@@ -262,3 +272,23 @@
name = "UNKNOWN"
def __init__(self, name):
self.name = name
+
+class ProxyConfig:
+ """
+ Configuration information for backend server proxies
+ """
+ host = None
+ port = None
+ user = None
+ password = None
+
+ def __init__(self, proxyspec):
+ if proxyspec=='':
+ return
+ m = re.match('^((?P<user>.*):(?P<password>.*)@)?(?P<host>[a-zA-Z0-9_.+=-]+):(?P<port>[0-9]+)',
+ proxyspec)
+ if m:
+ self.host = m.group('host')
+ self.port = m.group('port')
+ self.user = m.group('user')
+ self.password = m.group('password')
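A self-contained sketch of the proxyspec strings ProxyConfig accepts (the
host and credentials are hypothetical; note that the port is kept as a
string):

    import re
    PROXYSPEC = ('^((?P<user>.*):(?P<password>.*)@)?'
                 '(?P<host>[a-zA-Z0-9_.+=-]+):(?P<port>[0-9]+)')
    m = re.match(PROXYSPEC, 'john:secret@proxy.example.com:8080')
    assert (m.group('host'), m.group('port')) == ('proxy.example.com', '8080')
    assert (m.group('user'), m.group('password')) == ('john', 'secret')
    m = re.match(PROXYSPEC, 'proxy.example.com:8080')  # credentials optional
    assert m.group('user') is None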
Added: trunk/apt_proxy/cache.py
==============================================================================
--- (empty file)
+++ trunk/apt_proxy/cache.py Thu Aug 3 23:54:46 2006
@@ -0,0 +1,595 @@
+#
+# Copyright (C) 2005 Chris Halls <halls at debian.org>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+# -*- test-case-name: apt_proxy.test.test_cache -*-
+
+"""
+Cache management for apt-proxy
+
+These classes implement apt-proxy's cache management. The most important is
+CacheEntry, which manages the lifecycle of a file in the cache.
+"""
+
+from twisted.internet import protocol, defer, reactor
+from twisted.web import http
+from twisted.protocols import basic
+import os, re, stat, time, sys
+from misc import log
+
+class CacheEntry:
+ """
+ This class manages operations on a file in the cache. Each physical
+ file on the disk corresponds to one CacheEntry. Normally a CacheEntry
+ is created when the first Request for this file is received
+
+ Active CacheEntries are managed in their corresponding Backend
+ """
+
+ # Define lifecycle of cache entry
+ STATE_NEW = 1 # Entry is not yet being sent
+ STATE_CONNECTING = 2 # Waiting for connection to download file
+ STATE_DOWNLOAD = 3 # File is in process of downloading
+ STATE_SENDFILE = 4 # File is being sent from cache
+ STATE_SENT = 5 # Post download processing / waiting for clients to complete
+ STATE_FAILED = 6 # Download failed
+
+
+ bytesDownloaded = 0
+
+ def __init__(self, backend, path):
+ """
+ Create a new cache entry
+ @param backend: Backend where this entry belongs
+ @param path: Path to file within backend directory
+ """
+ self.backend = backend
+ self.factory = backend.factory
+ self.requests = [] # Active client requests for this cache entry
+ self.streamfile = None
+ self.state = self.STATE_NEW
+
+ # Path of file within backend e.g. 'dists/stable/Release.gpg'
+ self.path = path
+
+ # Path of file within cache e.g. 'debian/dists/stable/Release.gpg'
+ self.cache_path = backend.base + os.sep + path
+
+ # File in cache '/var/cache/apt-proxy/debian/dists/stable/Release.gpg'
+ self.file_path = (self.factory.config.cache_dir + os.sep +
+ self.cache_path)
+
+ # Directory of cache file '/var/cache/apt-proxy/debian/dists/stable'
+ self.filedir = os.path.dirname(self.file_path)
+
+ self.filetype = findFileType(path)
+ self.filename = os.path.basename(path) # 'Release.gpg'
+
+ # filebase='Release' fileext='gpg'
+ (self.filebase, self.fileext) = os.path.splitext(self.filename)
+
+ # self.create_directory()
+ self.file_mtime = None
+ self.file_size = None
+
+ self.fetcher = None
+
+ def add_request(self, request):
+ """
+ A new request has been received for this file
+ """
+ if request in self.requests:
+ raise RuntimeError, \
+ 'this request is already assigned to this CacheEntry'
+ self.requests.append(request)
+ if(len(self.requests)==1):
+ # First request
+ self.get()
+ else:
+ # Subsequent request - client must be brought up to date
+ if self.state == self.STATE_DOWNLOAD:
+ raise RuntimeError, \
+ 'TODO: multiple clients not implemented yet'
+
+ def remove_request(self,request):
+ """
+ Remove request, either because streaming is complete or
+ the client has disconnected
+
+ If parameter request is None, downloading has been aborted early
+ """
+ if request is not None and request in self.requests:
+ self.requests.remove(request)
+ if len(self.requests) != 0:
+ return
+
+ log.debug("Last request removed",'cacheEntry')
+ self.backend.entry_done(self)
+
+ # TODO - fixme
+ #if (self.factory.config.complete_clientless_downloads == False
+ #and self.state == self.STATE_DOWNLOAD
+ #and self.fetcher is not None):
+ ## Cancel download in progress
+ #log.debug("cancelling download (set complete_clientless_downloads to continue)",'cacheEntry')
+ #self.fetcher.cancel_download()
+
+ if self.streamfile is not None:
+ # File was streamed to clients
+ self.streamfile.close()
+ self.streamfile = None
+
+ def start_request_stream(self, request):
+ """
+ Prepare a request for streaming
+ """
+ log.msg("start_request_stream:" + self.file_path, "CacheEntry")
+ request.start_streaming(self.file_size, self.file_mtime)
+
+ if self.streamfile.size() != 0:
+ request.write(self.streamfile.read_from(start=0)) # TODO - is this efficient?
+
+
+ def get(self):
+ """
+ Update current version of file in cache
+ """
+ if self.state == self.STATE_NEW:
+ if os.path.exists(self.file_path):
+ self.stat_file()
+ if self.check_age():
+ self.verify()
+ return
+
+ self.start_download()
+
+ def verify(self):
+ """
+ Check the existence and integrity of the cached file; callbacks
+ either send the file to clients or start a fresh download.
+ """
+ log.debug("verify: "+self.path, 'CacheEntry')
+ verifier = FileVerifier(self.file_path, self.factory.config)
+ d = verifier.verify()
+ d.addCallback(self.send_cached_file)
+ d.addErrback(self.verify_failed)
+
+ def verify_failed(self, parm=None):
+ self.file_mtime = None
+ self.file_size = None
+ self.start_download()
+
+ def stat_file(self):
+ """
+ Read file age
+ """
+ stat_tuple = os.stat(self.file_path)
+
+ self.file_mtime = stat_tuple[stat.ST_MTIME]
+ self.file_size = stat_tuple[stat.ST_SIZE]
+ log.debug("Modification time:" +
+ time.asctime(time.localtime(self.file_mtime)),
+ "CacheEntry")
+
+ def check_age(self):
+ """
+ Read file age and check if file should be updated / refreshed
+
+ @return True if file is still valid, False if file is out of date
+ """
+
+ update_times = self.factory.update_times
+
+ if update_times.has_key(self.cache_path):
+ last_access = update_times[self.cache_path]
+ log.debug("last_access from db: " +
+ time.asctime(time.localtime(last_access)),
+ "CacheEntry")
+ else:
+ last_access = self.file_mtime
+
+
+ cur_time = time.time()
+ min_time = cur_time - self.factory.config.min_refresh_delay
+
+ if not self.filetype.mutable:
+ log.debug("file is immutable: "+self.file_path, 'CacheEntry')
+ return True
+ elif last_access < min_time:
+ log.debug("file is too old: "+self.file_path, 'CacheEntry')
+ return False
+ else:
+ log.debug("file is ok: "+self.file_path, 'CacheEntry')
+ return True
+
+ def send_cached_file(self, unused=None):
+ """
+ File is up to date - send complete file from cache to clients
+ """
+ log.msg("sending file from cache:" + self.file_path, "CacheEntry")
+ self.transfer_file(self.file_path)
+
+ def end_send_cached(self):
+ """
+ Processing continues here when the file has been sent from the cache
+ """
+ self.file_sent()
+
+ def transfer_file(self, filename):
+ """
+ Send given file to clients
+ """
+ log.msg("transfer_file:" + filename, "CacheEntry")
+ try:
+ stat_tuple = os.stat(filename)
+ mtime = stat_tuple[stat.ST_MTIME]
+ size = stat_tuple[stat.ST_SIZE]
+
+ self.state = self.STATE_SENDFILE
+ if size > 0:
+ log.debug("Sending file to clients:%s size:%s" % (filename, size), 'CacheEntry')
+ self.streamfile = open(filename,'rb')
+ #fcntl.lockf(file.fileno(), fcntl.LOCK_SH)
+
+ for request in self.requests:
+ if request.start_streaming(size, mtime):
+ basic.FileSender().beginFileTransfer(self.streamfile, request) \
+ .addBoth(self.file_transfer_complete, request, filename)
+ else:
+ log.debug("Sending empty file to clients:%s" % (filename), 'CacheEntry')
+ for request in self.requests:
+ if request.start_streaming(size, mtime):
+ request.finish()
+ except Exception, e:
+ log.debug("Unexpected error: %s" % (e), 'CacheEntry')
+ raise
+
+ def file_transfer_complete(self, result, request, filename):
+ log.debug("transfer complete: " + filename, 'CacheEntry')
+ request.finish()
+ if len(self.requests)==0:
+ # Last file was sent
+ self.file_sent()
+
+ def create_directory(self):
+ """
+ Create directory for cache entry's file
+ """
+ if(not os.path.exists(self.filedir)):
+ os.makedirs(self.filedir)
+
+ def start_download(self):
+ """
+ Start file transfer from backend server
+ """
+ log.msg("start download:" + self.path, "CacheEntry")
+ self.backend.start_download(self)
+
+ def download_started(self, fetcher, size, mtime):
+ """
+ Callback from Fetcher
+ A fetcher has begun streaming this file
+ """
+ log.msg("download started:" + self.file_path, "CacheEntry")
+ self.state = self.STATE_DOWNLOAD
+ self.create_directory()
+ self.fetcher = fetcher
+ self.file_mtime = mtime
+
+ """
+ Use post_convert and gzip_convert regular expresions of the Fetcher
+ to gzip/gunzip file before and after download.
+ """
+
+ if self.filename == 'Packages.gz':
+ log.msg('TODO postconvert Packages.gz', 'CacheEntry')
+# if (fetcher and fetcher.post_convert.search(req.uri)
+# and not running.has_key(req.uri[:-3])):
+# log.debug("post converting: "+req.uri,'convert')
+# loop = LoopbackRequest(req)
+# loop.uri = req.uri[:-3]
+# loop.local_file = req.local_file[:-3]
+# loop.process()
+# loop.serve_if_cached=0
+# #FetcherGzip will attach as a request of the
+# #original Fetcher, efectively waiting for the
+# #original file if needed
+# gzip = FetcherGzip()
+# gzip.activate(loop, postconverting=1)
+
+
+ for req in self.requests:
+ req.start_streaming(size, mtime)
+
+
+ def download_data_received(self, data):
+ """
+ Callback from Fetcher
+ A block of data has been received from the streaming backend server
+ """
+ #log.msg("download_data_received:" + self.file_path, "CacheEntry")
+ for req in self.requests:
+ req.write(data)
+
+ if self.streamfile:
+ # save to tempfile (if it in use)
+ self.streamfile.append(data)
+
+ def download_data_end(self):
+ """
+ Callback from Fetcher
+ File streaming is complete
+ """
+ log.msg("download_data_end:" + self.file_path, "CacheEntry")
+ self.state = self.STATE_SENT
+
+ if self.streamfile is not None:
+ # File was streamed to clients
+ self.streamfile.close_and_rename(self.file_path)
+ self.streamfile = None
+
+ if self.file_mtime != None:
+ os.utime(self.file_path, (time.time(), self.file_mtime))
+ else:
+ log.debug("no local time: "+self.file_path,'Fetcher')
+ os.utime(self.file_path, (time.time(), 0))
+
+ for req in self.requests:
+ req.finish()
+
+ self.file_sent()
+
+ def download_failure(self, http_code, reason):
+ """
+ Download is not possible
+ """
+ log.msg("download_failure %s: (%s) %s"% (self.file_path, http_code, reason), "CacheEntry")
+
+ for request in self.requests:
+ request.finishCode(http_code, reason)
+ self.state = self.STATE_FAILED
+ ## Remove directory if file was not created
+ #if not os.path.exists(self.file_path):
+ #try:
+ #os.removedirs(self.factory.config.cache_dir + os.sep + self.backend.base)
+ #except:
+ #pass
+
+
+ def file_sent(self):
+ """
+ File has been sent successfully to at least one client
+ Update databases with statistics for this file
+ """
+ log.msg("file_sent:" + self.file_path, "CacheEntry")
+
+ self.state = self.STATE_SENT
+ self.fetcher = None
+ self.backend.file_served(self)
+ self.factory.file_served(self.cache_path)
+ self.factory.update_times[self.cache_path] = time.time()
+ self.state = self.STATE_NEW
+
+ def init_tempfile(self):
+ #log.msg("init_tempfile:" + self.file_path, "CacheEntry")
+ self.create_directory()
+ self.streamFilename = self.file_path + ".apDownload"
+ self.streamfile = StreamFile(self.streamFilename)
+
+class FileType:
+ """
+ This is just a way to distinguish between different filetypes.
+
+ self.regex: regular expression that files of this type should
+ match. It could probably be replaced with something simpler,
+ but... o well, it works.
+
+ self.contype: mime string for the content-type http header.
+
+ mutable: do the contents of this file ever change? Files such as
+ .deb and .dsc are never changed once they are created.
+
+ """
+ def __init__ (self, regex, contype, mutable):
+ self.regex = regex
+ self.contype = contype
+ self.mutable = mutable
+
+ def check (self, name):
+ "Returns true if name is of this filetype"
+ if self.regex.search(name):
+ return 1
+ else:
+ return 0
+
+# Set up the list of filetypes that we are prepared to deal with.
+# If it is not in this list, then we will ignore the file and
+# return an error.
+filetypes = (
+ FileType(re.compile(r"\.u?deb$"), "application/dpkg", 0),
+ FileType(re.compile(r"\.tar\.gz$"), "application/x-gtar", 0),
+ FileType(re.compile(r"\.dsc$"),"text/plain", 0),
+ FileType(re.compile(r"\.diff\.gz$"), "x-gzip", 0),
+ FileType(re.compile(r"\.bin$"), "application/octet-stream", 0),
+ FileType(re.compile(r"\.tgz$"), "application/x-gtar", 0),
+ FileType(re.compile(r"\.txt$"), "text/plain", 1),
+ FileType(re.compile(r"\.html$"), "text/html", 1),
+
+ FileType(re.compile(r"(?:^|/)(?:Packages|Release(?:\.gpg)?|Sources|(?:Contents|Translation)-[a-z0-9]+)"
+ r"(?:\.(?:gz|bz2))?$"),
+ "text/plain", 1),
+ FileType(re.compile(r"(?:^|/)(?:Packages|Sources|Contents-[a-z0-9]+)\.diff/Index$"),
+ "text/plain", 1),
+ FileType(re.compile(r"(?:^|/)(?:Packages|Sources|Contents-[a-z0-9]+)\.diff/[a-z0-9.-]+"
+ r"(?:\.(?:gz|bz2))?$"),
+ "text/plain", 0),
+
+ FileType(re.compile(r"\.rpm$"), "application/rpm", 0),
+
+ FileType(re.compile(r"(?:^|/)(?:pkglist|release|srclist)(?:\.(?:\w|-)+)?"
+ r"(?:\.(?:gz|bz2))?$"),
+ "text/plain", 1),
+ FileType(re.compile(r"\.gz$"), "x-gzip", 1)
+ )
+
+
+def findFileType(name):
+ "Look for the FileType of 'name'"
+ for type in filetypes:
+ if type.check(name):
+ return type
+ return None
+
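A quick illustration of how findFileType classifies paths, and why the
mutable flag matters (paths hypothetical; immutable files are cached forever,
mutable index files are re-checked against min_refresh_delay):

    deb = findFileType('pool/main/h/hello/hello_2.1.1-4_i386.deb')
    assert deb.contype == 'application/dpkg' and not deb.mutable
    idx = findFileType('debian/dists/stable/main/binary-i386/Packages.gz')
    assert idx.contype == 'text/plain' and idx.mutable
    assert findFileType('README.unknown-extension') is None   # rejected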
+class StreamFile:
+ """
+ A temporary file used to stream to during download
+ """
+ CHUNKSIZE = 16384
+ def __init__(self, name, mode='w+b'):
+ log.debug("Creating file: " + name, 'cache')
+ self.file = file(name, mode, self.CHUNKSIZE)
+ self.name = name
+ def append(self, data):
+ self.file.write(data)
+ def size(self):
+ return self.file.tell()
+ def read_from(self, size=-1, start=None):
+ if start is not None:
+ self.file.seek(start) # SEEK_SET: read from the given offset
+ data = self.file.read(size)
+ self.file.seek(0, 2) # SEEK_END: back to the end, ready to append
+ return data
+ def close(self):
+ log.debug("Closing file: " + self.name, 'cache')
+ self.file.close()
+ self.file = None
+ def close_and_rename(self, new_name):
+ """
+ File was successfully downloaded - close and rename to final destination
+ """
+ self.close()
+ if self.name == new_name:
+ return
+ log.debug("renaming file: %s->%s " % (self.name, new_name), 'cache')
+ os.rename(self.name, new_name)
+ self.name = new_name
+
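CacheEntry streams downloads through a StreamFile so that a half-fetched file
can never be mistaken for a complete one: data is appended to FILE.apDownload
and only renamed over FILE once the fetch finishes cleanly. A usage sketch
(paths hypothetical):

    sf = StreamFile('/var/cache/apt-proxy/debian/Packages.gz.apDownload')
    sf.append('first chunk')          # write as data arrives
    print sf.size()                   # bytes written so far
    sf.close_and_rename('/var/cache/apt-proxy/debian/Packages.gz')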
+class FileVerifier:
+ """
+ Verifies the integrity of a cached file
+
+ self.deferred: a deferred that will be triggered when the command
+ completes, or if a timeout occurs.
+
+ Sample:
+
+ verifier = FileVerifier(path, config)
+ d = verifier.verify()
+ d.addCallbacks(callback_if_ok, callback_if_fail)
+
+ then either callback_if_ok or callback_if_fail will be called
+ when the verification finishes.
+
+ See twisted.internet.defer.Deferred on how to use self.deferred.
+
+ """
+ def __init__(self, path, config):
+ """
+ Initialise verification
+ @param path: filename to be verified (absolute path)
+ @param config: apConfig configuration (the timeout parameter sets the time limit)
+ """
+ self.path = path
+ self.timeout = config.timeout
+ self.deferred = defer.Deferred() # Deferred that passes status back
+
+ def verify(self):
+ if re.search(r"\.deb$", self.path):
+ self.worker = FileVerifierProcess(self, '/usr/bin/dpkg', '--fsys-tarfile', self.path)
+ elif re.search(r"\.gz$", self.path):
+ self.worker = FileVerifierProcess(self, '/bin/gunzip', '-t', '-v', self.path)
+ elif re.search(r"\.bz2$", self.path):
+ self.worker = FileVerifierProcess(self, '/usr/bin/bunzip2', '--test', self.path)
+ else:
+ # Unknown file, just check it is not 0 size
+ try:
+ filesize = os.stat(self.path)[stat.ST_SIZE]
+ except OSError:
+ filesize = 0
+
+ if filesize < 1:
+ self.failed("Zero length file")
+ else:
+ log.debug('Verification skipped for ' + self.path)
+ self.deferred.callback(None)
+ return self.deferred
+
+ class VerificationFailure:
+ def __init__(self, path, reason):
+ self.path = path
+ self.reason = reason
+ def failed(self, reason):
+ log.msg("cache file verification FAILED for %s: %s"%(self.path, reason), 'verify')
+ os.unlink(self.path)
+ self.deferred.errback(self.VerificationFailure(self.path, reason))
+
+ def passed(self):
+ log.debug("cache file verification passed: %s"%(self.path), 'verify')
+ self.deferred.callback(None)
+
+class FileVerifierProcess(protocol.ProcessProtocol):
+ """
+ Verifies the integrity of a file by running an external command.
+ """
+ def __init__(self, verifier, *args):
+ self.parent = verifier
+
+ self.exe = args[0]
+ log.debug("starting verification: " + self.exe + " " + str(args),'FileVerifierProcess',8)
+ nullhandle = open("/dev/null", "w")
+ self.process = reactor.spawnProcess(self, self.exe, args, childFDs = { 0:"w", 1:nullhandle.fileno(), 2:"r" })
+ self.laterID = reactor.callLater(self.parent.timeout, self.timedout)
+
+ def connectionMade(self):
+ self.data = ''
+
+ def outReceived(self, data):
+ #we only care about errors
+ pass
+
+ def errReceived(self, data):
+ self.data = self.data + data
+
+ def timedout(self):
+ """
+ this should not happen, but if we timeout, we pretend that the
+ operation failed.
+ """
+ self.laterID=None
+ self.parent.failed("Verification process timed out")
+
+ def processEnded(self, reason=None):
+ """
+ This gets called automatically when the process finishes; we check
+ the status and report through the Deferred.
+ """
+ __pychecker__ = 'unusednames=reason'
+ #log.debug("Process Status: %d" %(self.process.status),'verify')
+ #log.debug(self.data, 'verify')
+ if self.laterID:
+ self.laterID.cancel()
+ if self.process.status == 0:
+ self.parent.deferred.callback(None)
+ else:
+ self.parent.failed(os.path.basename(self.exe)+ " failed")
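A self-contained sketch of how verify() picks a checker: the command depends
only on the file extension, and anything unrecognised falls back to the
zero-length check (the command table is copied from verify() above, the
lookup logic is illustrative):

    checkers = {'.deb': ['/usr/bin/dpkg', '--fsys-tarfile'],
                '.gz':  ['/bin/gunzip', '-t', '-v'],
                '.bz2': ['/usr/bin/bunzip2', '--test']}

    def command_for(path):
        for ext, argv in checkers.items():
            if path.endswith(ext):
                return argv + [path]
        return None    # no checker: only test that the file is non-empty

    assert command_for('Packages.gz') == ['/bin/gunzip', '-t', '-v', 'Packages.gz']
    assert command_for('Release.gpg') is None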
Added: trunk/apt_proxy/fetchers.py
==============================================================================
--- (empty file)
+++ trunk/apt_proxy/fetchers.py Thu Aug 3 23:54:46 2006
@@ -0,0 +1,1103 @@
+#
+# Copyright (C) 2005 Chris Halls <halls at debian.org>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""
+Fetchers for apt-proxy
+
+These classes implement the network code for fetching files from apt-proxy
+network backends
+"""
+
+import re, os, string, time, glob, signal, stat
+from base64 import encodestring
+from twisted.web import static, http
+from twisted.internet import protocol, reactor, defer, error, abstract
+from twisted.python import failure
+from twisted.protocols import policies, ftp
+
+from misc import log
+
+
+class Fetcher:
+ """
+ This class manages the selection of a BackendServer and downloading from
+ that backend
+
+ """
+ cacheEntry = None
+ fetcher = None # connection-specific fetcher
+
+ def __init__(self):
+ self.backendServer = None
+ self.size = None # Size of file notified by fetcher's server
+ self.mtime = None # Mtime of file notified by fetcher's server
+
+ def start(self, cacheEntry):
+ self.cacheEntry = cacheEntry
+ log.debug("fetcher start:" + self.cacheEntry.filename, "fetcher")
+ self.backend = cacheEntry.backend
+ self.len_received = 0
+ self.deferred = defer.Deferred()
+ self.start_download()
+ return self.deferred
+
+ def activateNextBackendServer(self, fetcher):
+ """
+ Returns true if another BackendServer was found
+ """
+ if self.backendServer is None:
+ self.backendServer = self.backend.get_first_server()
+ if(self.backendServer == None):
+ log.err("No backend server found for backend " + self.backend.name, "fetcher")
+ return False
+ else:
+ # Look for the next possible BackendServer
+ self.backendServer = self.backend.get_next_server(self.backendServer)
+
+ if(self.backendServer == None):
+ # The attempt to retrieve a file from the BackendServer failed.
+ log.debug("no more Backends", "fetcher")
+ return False
+ return self.connectToBackend() # True: another server was found
+
+ def connectToBackend(self):
+ log.debug('Connecting to backend server %s' % (self.backendServer), 'fetcher')
+ self.fetcher = self.backendServer.fetcher(self.backendServer)
+ d = self.fetcher.connect()
+ d.addCallback(self.connected)
+ d.addErrback(self.connection_failed)
+ #fetcher.apEndTransfer(fetcher_class)
+
+ return True
+
+ def __str__(self):
+ return 'Fetcher server=%s file=%s' % (str(self.backendServer), self.cacheEntry.path)
+
+ def start_download(self):
+ """
+ Begin streaming file
+ Serve from cache or through the appropriate Fetcher
+ depending on the associated backend.
+
+ Use post_convert and gzip_convert regular expressions of the
+ Fetcher to gzip/gunzip the file before and after download.
+ """
+ log.debug("Downloading: " + self.cacheEntry.file_path, 'Fetcher')
+ #init_tempfile()
+ if self.backendServer is None:
+ self.activateNextBackendServer(self.fetcher)
+ elif self.fetcher is None:
+ self.connectToBackend()
+ else:
+ self.download()
+
+ def download_complete(self):
+ """
+ Download was successful
+ """
+ log.debug("download complete. Sent:%s bytes" % (self.len_received), "Fetcher")
+ if self.fetcher is not None and not self.fetcher.pipelining:
+ self.connection_closed(self.fetcher)
+ if self.len_received==0:
+ self.download_started() # Send status code to clients
+ self.cacheEntry.download_data_end()
+ self.deferred.callback((True, ""))
+
+ def fail_over(self, reason_code, reason_msg):
+ """
+ A non-fatal download error has occurred. Attempt to download from
+ the next backend
+ """
+ if not self.activateNextBackendServer(self.fetcher):
+ self.download_failed(reason_code, reason_msg)
+
+ def download_failed(self, reason_code, reason_msg):
+ #self.cacheEntry.download_data_end()
+ log.debug("download_failed: (%s) %s " %(reason_code, reason_msg), "Fetcher")
+ if self.fetcher is not None and not self.fetcher.pipelining:
+ self.connection_closed(self.fetcher)
+ self.cacheEntry.download_failure(reason_code, reason_msg)
+ self.deferred.callback((False, reason_msg))
+
+ def cancel_download(self):
+ if self.fetcher:
+ log.debug(
+ "telling fetchers to disconnect",'Fetcher')
+ self.fetcher.disconnect()
+ self.download_failed(None, "Download canceled")
+
+ def data_received(self, data, save=True):
+ """
+ File Data has been received from the backend server
+ @param data: raw data received from server
+ @param save: if true, save to disk (rsync saves file itself)
+ """
+ #log.debug("data_received: %s bytes" % len(data), 'Fetcher');
+ if not self.len_received:
+ self.download_started(save)
+ self.len_received = self.len_received + len(data)
+ self.cacheEntry.download_data_received(data)
+
+ def download_started(self, save=True):
+ if save:
+ self.cacheEntry.init_tempfile()
+ self.cacheEntry.download_started(self, self.size, self.mtime)
+
+
+ def server_size(self, size):
+ """
+ The server has sent the expected length of the file
+ """
+ self.size = size
+ log.debug("File size: " + str(size), 'Fetcher')
+
+ def server_mtime(self, mtime):
+ """
+ The server has sent the modification time of the file
+ """
+ self.mtime = mtime
+ log.debug("File mtime: " + str(mtime), 'Fetcher');
+
+ def transfer_complete(self):
+ """
+ All data has been transferred
+ """
+ log.debug("Finished receiving data: " + self.cacheEntry.filename, 'Fetcher');
+ self.download_complete()
+
+ def connection_failed(self, reason = None):
+ """
+ A fetcher has failed to connect to the backend server
+ """
+ msg = '[%s] Connection Failed: %s/%s'%(
+ self.backend.name,
+ self.backendServer.path, self.cacheEntry.path)
+
+ if reason:
+ msg = '%s (%s)'%(msg, reason.getErrorMessage())
+ log.debug("Connection Failed: "+str(reason), 'Fetcher')
+ log.err(msg)
+ self.fail_over(http.SERVICE_UNAVAILABLE, reason)
+
+ def connected(self, result):
+ log.debug("Connected to "+ self.backendServer.uri, 'Fetcher')
+ self.download()
+
+ def download(self):
+ log.debug('downloading:%s mtime:%s' % (self.cacheEntry.path, self.cacheEntry.file_mtime), 'Fetcher')
+ self.fetcher.download(self, self.cacheEntry.path, self.cacheEntry.file_mtime)
+
+ def disconnect(self):
+ if self.fetcher is not None:
+ log.debug('disconnect %s' % (self.cacheEntry.path), 'Fetcher')
+ self.fetcher.disconnect()
+ self.fetcher = None
+
+ def connection_closed(self, fetcher):
+ """
+ A protocol fetcher's connection has closed - we must reopen the connection
+ next time
+ """
+ log.debug("Connection closed for %s, state=%s" %(self.cacheEntry.path, self.cacheEntry.state), 'Fetcher')
+ #if self.cacheEntry.state in \
+ # (self.cacheEntry.STATE_CONNECTING, self.cacheEntry.STATE_DOWNLOAD, self.cacheEntry.STATE_SENDFILE):
+ # self.fetcher_internal_error("Backend connection closed")
+ if fetcher == self.fetcher:
+ self.fetcher = None
+
+ def file_not_found(self):
+ log.msg("(%s) file not found: %s" % (self.backendServer.path, self.cacheEntry.path), 'fetcher')
+ # TODO - failover?
+ self.download_failed(http.NOT_FOUND, "file not found on backend")
+
+ def fetcher_internal_error(self, reason):
+ log.msg("(%s) internal error: %s" % (self.backendServer.path, reason), 'fetcher')
+ self.download_failed(http.INTERNAL_SERVER_ERROR, reason)
+
+ def send_complete_file(self, filename):
+ """
+ Send a complete file (used by FileFetcher)
+ """
+ self.cacheEntry.transfer_file(filename)
+
+ def up_to_date(self):
+ """
+ Fetcher has determined that our cached file is up to date
+ so the file is sent from our cache
+ """
+ log.msg("(%s) up_to_date" % (self.cacheEntry.path), 'fetcher')
+ self.cacheEntry.send_cached_file()
+ if not self.fetcher.pipelining:
+ self.connection_closed(self.fetcher)
+ self.deferred.callback((True, ""))
+
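The contract between a Backend's DownloadQueue and Fetcher is the Deferred
returned by start(): it fires with a (success, message) tuple however the
download ends, which is what DownloadQueue.downloadFinished() consumes
further down. A usage sketch (cache_entry is a hypothetical CacheEntry):

    def finished(result):
        success, message = result
        print "download finished:", success, message

    d = Fetcher().start(cache_entry)
    d.addCallback(finished)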
+class FileFetcher:
+ """
+ A Fetcher that simply copies files from disk
+ """
+ pipelining = True
+ def __init__(self, backendServer):
+ self.backendServer = backendServer
+ self.isConnected = True # Always connected
+
+ def connect(self):
+ # We always connect
+ return defer.succeed(True)
+
+ def download(self, fetcher, uri, mtime):
+ """
+ Request download
+ @param fetcher: Fetcher class to receive callbacks
+ @param uri: URI of file to be downloaded within backend
+ @param mtime: Modification time of current file in cache
+ """
+ self.parent = fetcher
+ self.cache_mtime = mtime
+ self.request_uri = uri
+
+ self.local_file = self.backendServer.uri[len("file://"):] + '/' + uri
+ if not os.path.exists(self.local_file):
+ self.parent.file_not_found()
+ return
+
+ # start the transfer
+ self.parent.send_complete_file(self.local_file)
+
+ def disconnect(self):
+ pass
+
+class FetcherHttpClient(http.HTTPClient):
+ """
+ This class represents an HTTP connection to a backend
+ server. It is generated by the HttpFetcher class when
+ a connection is made to an http server
+ """
+ def __init__(self, parent):
+ self.parent = parent # HttpFetcher
+ self.proxy = self.parent.proxy
+ self.fetcher = None
+
+ def connectionMade(self):
+ """
+ Http connection made - inform parent, which will
+ trigger callbacks
+ """
+ self.parent.connected(self)
+
+ def download(self, fetcher, uri, mtime):
+ # Request file from backend
+ self.log_headers = None
+ self.close_on_completion = True
+ self.server_mtime = None
+ self.server_size = None
+ self.fetcher = fetcher
+ self.uri = uri
+ self.finished = False
+ backendServer = self.parent.backendServer
+ if self.proxy is None:
+ serverpath = backendServer.path
+ else:
+ serverpath = "http://" + backendServer.host
+ if backendServer.port != 80:
+ serverpath = serverpath + ":" + str(backendServer.port)
+ serverpath = serverpath + "/" + backendServer.path
+
+ #self.sendCommand(self.request.method,
+ self.sendCommand("GET", serverpath + "/" + uri)
+
+ self.sendHeader('host', backendServer.host)
+ if self.proxy is not None and self.proxy.user is not None:
+ self.sendHeader('Proxy-Authorization', "Basic " +
+ encodestring(self.proxy.user + ":" + self.proxy.password).strip())
+
+ if mtime is not None:
+ datetime = http.datetimeToString(mtime)
+ self.sendHeader('if-modified-since', datetime)
+
+ self.endHeaders()
+
+ def download_complete(self):
+ if self.finished:
+ return
+ log.debug("File transfer complete",'http_client')
+ self.finished = True
+ #if self.close_on_completion:
+ #self.fetcher.disconnect()
+ #self.parent.connection_closed() # We don't have a persistent connection
+ #self.fetcher.disconnect()
+ #self.transport.loseConnection()
+ self.fetcher.download_complete()
+
+ def handleStatus(self, version, code, message):
+ __pychecker__ = 'unusednames=version,message'
+ log.debug('handleStatus %s - %s' % (code, message), 'http_client')
+ self.http_status = int(code)
+
+ #self.setResponseCode(self.http_status)
+
+ def handleResponse(self, buffer):
+ #log.debug('handleResponse, %s bytes' % (len(buffer)), 'http_client')
+ log.debug('handleResponse status=%s' % (self.http_status), 'http_client')
+ if self.http_status == http.NOT_MODIFIED:
+ log.debug("Backend server reported file is not modified: " + self.uri,'http_client')
+ self.fetcher.up_to_date()
+ elif self.http_status == http.NOT_FOUND:
+ log.debug("Not found on backend server",'http_client')
+ self.fetcher.file_not_found()
+ elif self.http_status == http.OK:
+ self.download_complete()
+ else:
+ log.debug("Unknown status code: %s" % (self.http_status),'http_client')
+ self.fetcher.fetcher_internal_error("Unknown status code: %s" % (self.http_status))
+
+ def handleHeader(self, key, value):
+
+ log.debug("Received: " + key + " " + str(value), 'http_client')
+ key = string.lower(key)
+
+ if key == 'last-modified':
+ self.server_mtime = http.stringToDatetime(value)
+ self.fetcher.server_mtime(self.server_mtime)
+ elif key == 'content-length':
+ self.server_size = int(value)
+ self.fetcher.server_size(self.server_size)
+ elif key == 'connection':
+ if value == "close":
+ log.debug('will close on completion', 'http_client')
+ self.close_on_completion = True
+ elif value == "keep-alive":
+ log.debug('will not close on completion', 'http_client')
+ self.close_on_completion = False
+
+ #def handleEndHeaders(self):
+ #if self.http_status == http.NOT_MODIFIED:
+ #log.debug("Backend server reported file is not modified: " + self.uri,'http_client')
+ #self.fetcher.up_to_date()
+ #elif self.http_status == http.NOT_FOUND:
+ #log.debug("Not found on backend server",'http_client')
+ #self.fetcher.file_not_found()
+ #else:
+ #log.debug("Unknown status code: %s" % (self.http_status),'http_client')
+
+ def rawDataReceived(self, data):
+ if self.http_status == http.OK:
+ self.fetcher.data_received(data)
+ #log.debug("Recieved: %s expected: %s" % (self.fetcher.len_received, self.server_size),'http_client')
+ if self.server_size is not None:
+ if self.fetcher.len_received >= self.server_size:
+ if self.fetcher.len_received == self.server_size:
+ pass
+ #self.download_complete()
+ else:
+ log.err("File transfer overrun! Expected size:%s Received size:%s" %
+ (self.server_size, self.fetcher.len_received), 'http_client')
+ self.parent.fetcher_internal_error("Data overrun")
+
+# def handleResponse(self, buffer):
+# if self.length == 0:
+# self.setResponseCode(http.NOT_FOUND)
+# # print "length: " + str(self.length), "response:", self.status_code
+# if self.http_status == http.NOT_MODIFIED:
+# self.apDataEnd(self.transfered, False)
+# else:
+# self.apDataEnd(self.transfered, True)
+
+ def lineReceived(self, line):
+ """
+ log each line as received from server
+ """
+ if self.log_headers is None:
+ self.log_headers = line
+ else:
+ self.log_headers += ", " + line
+ http.HTTPClient.lineReceived(self, line)
+
+ def sendCommand(self, command, path):
+ "log the line and handle it to the base class."
+ log.debug(command + ":" + path,'http_client')
+ http.HTTPClient.sendCommand(self, command, path)
+
+ def endHeaders(self):
+ "log and handle to the base class."
+ if self.log_headers is not None:
+ log.debug(" Headers: " + self.log_headers, 'http_client')
+ self.log_headers = None
+ http.HTTPClient.endHeaders(self)
+
+ def sendHeader(self, name, value):
+ "log and handle to the base class."
+ log.debug(name + " sendHeader:" + value,'http_client')
+ http.HTTPClient.sendHeader(self, name, value)
+
+ def disconnect(self):
+ log.debug("DISCONNECT:",'http_client')
+ import traceback
+ traceback.print_stack()
+
+
+class HttpFetcher(protocol.ClientFactory):
+ """
+ A Fetcher factory that retrieves files via HTTP
+ """
+ pipelining = False # twisted's HTTP client does not support pipelining
+ def __init__(self, backendServer):
+ self.backendServer = backendServer
+ self.isConnected = False
+ self.instance = None
+ self.connection = None # set once a connection succeeds
+
+ def connect(self):
+ self.connectCallback = defer.Deferred()
+ self.proxy = self.backendServer.backend.config.http_proxy
+ if self.proxy is None:
+ host = self.backendServer.host
+ port = self.backendServer.port
+ else:
+ host = self.proxy.host
+ port = self.proxy.port
+ self.read_limit = self.backendServer.backend.config.bandwidth_limit
+ if self.read_limit is None:
+ factory = self
+ else:
+ # Limit download rate
+ factory = policies.ThrottlingFactory(self, readLimit=self.read_limit)
+ reactor.connectTCP(host, port, factory, self.backendServer.backend.config.timeout)
+ return self.connectCallback
+
+ def buildProtocol(self, addr):
+ return FetcherHttpClient(self)
+
+ def connected(self, connection):
+ "Connection was made to HTTP backend (callback from HTTP client)"
+ self.connection = connection
+ self.isConnected = True
+ self.connectCallback.callback(None)
+
+ def clientConnectionFailed(self, connector, reason):
+ #self.instance.connectionFailed(reason)
+ log.debug("clientConnectionFailed reason: %s" % (reason), "http-client")
+ self.connectCallback.errback(reason)
+ def clientConnectionLost(self, connector, reason):
+ log.debug("clientConnectionLost reason=%s" %(reason), "http-client")
+ if self.connection is not None and self.connection.fetcher is not None:
+ self.connection.fetcher.connection_closed(self)
+
+ def download(self, fetcher, uri, mtime):
+ """
+ Request download
+ @param fetcher: Fetcher class to receive callbacks
+ @param uri: URI of file to be downloaded within backend
+ @param mtime: Modification time of current file in cache
+ """
+ self.connection.download(fetcher, uri, mtime)
+
+ def disconnect(self):
+ if self.isConnected:
+ self.connection.transport.loseConnection()
+ self.isConnected = False
+
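bandwidth_limit for HTTP backends works by wrapping the client factory in
twisted's ThrottlingFactory before connecting, so reads are limited to
readLimit bytes per second. A sketch of the wrapping (host, port and limit
hypothetical; backendServer is assumed to be a configured BackendServer):

    from twisted.internet import reactor
    from twisted.protocols import policies

    fetcher = HttpFetcher(backendServer)
    throttled = policies.ThrottlingFactory(fetcher, readLimit=100000)
    reactor.connectTCP('ftp.example.org', 80, throttled, 30)  # 30s timeout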
+class FtpFetcher(protocol.Protocol):
+ """
+ This is the sequence here:
+
+ -Start and connect the FTPClient
+ -Ask for mtime
+ -Ask for size
+ -if couldn't get the size
+ -try to get it by listing
+ -get all that juicy data
+
+ NOTE: Twisted's FTPClient code uses its own timeouts here and there,
+ so the timeout specified for the backend may not always be used
+ """
+
+ pipelining = True
+ def __init__(self, backendServer):
+ self.backendServer = backendServer
+ self.isConnected = False
+ self.instance = None
+ self.ftpclient = None
+
+ def connect(self):
+ """
+ Establish connection to ftp server specified by backendServer
+ """
+ self.connectCallback = defer.Deferred()
+ if not self.backendServer.username:
+ creator = protocol.ClientCreator(reactor, ftp.FTPClient, passive=0)
+ else:
+ creator = protocol.ClientCreator(reactor, ftp.FTPClient, self.backendServer.username,
+ self.backendServer.password, passive=0)
+ d = creator.connectTCP(self.backendServer.host, self.backendServer.port,
+ self.backendServer.backend.config.timeout)
+ d.addCallback(self.controlConnectionMade)
+ d.addErrback(self.clientConnectionFailed)
+ return self.connectCallback
+
+ def controlConnectionMade(self, ftpclient):
+ self.ftpclient = ftpclient
+
+ if(self.backendServer.backend.config.passive_ftp):
+ log.debug('Got control connection, using passive ftp', 'ftp_client')
+ self.ftpclient.passive = 1
+ else:
+ log.debug('Got control connection, using active ftp', 'ftp_client')
+ self.ftpclient.passive = 0
+
+ if log.isEnabled('ftp_client'):
+ self.ftpclient.debug = 1
+ self.connectCallback.callback(None)
+
+ def clientConnectionFailed(self, reason):
+ #self.instance.connectionFailed(reason)
+ log.debug("clientConnectionFailed reason: %s" % (reason), "ftp_client")
+ self.connectCallback.errback(reason)
+
+ def download(self, fetcher, uri, mtime):
+ """
+ Request download
+ @param fetcher: Fetcher class to receive callbacks
+ @param uri: URI of file to be downloaded within backend
+ @param mtime: Modification time of current file in cache
+ """
+ self.parent = fetcher
+ self.cache_mtime = mtime
+ self.request_uri = uri
+ self.remote_file = (self.parent.backendServer.path + '/'
+ + uri)
+ self.ftpFetchMtime()
+
+ def ftpFetchMtime(self):
+ "Get the modification time from the server."
+ d = self.ftpclient.queueStringCommand('MDTM ' + self.remote_file)
+ d.addCallback(self.ftpMtimeResult)
+ d.addErrback(self.ftpMtimeFailed)
+
+ def ftpMtimeResult(self, msgs):
+ """
+ Got an answer to the mtime request.
+
+ Someone should check that this is timezone independent.
+ """
+ code, msg = msgs[0].split()
+ if code == '213':
+ time_tuple=time.strptime(msg[:14], "%Y%m%d%H%M%S")
+ #replace day light savings with -1 (current)
+ time_tuple = time_tuple[:8] + (-1,)
+ #correct the result to GMT
+ mtime = time.mktime(time_tuple) - time.altzone
+ self.parent.server_mtime(mtime)
+
+ if (self.cache_mtime
+ and self.cache_mtime >= mtime):
+ self.parent.up_to_date()
+ return
+ self.ftpFetchSize()
+
+ def ftpMtimeFailed(self, msgs):
+ if msgs.check(ftp.CommandFailed):
+ code = msgs.getErrorMessage()[2:5]
+ log.debug("ftp fetch of Mtime failed: %s code:%s" % (msgs.getErrorMessage(), code), 'ftp_client')
+ if code == '550':
+ # Not found
+ self.parent.file_not_found()
+ return
+ log.debug("ftp fetch of Mtime for %s unknown failure: %s" % (self.remote_file, msgs), 'ftp_client')
+ self.ftpFetchSize()
+
+ def ftpFetchSize(self):
+ "Get the size of the file from the server"
+
+ d = self.ftpclient.queueStringCommand('SIZE ' + self.remote_file)
+ d.addCallback(self.ftpSizeResult)
+ d.addErrback(self.ftpSizeFailed)
+
+ def ftpSizeResult(self, msgs):
+ code, msg = msgs[0].split()
+ if code == '213':
+ size = int(msg)
+ self.parent.server_size(size)
+ self.ftpFetchFile()
+ else:
+ self.ftpSizeFailed(msgs)
+
+ def ftpSizeFailed(self, msgs):
+ log.debug("ftp size failed: %s" % (msgs), 'ftp_client')
+ self.ftpFetchList()
+
+ def ftpFetchList(self):
+ "If ftpFetchSize didn't work try to get the size with a list command."
+ self.filelist = ftp.FTPFileListProtocol()
+ d = self.ftpclient.list(self.remote_file, self.filelist)
+ d.addCallback(self.ftpListResult)
+ d.addErrback(self.ftpListFailed)
+
+ def ftpListResult(self, msg):
+ __pychecker__ = 'unusednames=msg'
+ if len(self.filelist.files) == 0:
+ log.debug("Not found on backend server",'ftp_client')
+ self.parent.file_not_found()
+ return
+ file = self.filelist.files[0]
+ self.parent.server_size(file['size'])
+ self.ftpFetchFile()
+
+ def ftpListFailed(self, msgs):
+ log.debug("ftp list failed: %s" % (msgs), 'ftp_client')
+ self.parent.fetcher_internal_error("Could not list directory")
+
+ def ftpFetchFile(self):
+ "And finally, we ask for the file."
+ log.debug('ftpFetchFile: ' + self.remote_file, 'ftp_client')
+ d = self.ftpclient.retrieveFile(self.remote_file, self)
+ d.addCallback(self.ftpFetchResult)
+ d.addErrback(self.ftpFetchFailed)
+
+ def ftpFetchResult(self, msg):
+ self.parent.download_complete()
+
+ def ftpFetchFailed(self, msgs):
+ log.debug("ftp fetch failed: %s" % (msgs), 'ftp_client')
+ self.parent.file_not_found()
+
+ def dataReceived(self, data):
+ self.parent.data_received(data)
+
+ def disconnect(self):
+ if self.ftpclient is not None:
+ log.debug('disconnecting', 'ftp_client')
+ self.ftpclient.quit()
+ self.ftpclient.transport.loseConnection()
+ self.ftpclient = None
+
+ def connectionLost(self, reason=None):
+ """
+ Maybe we should do some recovery here, I don't know, but the Deferred
+ should be enough.
+ """
+ log.debug("lost connection: %s"%(reason),'ftp_client')
+
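A self-contained sketch of the MDTM handling in ftpMtimeResult above: the
server replies "213 YYYYMMDDHHMMSS" in GMT, and the code converts that to an
epoch value by parsing the timestamp and compensating with time.altzone
(timestamp hypothetical):

    import time
    code, msg = '213 20060803235446'.split()
    if code == '213':
        t = time.strptime(msg[:14], '%Y%m%d%H%M%S')
        t = t[:8] + (-1,)                       # let mktime decide on DST
        mtime = time.mktime(t) - time.altzone   # correct the result to GMT
        print time.asctime(time.gmtime(mtime))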
+class GzipFetcher(Fetcher, protocol.ProcessProtocol):
+ """
+ This is a fake Fetcher: it uses the real Fetcher from the request's
+ backend via LoopbackRequest to get the data and gzips or gunzips it as
+ needed.
+
+ NOTE: We use the serve_cached=0 parameter to Request.fetch so if
+ it is cached it doesn't get uselessly read, we just get it from the cache.
+ """
+ post_convert = re.compile(r"^Should not match anything$")
+ gzip_convert = post_convert
+
+ exe = '/bin/gzip'
+ def activate(self, request, postconverting=0):
+ log.debug("FetcherGzip request:" + str(request.uri) + " postconvert:" + str(postconverting), 'gzip')
+ Fetcher.activate(self, request)
+ if not request.apFetcher:
+ return
+
+ self.args = (self.exe, '-c', '-9', '-n')
+ if(log.isEnabled('gzip',9)):
+ self.args += ('-v',)
+
+ if request.uri[-3:] == '.gz':
+ host_uri = request.uri[:-3]
+ else:
+ host_uri = request.uri+'.gz'
+ self.args += ('-d',)
+ self.host_file = self.factory.config.cache_dir + host_uri
+ self.args += (self.host_file,)
+
+ running = self.factory.runningFetchers
+ if not postconverting or running.has_key(host_uri):
+ #Make sure that the file is there
+ loop = LoopbackRequest(request, self.host_transfer_done)
+ loop.uri = host_uri
+ loop.local_file = self.host_file
+ loop.process()
+ self.loop_req = loop
+ loop.serve_if_cached=0
+ if running.has_key(host_uri):
+ #the file is on its way, wait for it.
+ running[host_uri].insert_request(loop)
+ else:
+ #we are not postconverting, so we need to fetch the host file.
+ loop.fetch(serve_cached=0)
+ else:
+ #The file should be there already.
+ self.loop_req = None
+ self.host_transfer_done()
+
+ def host_transfer_done(self):
+ """
+ Called by our LoopbackRequest when the real Fetcher calls
+ finish() on it.
+
+ If everything went well, check mtimes and only do the work if needed.
+
+ If possible, arrange things so the target file gets the same mtime as
+ the host file.
+ """
+ log.debug('transfer done', 'gzip')
+ if self.loop_req and self.loop_req.code != http.OK:
+ self.setResponseCode(self.loop_req.code,
+ self.loop_req.code_message)
+ self.apDataReceived("")
+ self.apDataEnd("")
+ return
+
+ if os.path.exists(self.host_file):
+ self.local_mtime = os.stat(self.host_file)[stat.ST_MTIME]
+ old_mtime = None
+ if os.path.exists(self.local_file):
+ old_mtime = os.stat(self.local_file)[stat.ST_MTIME]
+ if self.local_mtime == old_mtime:
+ self.apEndCached()
+ else:
+ log.debug("Starting process: " + self.exe + " " + str(self.args), 'gzip')
+ self.process = reactor.spawnProcess(self, self.exe, self.args)
+
+ def outReceived(self, data):
+ self.setResponseCode(http.OK)
+ self.apDataReceived(data)
+
+ def errReceived(self, data):
+ log.debug('gzip: ' + data,'gzip')
+
+ def loseConnection(self):
+ """
+ This is a workaround for Process.loseConnection not doing its
+ job right.
+ The problem only happens when we try to finish the process
+ while decompressing.
+ """
+ if hasattr(self, 'process') and self.process.pid:
+ try:
+ os.kill(self.process.pid, signal.SIGTERM)
+ self.process.connectionLost()
+ except OSError, Error:
+ import errno
+ (Errno, Errstr) = Error
+ if Errno != errno.ESRCH:
+ log.debug('Passing OSError exception '+Errstr)
+ raise
+ else:
+ log.debug('Threw away exception OSError no such process')
+
+ def processEnded(self, reason=None):
+ __pychecker__ = 'unusednames=reason'
+ log.debug("Status: %d" %(self.process.status),'gzip')
+ if self.process.status != 0:
+ self.setResponseCode(http.NOT_FOUND)
+
+ self.apDataReceived("")
+ self.apDataEnd(self.transfered)
+
+class RsyncFetcher(protocol.ProcessProtocol):
+ """
+ Fetch a file using the rsync protocol
+ rsync is run as an external process
+ """
+ rsyncCommand = '/usr/bin/rsync'
+ pipelining = False
+ def __init__(self, backendServer):
+ self.backendServer = backendServer
+ self.rsyncProcess = None
+
+ def connect(self):
+ # We can't connect separately so just return true
+ return defer.succeed(True)
+
+ def download(self, fetcher, uri, mtime):
+ """
+ Request download
+ @param fetcher: Fetcher class to receive callbacks
+ @param uri: URI of file to be downloaded within backend
+ @param mtime: Modification time of current file in cache
+ """
+ self.rsyncTempFile = None # Temporary filename that rsync streams to
+ self.bytes_sent = 0 #Number of bytes sent to client already
+ self.parent = fetcher
+ self.cache_mtime = mtime
+ self.request_uri = uri
+ self.cache_path = fetcher.cacheEntry.cache_path
+ self.file_path = fetcher.cacheEntry.file_path # Absolute path of file
+ self.cache_dir = fetcher.cacheEntry.filedir
+ self.remote_file = (self.backendServer.path + '/'
+ + uri)
+
+ # Change /path/to/FILE -> /path/to/.FILE.* to match rsync tempfile
+ self.globpattern = re.sub(r'/([^/]*)$', r'/.\1.*', self.file_path)
+
+ for file in glob.glob(self.globpattern):
+ log.msg('Deleting stale tempfile:' + file, 'rsyncFetcher')
+ os.unlink(file)
+
+ # rsync needs the destination directory in place, so create it if necessary
+ if not os.path.exists(self.cache_dir):
+ os.makedirs(self.cache_dir)
+
+ if self.backendServer.port:
+ portspec = ':' + str(self.backendServer.port)
+ else:
+ portspec = ''
+
+ uri = 'rsync://'+ self.backendServer.host + portspec \
+ +self.backendServer.path+'/' + self.request_uri
+
+ args = [self.rsyncCommand, '--partial', '--progress', '--times',
+ '--timeout=%s' %(self.backendServer.backend.config.timeout)]
+ if log.isEnabled('rsync',9):
+ args.append('--verbose')
+ else:
+ args.append('--quiet')
+ bwlimit = self.backendServer.backend.config.bandwidth_limit
+ if bwlimit:
+ bwlimit = bwlimit / 1000 # rsync wants KBytes/sec
+ if bwlimit < 1:
+ bwlimit = 1
+ args.append('--bwlimit=%d' % (bwlimit))
+ args.extend([uri, '.'])
+ log.debug('rsync command: (%s) %s' %(self.cache_dir, string.join(args,' ')), 'rsyncFetcher')
+ self.rsyncProcess = reactor.spawnProcess(self, self.rsyncCommand, args, None,
+ self.cache_dir)
+
+ def findRsyncTempFile(self):
+ """
+ Look for temporary file created by rsync during streaming
+ """
+ files = glob.glob(self.globpattern)
+
+ if len(files)==1:
+ self.rsyncTempFile = files[0]
+ log.debug('tempfile: ' + self.rsyncTempFile, 'rsync_client')
+ elif not files:
+ # No file created yet
+ pass
+ else:
+ log.err('found more than one tempfile, abort rsync')
+ self.parent.fetcher_internal_error("Found more than one rsync temporary file")
+
+ #def connectionMade(self):
+ # pass
+
+ def outReceived(self, data):
+ "Data received from rsync process to stdout"
+ for s in string.split(data, '\n'):
+ if len(s):
+ log.debug('rsync: ' + s, 'rsync_client')
+ #self.apDataReceived(data)
+ if not self.rsyncTempFile:
+ self.findRsyncTempFile()
+ # Got tempfile?
+ #if self.rsyncTempFile:
+ # self.setResponseCode(http.OK)
+ if self.rsyncTempFile:
+ self.sendData()
+
+ def errReceived(self, data):
+ "Data received from rsync process to stderr"
+ for s in string.split(data, '\n'):
+ if len(s):
+ log.err('rsync error: ' + s, 'rsync_client')
+
+ def sendData(self):
+ f = None
+ if self.rsyncTempFile:
+ try:
+ f = open(self.rsyncTempFile, 'rb')
+ except IOError:
+ return
+ else:
+ # Tempfile has gone, stream main file
+ log.debug("sendData open dest (sent: %s bytes)"% (self.bytes_sent), 'rsync_client')
+ f = open(self.file_path, 'rb')
+
+ if f:
+ f.seek(self.bytes_sent)
+ data = f.read(abstract.FileDescriptor.bufferSize)
+ #log.debug("sendData got " + str(len(data)))
+ f.close()
+ if data:
+ self.parent.data_received(data, save=False)
+ self.bytes_sent = self.bytes_sent + len(data)
+ reactor.callLater(0, self.sendData)
+ elif not self.rsyncTempFile:
+ # Finished reading final file
+ log.debug("sendData complete. Bytes sent: %s" %(self.bytes_sent))
+ # Tell clients, but data is already saved by rsync so don't
+ # write file again
+ self.parent.download_complete()
+ #self.parent.connection_closed() # We don't have a persistent connection
+
+ def processEnded(self, status_object):
+ __pychecker__ = 'unusednames=reason'
+ self.rsyncTempFile = None
+ self.rsyncProcess = None
+
+ r = status_object.trap(error.ProcessTerminated, error.ProcessDone)
+ if r == error.ProcessDone:
+ log.debug("rsync process complete", 'rsync_client')
+ # File received. Send to clients.
+ self.parent.server_mtime(os.stat(self.file_path)[stat.ST_MTIME])
+ reactor.callLater(0, self.sendData)
+ elif r == error.ProcessTerminated:
+ log.debug("Status: %s" %(status_object.value.exitCode)
+ ,'rsync_client')
+ exitcode = status_object.value.exitCode
+ if exitcode == 10:
+ # Host not found
+ self.parent.connection_failed('rsync connection to %s failed'
+ % (self.backendServer.host))
+ elif exitcode == 23:
+ self.parent.file_not_found()
+ else:
+ self.parent.fetcher_internal_error("Error in rsync")
+
+ def disconnect(self):
+ "Kill rsync process"
+ if self.rsyncProcess and self.rsyncProcess.pid:
+ log.debug("disconnect: killing rsync child pid " +
+ str(self.rsyncProcess.pid), 'rsync_client')
+ os.kill(self.rsyncProcess.pid, signal.SIGTERM)
+ self.transport.loseConnection()
+
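# A minimal standalone sketch of the tempfile-matching idea used by
# RsyncFetcher above: rsync writes partial data to a hidden ".FILE.XXXXXX"
# file next to the destination, so the fetcher rewrites the target path
# into a glob pattern that matches it. Paths below are illustrative only.
import re

def rsync_temp_pattern(file_path):
    # /cache/debian/Packages.gz -> /cache/debian/.Packages.gz.*
    return re.sub(r'/([^/]*)$', r'/.\1.*', file_path)

# glob.glob(rsync_temp_pattern('/cache/debian/Packages.gz')) then matches
# rsync's in-progress tempfile, e.g. '/cache/debian/.Packages.gz.kjH2x'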
+class DownloadQueue:
+ """
+ This class manages a list of files to download and schedules downloads
+ """
+ closeTimeout = 5 # Time to close fetcher connections after last download (seconds)
+ def __init__(self, parent = None):
+ """
+ Initialise download queue
+ @param parent Tuple of (class, id); the class's downloadQueueEmpty(queue, id) is called when the queue empties
+ """
+ #import traceback
+ #traceback.print_stack()
+ self.queue = [] # List of cacheEntry classes waiting
+ self.activeFile = None
+ self.fetcher = None
+ self.timeoutCB = None
+ if parent is not None:
+ self.parent, self.parentId = parent
+ else:
+ self.parent = None
+
+ def addFile(self, cacheEntry):
+ """
+ Add a file to the queue and start downloading if necessary
+ @param cacheEntry Cache entry of file to download
+ @return Deferred that is triggered when file has been downloaded
+ """
+ if len(self.queue) == 0 and self.timeoutCB is not None:
+ self.timeoutCB.cancel()
+ self.timeoutCB = None
+ self.queue.append(cacheEntry)
+ if self.activeFile is None:
+ self.startNextDownload()
+ else:
+ log.debug("queue file " + cacheEntry.cache_path, 'DownloadQueue')
+
+ def downloadFinished(self, result):
+ success, message = result
+ if success:
+ log.debug("download complete: %s" % (self.activeFile.cache_path), 'DownloadQueue')
+ else:
+ log.debug("download failed: %s" % (message), 'DownloadQueue')
+ self.activeFile = None
+ self.startNextDownload()
+
+ def startNextDownload(self):
+ while len(self.queue)>0:
+ self.activeFile = self.queue[0]
+ self.queue = self.queue[1:]
+
+ if self.activeFile.state != self.activeFile.STATE_NEW:
+ log.debug("active download skipped (%s)" % (self.activeFile.cache_path), 'DownloadQueue')
+ self.activeFile = None
+ continue # Go to next file
+
+ log.debug("start next download (%s)" % (self.activeFile.cache_path), 'DownloadQueue')
+
+ if self.fetcher is not None:
+ if self.fetcher.backendServer.backend != self.activeFile.backend:
+ log.debug("old:%s new:%s" %(self.fetcher.backendServer.backend,self.activeFile.backend)
+ , 'DownloadQueue')
+ log.debug("changing backend server", 'DownloadQueue')
+ self.fetcher.disconnect()
+ self.fetcher = Fetcher()
+ else:
+ log.debug("keeping backend server", 'DownloadQueue')
+ else:
+ log.debug("creating new fetcher", 'DownloadQueue')
+ self.fetcher = Fetcher()
+ d = self.fetcher.start(self.activeFile)
+ d.addCallback(self.downloadFinished)
+ return
+
+ # Download queue was empty
+ #twisted.internet.base.DelayedCall.debug = True
+ log.debug("download queue is empty", 'DownloadQueue')
+ if self.closeTimeout and self.fetcher is not None:
+ self.timeoutCB = reactor.callLater(self.closeTimeout, self.closeFetcher)
+ else:
+ self.closeFetcher()
+
+ def closeFetcher(self):
+ "Close active fetcher - called after queue has been empty for closeTimeout seconds"
+
+ self.timeoutCB = None
+ if self.fetcher is not None:
+ log.debug("closing fetcher [%s]" % (self.fetcher.backendServer), 'DownloadQueue')
+ self.fetcher.disconnect()
+ self.fetcher = None
+
+ if self.parent is not None:
+ self.parent.downloadQueueEmpty(self, self.parentId)
+
+ def stop(self):
+ log.debug("queue stop", 'DownloadQueue')
+ if self.timeoutCB is not None:
+ self.timeoutCB.cancel()
+ self.closeFetcher()
+
+class DownloadQueuePerClient:
+ """
+ DownloadQueue that creates several queues, one per client
+ """
+ def __init__(self):
+ self.queues = {}
+
+ def addFile(self, cacheEntry):
+ # Add queue entries for all clients. The client
+ # queue that is ready first will start the download
+ for req in cacheEntry.requests:
+ clientId = req.getFileno()
+
+ if self.queues.has_key(clientId):
+ q = self.queues[clientId]
+ else:
+ q = DownloadQueue([self, clientId])
+ self.queues[clientId] = q
+ log.debug("Adding new queue for client id %s" % (clientId), 'DownloadQueuePerClient')
+
+ q.addFile(cacheEntry)
+
+ def downloadQueueEmpty(self, queue, data):
+ """
+ DownloadQueue notifies that it is empty
+ """
+ log.debug("Removing queue for client id %s" % (data), 'DownloadQueuePerClient')
+ del self.queues[data]
+
+ def stop(self):
+ for q in self.queues.values():
+ q.stop()
\ No newline at end of file
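The DownloadQueue above keeps the fetcher's connection open for closeTimeout seconds after the queue drains, so a burst of requests to the same backend reuses a single connection. A sketch of that idle-timeout pattern in isolation, using reactor.callLater (illustrative names, not the apt-proxy API):

    from twisted.internet import reactor

    class IdleCloser:
        "Close a connection only after it has been idle for `timeout' seconds"
        def __init__(self, connection, timeout=5):
            self.connection = connection
            self.timeout = timeout
            self.timer = None
        def activity(self):
            # New work arrived: cancel any pending close
            if self.timer is not None:
                self.timer.cancel()
                self.timer = None
        def idle(self):
            # Queue drained: schedule the close instead of closing at once
            self.timer = reactor.callLater(self.timeout, self._close)
        def _close(self):
            self.timer = None
            self.connection.disconnect()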
Modified: trunk/apt_proxy/misc.py
==============================================================================
--- trunk/apt_proxy/misc.py (original)
+++ trunk/apt_proxy/misc.py Thu Aug 3 23:54:46 2006
@@ -14,8 +14,8 @@
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-import os
-from twisted.internet import reactor
+import os, time
+from twisted.internet import reactor, defer
from twisted import python
class DomainLogger:
@@ -113,6 +113,8 @@
def __init__(self, factory, timer):
self.timer = timer
self.factory = factory
+ self.callback = None
+ self.working = None # Deferred triggered when recycler finishes
def start(self):
"""
Starts the Recycler if it is not working, it will use
@@ -120,22 +122,32 @@
tree.
"""
if not self.working:
+ self.working = defer.Deferred()
if self.factory.backends == []:
log.msg("NO BACKENDS FOUND",'recycle')
- return
+ self.working.errback(python.failure.Failure())
+ return self.working
self.cur_uri = '/'
self.cur_dir = self.factory.config.cache_dir
self.pending = []
for backend in self.factory.backends.values():
self.pending.append(backend.base)
self.stack = []
- reactor.callLater(self.timer, self.process)
- self.working = 1
+ self.callback = reactor.callLater(self.timer, self.process)
+ return self.working
+ def stop(self):
+ if self.callback is not None:
+ self.callback.cancel()
+ self.callback = None
+ if self.working:
+ self.working.callback(None)
+ self.working = None
def pop(self):
if self.stack:
(self.cur_dir, self.cur_uri, self.pending) = self.stack.pop()
else:
- self.working = 0
+ self.working.callback(None)
+ self.working = None
def push(self):
if self.pending:
self.stack.append((self.cur_dir, self.cur_uri, self.pending))
@@ -144,6 +156,7 @@
"""
Process the next entry, is called automatically via callLater.
"""
+ self.callback = None
entry = self.pending.pop()
uri = os.path.join(self.cur_uri, entry)
path = os.path.join(self.cur_dir, entry)
@@ -155,22 +168,26 @@
self.cur_uri = uri
self.pending = os.listdir(self.cur_dir)
if not self.pending:
- log.msg("Pruning empty directory: "+path,'recycle')
- os.removedirs(path)
- else:
- if os.path.isfile(path):
+ # Prune directory if it has not just been created
+ pathage = time.time() - os.path.getctime(path)
+ if pathage > 60:
+ log.msg("Pruning empty directory: "+path,'recycle')
+ os.removedirs(path)
+ elif os.path.isfile(path):
+ # splitext keeps the leading dot; skip in-progress downloads
+ ext = os.path.splitext(path)[1]
+ if ext != '.apDownload':
#print "PATH:", path
#print "URI: ", uri
if not self.factory.access_times.has_key(uri):
log.msg("Adopting new file: "+ uri,'recycle')
self.factory.access_times[uri] = os.path.getatime(path)
- else:
- log.msg("UNKNOWN:"+path,'recycle')
+ else:
+ log.msg("UNKNOWN:"+path,'recycle')
if not self.pending:
self.pop()
if self.working:
- reactor.callLater(self.timer, self.process)
+ self.callback = reactor.callLater(self.timer, self.process)
if __name__ == '__main__':
#Just for testing purposes.
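The misc.py change above replaces the recycler's integer working flag with a Deferred, so callers (notably the tests) can wait for the cache walk to finish instead of polling. The flag-to-Deferred pattern in miniature (a sketch, not the real MirrorRecycler):

    from twisted.internet import reactor, defer

    class Walker:
        def __init__(self, entries):
            self.pending = list(entries)
            self.working = None
        def start(self):
            if not self.working:
                self.working = defer.Deferred()
                reactor.callLater(0, self.process)
            return self.working   # caller may addCallback() or return it to trial
        def process(self):
            if self.pending:
                self.pending.pop()            # handle one entry per reactor turn
                reactor.callLater(0, self.process)
            else:
                d, self.working = self.working, None
                d.callback(None)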
Modified: trunk/apt_proxy/packages.py
==============================================================================
--- trunk/apt_proxy/packages.py (original)
+++ trunk/apt_proxy/packages.py Thu Aug 3 23:54:46 2006
@@ -67,17 +67,18 @@
except:
pass
- def update_file(self, uri):
+ def update_file(self, entry):
"""
Called from apt_proxy.py when files get updated so we can update our
fake lists/ directory and sources.list.
- @param uri Filename of cached file (without cache_dir prefix)
+ @param entry CacheEntry for cached file
"""
- if basename(uri)=="Packages" or basename(uri)=="Release":
- log.msg("REGISTERING PACKAGE:"+uri,'apt_pkg',4)
- stat_result = os.stat(self.cache_dir+'/'+uri)
- self.packages[uri] = stat_result
+ if entry.filename=="Packages" or entry.filename=="Release":
+ log.msg("Registering package file: "+entry.cache_path, 'apt_pkg', 4)
+ stat_result = os.stat(entry.file_path)
+ self.packages[entry.cache_path] = stat_result
+
def get_files(self):
"""
Get list of files in database. Each file will be checked that it exists
@@ -85,7 +86,7 @@
files = self.packages.keys()
#print self.packages.keys()
for f in files:
- if not os.path.exists(self.cache_dir + '/' + f):
+ if not os.path.exists(self.cache_dir + os.sep + f):
log.debug("File in packages database has been deleted: "+f, 'apt_pkg')
del files[files.index(f)]
del self.packages[f]
@@ -170,11 +171,11 @@
#print "start aptPackages [%s] %s " % (self.backendName, self.cache_dir)
del self.packages
#print "Deleted aptPackages [%s] %s " % (self.backendName, self.cache_dir)
- def file_updated(self, uri):
+ def file_updated(self, entry):
"""
A file in the backend has changed. If this affects us, unload our apt database
"""
- if self.packages.update_file(uri):
+ if self.packages.update_file(entry):
self.unload()
def __save_stdout(self):
@@ -294,21 +295,21 @@
def cleanup(factory):
- for backend in factory.backends:
+ for backend in factory.backends.values():
backend.get_packages_db().cleanup()
-def get_mirror_path(factory, file):
- """
- Look for the path of 'file' in all backends.
- """
- info = AptDpkgInfo(file)
- paths = []
- for backend in factory.backends.values():
- path = backend.get_packages_db().get_mirror_path(info['Package'],
- info['Version'])
- if path:
- paths.append('/'+backend.base+'/'+path)
- return paths
+#def get_mirror_path(factory, file):
+ #"""
+ #Look for the path of 'file' in all backends.
+ #"""
+ #info = AptDpkgInfo(file)
+ #paths = []
+ #for backend in factory.backends:
+ #path = backend.get_packages_db().get_mirror_path(info['Package'],
+ #info['Version'])
+ #if path:
+ #paths.append('/'+backend.base+'/'+path)
+ #return paths
def get_mirror_versions(factory, package):
"""
@@ -369,7 +370,7 @@
if not stat.S_ISDIR(mode):
import_file(factory, dir, file)
- for backend in factory.backends.values():
+ for backend in factory.backends:
backend.get_packages_db().unload()
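The packages.py interface change is visible in the new signatures: update_file() and file_updated() now receive an object carrying filename, cache_path and file_path instead of a bare URI string, so path construction happens once in the cache layer. A stub satisfying the new contract (the tests below define essentially the same thing as DummyCacheEntry):

    import os

    class EntryStub:
        def __init__(self, cache_dir, backend, name):
            self.filename = os.path.basename(name)
            self.cache_path = backend + os.sep + name
            self.file_path = cache_dir + os.sep + self.cache_path

    # e.g. packages_db.update_file(
    #     EntryStub('/var/cache/apt-proxy', 'debian',
    #               'dists/sid/main/binary-i386/Packages'))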
Modified: trunk/apt_proxy/test/test_apt_proxy.py
==============================================================================
--- trunk/apt_proxy/test/test_apt_proxy.py (original)
+++ trunk/apt_proxy/test/test_apt_proxy.py Thu Aug 3 23:54:46 2006
@@ -22,7 +22,8 @@
from StringIO import StringIO
from apt_proxy.apt_proxy_conf import apConfig
-from apt_proxy.apt_proxy import Factory
+from apt_proxy.apt_proxy import Factory, Request
+from apt_proxy.misc import log
config1="""
[DEFAULT]
@@ -30,6 +31,7 @@
port=9999
address=
cleanup_freq=off
+max_versions=off
[backend1]
backends = http://a.b.c/d
@@ -68,13 +70,35 @@
"""
class apTestHelper(unittest.TestCase):
- default_config = "[DEFAULT]\ndebug=all:9 apt:0\n" # Config string to use
+ default_config = "[DEFAULT]\ndebug=all:9 apt:0 memleak:0\ncleanup_freq=off\n" # Config string to use
def setUp(self):
self.cache_dir = tempfile.mkdtemp('.aptproxy')
self.config = self.default_config.replace('[DEFAULT]','[DEFAULT]\ncache_dir=' + self.cache_dir)
def tearDown(self):
+ log.debug('Removing temporary directory: ' + self.cache_dir)
shutil.rmtree(self.cache_dir)
-
+ self.assertRaises(OSError, os.stat, self.cache_dir)
+
+class FactoryTestHelper(apTestHelper):
+ """
+ Set up a cache dir and a factory
+ """
+ def setUp(self, config):
+ """
+ Set up a factory using the additional config given
+ """
+ apTestHelper.setUp(self)
+ config = self.config + '\n' + config
+ self.apConfig = apConfig(StringIO(config))
+ self.factory = Factory(self.apConfig)
+ self.factory.configurationChanged()
+
+ def tearDown(self):
+ self.factory.stopFactory()
+ del(self.factory)
+ apTestHelper.tearDown(self)
+ self.assertRaises(OSError, os.stat, self.cache_dir)
+
class FactoryInitTest(apTestHelper):
def setUp(self):
self.default_config = config1
@@ -101,7 +125,14 @@
shutil.rmtree(self.cache_dir)
def testFactoryStart(self):
factory = Factory(self.c)
+ self.assertEquals(factory.recycler, None)
+ factory.startFactory()
+ self.assertEquals(factory.recycler, None)
+ def testPeriodicOff(self):
+ "Verify periodic callback is off"
+ factory = Factory(self.c)
factory.startFactory
+ self.assertEquals(factory.periodicCallback, None)
class ConfigChangeTest(unittest.TestCase):
def setUp(self):
@@ -135,3 +166,160 @@
self.loadNewConfig()
self.assertEquals(self.factory.backends.keys(), ['backend2', 'backend3', 'backend4', 'backend5'])
self.assertEquals(self.factory.backends['backend3'].uris[0].host, 'l.m.n')
+
+class FactoryFnsTest(FactoryTestHelper):
+ """
+ Set up a cache dir and a factory
+ """
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ FactoryTestHelper.setUp(self, config1.replace("cleanup_freq=off", "cleanup_freq=1h"))
+
+ def testPeriodicControl(self):
+ "Start & stop periodic callback"
+ self.assertNotEquals(self.factory.periodicCallback, None)
+ self.factory.stopPeriodic()
+ self.assertEquals(self.factory.periodicCallback, None)
+ self.factory.startPeriodic()
+ self.assertNotEquals(self.factory.periodicCallback, None)
+ self.factory.stopPeriodic()
+ self.assertEquals(self.factory.periodicCallback, None)
+ def testPeriodic(self):
+ "Run periodic cleaning"
+ self.factory.startFactory() # Start recycler
+ self.factory.stopPeriodic() # Stop periodic callback
+ self.factory.periodic() # And trigger it manually
+ self.assertNotEquals(self.factory.periodicCallback, None)
+ self.factory.stopPeriodic() # Cancel new callback
+ self.assertEquals(self.factory.periodicCallback, None)
+
+
+ def testDumpDbs(self):
+ "Test that factory.dumpdbs() runs to completion"
+ self.factory.dumpdbs()
+
+class FactoryVersionsTest(FactoryTestHelper):
+ """
+ Set up a cache dir and a factory
+ """
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ FactoryTestHelper.setUp(self, config1.replace("max_versions=off", "max_versions=2"))
+
+ def testFirstFileServed(self):
+ "Add non-.deb to databases"
+ file = 'debian/dists/stable/Release.gpg'
+ path = os.sep + file
+ self.failIf(self.factory.access_times.has_key(path))
+ self.factory.file_served(file)
+ self.failUnless(self.factory.access_times.has_key(path))
+ # This is not a versioned file
+ self.failIf(self.factory.packages.has_key(path))
+
+ def testDebServed1(self):
+ "Add new .deb to databases"
+ file = 'debian/nonexistent_1.0.deb'
+ path = os.sep + file
+ packagename = 'nonexistent'
+ self.failIf(self.factory.access_times.has_key(path))
+ self.failIf(self.factory.packages.has_key(packagename))
+ self.factory.file_served(file)
+ self.failUnless(self.factory.access_times.has_key(path))
+ # .debs are versioned, so the package name is registered too
+ self.failUnless(self.factory.packages.has_key(packagename))
+ pkgs = self.factory.packages[packagename]
+ self.assertEquals(len(pkgs), 1)
+
+ def testDebServed2(self):
+ "Add two .debs to databases"
+ file1 = 'debian/nonexistent_1.0.deb'
+ file2 = file1.replace('1.0', '1.1')
+ packagename = 'nonexistent'
+ self.factory.file_served(file1)
+ self.factory.file_served(file2)
+ self.failUnless(self.factory.packages.has_key(packagename))
+ pkgs = self.factory.packages[packagename]
+ self.assertEquals(len(pkgs), 2)
+
+ def testDebServed3(self):
+ "Test max_versions algorithm"
+ files = []
+ versions = ['0.0.1', '0.0.2', '0.0.3']
+ packagename = 'apt'
+ os.mkdir(self.cache_dir + os.sep + 'backend1')
+ for ver in versions:
+ package_filename='apt_'+ver+'_test.deb'
+ file = 'backend1'+os.sep+package_filename
+ shutil.copy2('../test_data/apt/'+package_filename, self.cache_dir + os.sep + file)
+ self.factory.file_served(file)
+ files.append(file)
+ pkgs = self.factory.packages[packagename]
+ # Max versions should have deleted one file
+ self.assertEquals(len(pkgs), 2)
+
+backendServerConfig = """
+[test_servers]
+backends=http://server1/path1
+ ftp://server2/path2
+ rsync://server3/path3
+ file://server4/path4
+[test_usernames]
+backends=http://myUser:thePassword@server/path
+"""
+class BackendServerTest(FactoryTestHelper):
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ FactoryTestHelper.setUp(self, backendServerConfig)
+ self.backend = self.factory.getBackend('test_servers')
+
+ def testServerHosts(self):
+ values = ['server1','server2','server3','server4']
+ for server in self.backend.uris:
+ value = values[self.backend.uris.index(server)]
+ self.assertEquals(server.host, value)
+ def testServerPaths(self):
+ values = ['/path1','/path2','/path3','/path4']
+ for server in self.backend.uris:
+ value = values[self.backend.uris.index(server)]
+ self.assertEquals(server.path, value)
+ def testServerProtocols(self):
+ values = ['http','ftp','rsync','file']
+ for server in self.backend.uris:
+ value = values[self.backend.uris.index(server)]
+ self.assertEquals(server.scheme, value)
+ def testServerDefaultPorts(self):
+ values = [80,21,873,0]
+ for server in self.backend.uris:
+ value = values[self.backend.uris.index(server)]
+ self.assertEquals(server.port, value)
+ def testStr(self):
+ "__str__ operator"
+ for server in self.backend.uris:
+ self.assertNotEquals(server.__str__(), None)
+ def testNoUser(self):
+ self.assertEquals(self.backend.uris[0].username,None)
+ def testNoPassword(self):
+ self.assertEquals(self.backend.uris[0].password,None)
+ def testUser(self):
+ backend = self.factory.getBackend('test_usernames')
+ self.assertEquals(backend.uris[0].username,'myUser')
+ def testPassword(self):
+ backend = self.factory.getBackend('test_usernames')
+ self.assertEquals(backend.uris[0].password,'thePassword')
+
+class testRequests(unittest.TestCase):
+ def setUp(self):
+ class DummyChannel:
+ factory = None
+ transport = None
+ self.req = Request(DummyChannel(), None)
+ def testSimplifyPath(self):
+ self.assertEquals(self.req.clean_path('/foo/bar/../baz'), '/foo/baz')
+ def testRemoveHost(self):
+ self.assertEquals(self.req.clean_path('http://test:1234/foo/bar'), '/foo/bar')
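The two clean_path tests pin down its contract: strip any scheme and host, then collapse relative path segments. A standalone sketch that satisfies exactly these two cases (the real implementation lives in apt_proxy.py and may differ):

    import posixpath, urlparse

    def clean_path_sketch(uri):
        path = urlparse.urlparse(uri)[2]   # drop scheme://host:port, keep path
        return posixpath.normpath(path)    # collapse 'bar/..' segments

    assert clean_path_sketch('/foo/bar/../baz') == '/foo/baz'
    assert clean_path_sketch('http://test:1234/foo/bar') == '/foo/bar'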
Added: trunk/apt_proxy/test/test_cache.py
==============================================================================
--- (empty file)
+++ trunk/apt_proxy/test/test_cache.py Thu Aug 3 23:54:46 2006
@@ -0,0 +1,231 @@
+#
+# Copyright (C) 2006 Chris Halls <halls at debian.org>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""Unit test for cache.py"""
+
+import os, time, shutil
+from twisted.trial import unittest
+from twisted.internet import reactor, defer
+from twisted.python import failure
+from StringIO import StringIO
+
+from apt_proxy.apt_proxy_conf import apConfig
+from apt_proxy.test.test_apt_proxy import apTestHelper
+from apt_proxy.cache import CacheEntry, findFileType
+from apt_proxy.apt_proxy import Factory
+from apt_proxy.misc import log
+from apt_proxy.fetchers import DownloadQueue
+
+class DummyRequest:
+ def __init__(self):
+ self.finished = False
+ self.streamed = 0
+ def finishCode(self, code, reason):
+ self.finished = True
+ def start_streaming(self, file_size, file_mtime):
+ self.streamed = self.streamed + 1
+ def getFileno(self):
+ return 0
+
+class CacheEntryTest(apTestHelper):
+ def setUp(self):
+ """
+ Make a configuration with a single backend
+ [files]
+ backends=file:///<path to test packages directory>
+ """
+ DownloadQueue.closeTimeout = 0 # Close fetcher immediately
+ apTestHelper.setUp(self)
+ packagedir = self.cache_dir+"/packages"
+ filedir = os.path.normpath(os.getcwd()+"/../test_data/packages")
+ config = (self.config +
+ "[files]\n" +
+ "backends=file://" + filedir)
+ #print "config: " + config
+ self.c = apConfig(StringIO(config))
+ self.factory = Factory(self.c)
+ self.factory.createBackends()
+ self.backend = self.factory.getBackend("files")
+ self.entry = self.backend.get_cache_entry("testdir/testfile.deb")
+ self.request = DummyRequest()
+
+ def tearDown(self):
+ del(self.factory)
+ apTestHelper.tearDown(self)
+
+ def testInit(self):
+ entry = self.entry
+ self.assertEquals(entry.backend, self.backend, "CacheEntry did not initialise backend")
+ self.assertEquals(entry.factory, self.factory, "CacheEntry did not initialise factory")
+ self.assertEquals(entry.path, "testdir/testfile.deb")
+ self.assertEquals(entry.file_path, self.cache_dir+"/files/testdir/testfile.deb")
+ self.assertEquals(entry.filedir, self.cache_dir+"/files/testdir")
+ self.assertEquals(entry.filetype.contype, "application/dpkg")
+ self.assertEquals(entry.filename, "testfile.deb")
+ self.assertEquals(entry.filebase, "testfile")
+ self.assertEquals(entry.fileext, ".deb")
+ self.assertEquals(len(entry.requests), 0)
+
+ def testAddClient(self):
+ self.entry.add_request(self.request)
+ self.assertEquals(len(self.entry.requests), 1)
+
+ def testAddDuplicate(self):
+ self.entry.add_request(self.request)
+ self.assertRaises(RuntimeError, self.entry.add_request, self.request)
+
+ def testRemove(self):
+ self.entry.add_request(self.request)
+ self.entry.remove_request(self.request)
+ self.assertEquals(len(self.entry.requests), 0)
+
+ def testStartDownload(self):
+ def start_download(entry):
+ # This test function replaces the normal
+ # Backend.start_download so we can see that
+ # it was called without starting the download
+ entry.entry_download_triggered = True
+ self.backend.start_download = start_download
+ self.entry.add_request(self.request)
+ # Check that our special function was called
+ self.failUnless(self.entry.entry_download_triggered)
+
+ def testCachedFile(self):
+ """
+ CacheEntry starts streaming a text file
+ """
+ def start_download(entry):
+ # This test function replaces the normal
+ # Backend.start_download so we can see that
+ # it was called without starting the download
+ entry.test_download = True
+ self.backend.start_download = start_download
+ entry = CacheEntry(self.backend, "testdir/test.txt")
+ entry.test_download = False
+ entry.create_directory()
+ f = open(entry.file_path, 'w')
+ f.write('12345')
+ f.close()
+ entry.add_request(self.request)
+ while not entry.test_download and not self.request.streamed:
+ #print "iterate.."
+ reactor.iterate(0.1)
+ # Check that our special function was not called
+ self.failIf(entry.test_download)
+ self.failUnless(self.request.streamed)
+
+ def testVerifyFail(self):
+ """
+ Create a bogus .deb and check that CacheEntry starts
+ a download
+ """
+ self.testResult = defer.Deferred()
+ class VerifySizeError:
+ pass
+ class VerifyMtimeError:
+ pass
+ class StreamedError:
+ pass
+ def start_download(entry):
+ # This test function replaces the normal
+ # Backend.start_download so we can see that
+ # it was called without starting the download
+ if entry.file_mtime is not None:
+ self.testResult.errback(failure.Failure(VerifyMtimeError()))
+ if entry.file_size is not None:
+ self.testResult.errback(failure.Failure(VerifySizeError()))
+ if self.request.streamed:
+ self.testResult.errback(failure.Failure(StreamedError()))
+ self.testResult.callback(None)
+ self.backend.start_download = start_download
+ entry = CacheEntry(self.backend, "testdir/test.deb")
+ entry.test_download = False
+ entry.create_directory()
+ f = open(entry.file_path, 'w')
+ f.write('this is not a real .deb')
+ f.close()
+ entry.add_request(self.request)
+ return self.testResult
+ testVerifyFail.timeout = 2
+
+ def testCheckAgeImmutable(self):
+ # testfile.deb is immutable
+ self.entry.file_mtime = 0
+ self.failUnless(self.entry.check_age())
+
+ self.entry.file_mtime = time.time()+1000
+ self.failUnless(self.entry.check_age())
+
+ def testCheckAgeMutable(self):
+ # pretend that testfile.deb is mutable, i.e.
+ # it will be updated like Packages, Release
+ self.entry.filetype.mutable = True
+ self.entry.file_mtime = 0
+ self.failIf(self.entry.check_age())
+
+ self.entry.file_mtime = time.time()+1000
+ self.failUnless(self.entry.check_age())
+
+ def testCreateDirectory(self):
+ dirname = self.cache_dir+"/files/testdir"
+ self.assertRaises(OSError, os.stat, dirname) # Will return exception if directory does not exist
+ self.entry.create_directory()
+ os.stat(dirname) # Will return exception if directory does not exist
+
+ def testStatFile(self):
+ filename = self.cache_dir+"/files/testdir/testfile.deb"
+ self.entry.create_directory()
+ f = open(filename, 'w')
+ f.write('12345')
+ f.close()
+ close_time = time.time()
+ self.entry.stat_file()
+ self.assertApproximates(self.entry.file_mtime, close_time, 3)
+ self.assertEquals(self.entry.file_size, 5)
+
+class FileTypeTest(unittest.TestCase):
+ def testUnknownFiletype(self):
+ self.assertEquals(findFileType('unknownfile.xxx'), None)
+
+ def testFileTypes(self):
+ # Test filename recognition
+ # First entry - filename to test
+ # Second entry - mime type
+ # Third entry - mutable (can this file be changed in the archive?)
+ tests = [ ('test.deb', 'application/dpkg', False),
+ ('test2.udeb', 'application/dpkg', False),
+ ('Release.dsc', 'text/plain', False),
+ ('file.diff.gz', 'x-gzip', False),
+ ('Packages.gz', 'text/plain', True),
+ ('Packages.bz2', 'text/plain', True),
+ ('Sources.bz2', 'text/plain', True),
+ ('dists/sid/main/binary-i386/Packages.diff/Index', 'text/plain', True),
+ ('dists/sid/main/binary-i386/Packages.diff/2006-06-05-1427.58.gz', 'text/plain', False),
+ ('dists/sid/main/source/Sources.diff/Index', 'text/plain', True),
+ ('dists/sid/main/source/Sources.diff/2006-06-05-1427.58.gz', 'text/plain', False),
+ ('dists/sid/Contents-i386', 'text/plain', True),
+ ('dists/sid/Contents-i386.gz', 'text/plain', True),
+ ('dists/sid/Contents-i386.diff/Index', 'text/plain', True),
+ ('dists/sid/Contents-i386.diff/2006-06-02-1349.52.gz', 'text/plain', False),
+ ('dists/sid/main/i18n/Translation-de', 'text/plain', True),
+ ('dists/sid/main/i18n/Translation-de.gz', 'text/plain', True),
+ ('dists/sid/main/i18n/Translation-de.bz2', 'text/plain', True)
+ ]
+ for name,mimetype,mutable in tests:
+ log.debug('Testing filetype, name=%s mimetype=%s mutable=%s' % (name, mimetype, mutable))
+ result = findFileType(name)
+ self.assertNotEquals(result, None)
+ self.assertEquals(mimetype, result.contype)
+ self.assertEquals(mutable, result.mutable)
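Condensed from the table above, the findFileType contract: known archive names yield a FileType carrying a content type and mutability flag, unknown names yield None. For instance (assuming the new apt_proxy.cache module is importable):

    from apt_proxy.cache import findFileType

    ft = findFileType('dists/sid/main/binary-i386/Packages.diff/Index')
    assert ft is not None
    assert ft.contype == 'text/plain'
    assert ft.mutable                    # Index files are refreshed in the archive
    assert findFileType('unknownfile.xxx') is None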
Modified: trunk/apt_proxy/test/test_config.py
==============================================================================
--- trunk/apt_proxy/test/test_config.py (original)
+++ trunk/apt_proxy/test/test_config.py Thu Aug 3 23:54:46 2006
@@ -19,7 +19,7 @@
from twisted.trial import unittest
from StringIO import StringIO
-from apt_proxy.apt_proxy_conf import apConfig
+from apt_proxy.apt_proxy_conf import apConfig, ConfigError
class EmptyConfigTest(unittest.TestCase):
def setUp(self):
@@ -35,10 +35,12 @@
port=8989
address=1.2.3.4 5.6.7.8
timeout = 888
+bandwidth_limit = 2323
[backend1]
backends = ftp://a.b.c
timeout = 999
+bandwidth_limit = 3434
[backend2]
backends =
@@ -79,4 +81,17 @@
self.assertEquals(self.c.backends['dynamic1'].name,'dynamic1')
self.assertEquals(self.c.backends['dynamic1'].dynamic,True)
self.assertEquals(self.c.backends['dynamic1'].timeout,888)
+ def testBandwidthLimit(self):
+ self.assertEquals(self.c.bandwidth_limit, 2323)
+ self.assertEquals(self.c.backends['backend1'].bandwidth_limit,3434)
+ self.assertEquals(self.c.backends['backend2'].bandwidth_limit,2323)
+
+class BrokenTimeoutTest(unittest.TestCase):
+ def testBrokenTimeout(self):
+ self.assertRaises(ConfigError, apConfig, StringIO("[Default]\ntimeout = "))
+class DefaultsTest(unittest.TestCase):
+ def setUp(self):
+ self.c = apConfig(StringIO(""))
+ def testDefaultReadLimit(self):
+ self.assertEquals(self.c.bandwidth_limit, None)
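testBandwidthLimit encodes the inheritance rule: a backend section without its own bandwidth_limit falls back to the [DEFAULT] value. The same rule as a minimal standalone check, mirroring the test data above:

    from StringIO import StringIO
    from apt_proxy.apt_proxy_conf import apConfig

    c = apConfig(StringIO(
        "[DEFAULT]\nbandwidth_limit = 2323\n"
        "[backend1]\nbackends = ftp://a.b.c\nbandwidth_limit = 3434\n"
        "[backend2]\nbackends = http://x.y.z\n"))
    assert c.bandwidth_limit == 2323
    assert c.backends['backend1'].bandwidth_limit == 3434   # own value
    assert c.backends['backend2'].bandwidth_limit == 2323   # inherited from DEFAULT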
Added: trunk/apt_proxy/test/test_fetchers.py
==============================================================================
--- (empty file)
+++ trunk/apt_proxy/test/test_fetchers.py Thu Aug 3 23:54:46 2006
@@ -0,0 +1,567 @@
+#
+# Copyright (C) 2006 Chris Halls <halls at debian.org>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""This module tests the Fetcher classes"""
+
+import time, os, socket, signal, string
+
+from twisted.internet import reactor, protocol, defer, error
+from twisted.protocols import ftp
+from twisted.cred import portal, checkers, credentials
+from twisted.python import failure
+
+from apt_proxy.apt_proxy_conf import apConfig
+from apt_proxy.apt_proxy import Factory
+from apt_proxy.misc import log
+from apt_proxy.cache import CacheEntry
+from apt_proxy.fetchers import HttpFetcher, FetcherHttpClient, FtpFetcher, Fetcher, \
+ RsyncFetcher, DownloadQueue, DownloadQueuePerClient
+from apt_proxy.test.test_apt_proxy import apTestHelper, FactoryTestHelper
+
+
+config1="""
+[DEFAULT]
+debug=all:9
+port=9999
+address=
+cleanup_freq=off
+max_versions=off
+
+[backend1]
+backends = http://localhost/nothing-really
+
+[ftp]
+backends = ftp://localhost/nothing-really
+"""
+
+class FetcherHttpTest(FactoryTestHelper):
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ FactoryTestHelper.setUp(self, config1)
+
+ def testInit(self):
+ "Brief init test"
+ backend = self.factory.getBackend('backend1')
+ backendServer = backend.uris[0]
+ httpFetcher = HttpFetcher(backendServer)
+ httpFetcher.proxy = None # Would otherwise have been set by httpFetcher.connect
+ connection = FetcherHttpClient(httpFetcher)
+
+
+class FetcherFtpInitTest(FactoryTestHelper):
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ FactoryTestHelper.setUp(self, config1)
+
+ def testInit(self):
+ "Brief init test"
+ backend = self.factory.getBackend('ftp')
+ backendServer = backend.uris[0]
+ ftpFetcher = FtpFetcher(backendServer)
+
+class FetcherFtpTestHelper(FactoryTestHelper):
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ self.ftpserver = FtpServer()
+ port = self.ftpserver.start()
+ config = (config1 +
+ "[test_ftp]\n" +
+ "backends=http://127.0.0.1:" + str(port))
+ FactoryTestHelper.setUp(self, config)
+ self.backend = self.factory.getBackend('test_ftp')
+ self.backendServer = self.backend.uris[0]
+ self.ftpFetcher = FtpFetcher(self.backendServer)
+ self.ftpFetcher.debug = 1
+ def tearDown(self):
+ # We don't care about deferreds left over e.g. pending connection
+ #delayeds = reactor.getDelayedCalls()
+ #for d in delayeds:
+ # d.cancel()
+ self.ftpFetcher.disconnect()
+ self.ftpFetcher = None
+ self.ftpserver.stop()
+ self.ftpserver = None
+ FactoryTestHelper.tearDown(self)
+
+class FetcherFtpTest(FetcherFtpTestHelper):
+ def setUp(self):
+ FetcherFtpTestHelper.setUp(self)
+
+ def testConnect(self):
+ "Test connect"
+ return self.ftpFetcher.connect()
+ testConnect.timeout = 2
+
+ def testConnectFail(self):
+ "Test connect failure"
+ self.ftpserver.stop()
+ d = self.ftpFetcher.connect()
+ def callBack(result):
+ raise RuntimeError("Connect should have failed")
+ def errorBack(result):
+ result.trap(error.ConnectionRefusedError)
+ # Reverse meaning of deferred, ie errorback = as expected
+ d.addCallbacks(callBack, errorBack)
+ return d
+ testConnectFail.timeout = 2
+
+class DummyFetcher:
+ def __init__(self, deferred):
+ self.deferred = deferred
+ self.error_code = None # Anticipated error
+ self.wait_for_mtime = False
+ self.wait_for_not_found = False
+ def download_failed(self, code, reason):
+ if self.error_code is not None and \
+ self.error_code == code:
+ self.deferred.callback()
+ else:
+ self.deferred.errback(None)
+ def server_mtime(self, time):
+ if self.wait_for_mtime == True:
+ self.deferred.callback(None)
+ def file_not_found(self):
+ if self.wait_for_not_found == True:
+ self.deferred.callback(None)
+
+class FetcherFtpProtocolTest(FetcherFtpTestHelper):
+ def setUp(self):
+ FetcherFtpTestHelper.setUp(self)
+ self.resultCallback = defer.Deferred()
+ self.fetcher = DummyFetcher(self.resultCallback)
+ self.fetcher.backendServer = self.backendServer
+
+ def tearDown(self):
+ FetcherFtpTestHelper.tearDown(self)
+
+ def testNotFound(self):
+ "Test for file not found"
+ d = self.ftpFetcher.connect()
+ d.addCallback(self.NotFoundConnectCallback)
+ return self.resultCallback
+ testNotFound.timeout = 1
+ def NotFoundConnectCallback(self,result):
+ self.fetcher.wait_for_not_found = True
+ self.ftpFetcher.download(self.fetcher, 'notHereFile', 0)
+
+ def MtimeConnectCallback(self,result):
+ log.debug("connection made", 'FetcherFtpProtocolTest')
+ self.fetcher.wait_for_mtime = True
+ self.ftpFetcher.download(self.fetcher, 'packages/Packages', 0)
+
+ def testMtime(self):
+ "Test mtime request"
+ def FetchSize():
+ pass
+ self.ftpFetcher.ftpFetchSize = FetchSize # We don't want to get size afterwards
+ d = self.ftpFetcher.connect()
+ d.addCallback(self.MtimeConnectCallback)
+ return self.resultCallback
+ testMtime.timeout = 1
+
+class FtpServer:
+ def start(self):
+ """
+ Start FTP server, serving test data
+
+ @ret port number that server listens on
+
+ This routine was hacked from twisted/tap/ftp.py
+ """
+ root = '../test_data'
+ f = ftp.FTPFactory()
+ r = ftp.FTPRealm(root)
+ f.tld = root
+ p = portal.Portal(r)
+ p.registerChecker(checkers.AllowAnonymousAccess(), credentials.IAnonymous)
+
+ f.userAnonymous = 'anonymous'
+ f.portal = p
+ f.protocol = ftp.FTP
+
+ self.port = reactor.listenTCP(0, f, interface="127.0.0.1")
+ portnum = self.port.getHost().port
+ log.debug("Ftp server listening on port %s" %(portnum))
+ self.factory = f
+ return portnum
+
+ def stop(self):
+ #pass
+ self.port.stopListening()
+ self.factory.stopFactory()
+
+class RsyncFetcherTest(FactoryTestHelper):
+ """
+ Set up a cache dir and a factory
+ """
+
+ rsync_config="""
+[DEFAULT]
+debug=all:9
+port=9999
+address=
+cleanup_freq=off
+max_versions=off
+
+[rsync]
+backends = rsync://127.0.0.1:0/test
+"""
+
+ class DummyFetcher:
+ def __init__(self, backend, backendServer):
+ self.backend = backend
+ self.backendServer = backendServer
+ self.cacheEntry = backend.get_cache_entry("testdir/testfile.deb")
+ def fetcher_internal_error(self, message):
+ log.debug('fetcher_internal_error: %s' % (message))
+
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ FactoryTestHelper.setUp(self, self.rsync_config)
+ self.backend = self.factory.getBackend('rsync')
+ self.backendServer = self.backend.get_first_server()
+ self.f = RsyncFetcher(self.backendServer)
+ def tearDown(self):
+ self.f.disconnect()
+ def testRsyncInit(self):
+ self.assertEquals(self.f.backendServer, self.backendServer)
+ def testConnect(self):
+ return self.f.connect() # connect returns a deferred that fires
+ def testDownload(self):
+ self.f.connect()
+ dummyFetcher = self.DummyFetcher(self.backend, self.backendServer)
+ self.f.download(dummyFetcher, 'test', time.time())
+
+class RsyncServer(protocol.ProcessProtocol):
+ """
+ Starts an rsync daemon on localhost for testing
+ """
+ rsyncCommand = '/usr/bin/rsync'
+
+ def start(self):
+ """
+ Start rsync server, serving test data
+
+ @ret port number that server listens on
+ """
+ self.rsync_dir = '../test_data'
+
+ # Find a port number for the rsync server process:
+ # Start listening on a random port, then close it
+ s = socket.socket()
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ s.listen(1)
+ self.rsync_port = s.getsockname()[1]
+ s.close()
+
+ self.rsync_confpath = self.rsync_dir + os.sep + 'testrsync.conf'
+ self.write_rsyncconf()
+
+ args = (self.rsyncCommand, '--daemon', '--config=' + self.rsync_confpath, '--verbose', '--no-detach')
+ self.rsyncProcess = reactor.spawnProcess(self, self.rsyncCommand, args, None,self.rsync_dir)
+
+ # wait for server to start
+ s2 = socket.socket()
+ while s2:
+ try:
+ s2.connect(('127.0.0.1', self.rsync_port))
+ break
+ except:
+ pass
+ reactor.iterate(0.1)
+ s2.close()
+
+ log.debug("rsync server listening on port %s" %(self.rsync_port))
+ return self.rsync_port
+
+ def stop(self):
+ if self.rsyncProcess and self.rsyncProcess.pid:
+ log.debug("killing rsync child pid " +
+ str(self.rsyncProcess.pid), 'RsyncServer')
+ self.rsyncProcess.loseConnection()
+ os.kill(self.rsyncProcess.pid, signal.SIGTERM)
+
+ def write_rsyncconf(self):
+ f = open(self.rsync_confpath, 'w')
+ f.write("address = 127.0.0.1\n")
+ f.write("port = %s\n" % (self.rsync_port))
+ f.write("log file = %s\n" %(self.rsync_dir+os.sep+'testrsync.log'))
+ f.write("[apt-proxy]\n")
+ f.write("path = %s\n" %(self.rsync_dir))
+ f.write("use chroot = false\n") # Can't chroot becuase daemon isn't root
+ f.close()
+
+ def outReceived(self, data):
+ "Data received from rsync process to stdout"
+ for s in string.split(data, '\n'):
+ if len(s):
+ log.debug('rsync: ' + s, 'RsyncServer')
+
+ def errReceived(self, data):
+ "Data received from rsync process to stderr"
+ for s in string.split(data, '\n'):
+ if len(s):
+ log.err('rsync error: ' + s, 'RsyncServer')
+
+ def processEnded(self, status_object):
+ if isinstance(status_object, failure.Failure):
+ log.debug("rsync failure: %s" %(status_object)
+ ,'RsyncServer')
+ else:
+ log.debug("Status: %d" %(status_object.value.exitCode)
+ ,'RsyncServer')
+
+ # Success?
+ exitcode = status_object.value.exitCode
+
+
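# RsyncServer.start() picks its port with a trick worth noting: listen()
# on an unbound socket makes the kernel assign an ephemeral port, which is
# read back and released for the daemon to claim. (There is an inherent
# small race if another process grabs the port in between.) In isolation:
import socket

def free_port():
    s = socket.socket()
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.listen(1)                  # kernel auto-binds to an ephemeral port
    port = s.getsockname()[1]
    s.close()
    return port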
+class FetcherRsyncTestHelper(FactoryTestHelper):
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ self.rsyncserver = RsyncServer()
+ port = self.rsyncserver.start()
+ config = (config1 +
+ "[test_rsync]\n" +
+ "backends=http://127.0.0.1:" + str(port) + '/apt-proxy')
+ FactoryTestHelper.setUp(self, config)
+ self.backend = self.factory.getBackend('test_rsync')
+ self.backendServer = self.backend.uris[0]
+ self.rsyncFetcher = RsyncFetcher(self.backendServer)
+ self.rsyncFetcher.debug = 1
+ def tearDown(self):
+ # We don't care about deferreds left over e.g. pending connection
+ #delayeds = reactor.getDelayedCalls()
+ #for d in delayeds:
+ # d.cancel()
+ self.rsyncFetcher.disconnect()
+ self.rsyncFetcher = None
+ self.rsyncserver.stop()
+ self.rsyncserver = None
+ FactoryTestHelper.tearDown(self)
+
+class FetcherRsyncProtocolTest(FetcherRsyncTestHelper):
+ def setUp(self):
+ FetcherRsyncTestHelper.setUp(self)
+ self.resultCallback = defer.Deferred()
+ self.fetcher = DummyFetcher(self.resultCallback)
+ self.fetcher.backendServer = self.backendServer
+
+ def tearDown(self):
+ FetcherRsyncTestHelper.tearDown(self)
+
+ def testNotFound(self):
+ "Test for file not found"
+ d = self.rsyncFetcher.connect()
+ d.addCallback(self.NotFound2)
+ return self.resultCallback
+ testNotFound.timeout = 1
+ def NotFound2(self,result):
+ self.fetcher.wait_for_not_found = True
+ fileName = 'notHereFile'
+ self.fetcher.cacheEntry = self.backend.get_cache_entry(fileName)
+ self.rsyncFetcher.download(self.fetcher, fileName, 0)
+
+QueueConfig = """
+[test_queue]
+backends=http://server1/path1
+"""
+
+class DummyFetcher:
+ def __init__(self, backend):
+ self.backend = backend
+ def connect(self):
+ # We always connect
+ d = defer.succeed(True)
+ return d
+ def disconnect(self):
+ pass
+ def download(self, fetcher, uri, mtime):
+ fetcher.cacheEntry.state = CacheEntry.STATE_DOWNLOAD
+ pass
+
+class DummyServer:
+ fetcher = DummyFetcher
+ path = 'Dummy'
+ uri = 'dummy://'
+class DummyBackend:
+ name = 'Dummy'
+ def get_first_server(self):
+ return DummyServer()
+class DummyCacheEntry:
+ """
+ Class that provides basic CacheEntry information
+ """
+
+ STATE_NEW = CacheEntry.STATE_NEW
+ STATE_DOWNLOAD = CacheEntry.STATE_DOWNLOAD
+ def __init__(self, cache_dir, backend, file):
+ self.filename = os.path.basename(file)
+ self.path = file
+ self.cache_path = backend + os.sep + file
+ self.file_path = cache_dir + os.sep + self.cache_path
+ self.file_mtime = None
+ self.requests = []
+ self.state = self.STATE_NEW
+
+class DownloadQueueTest(FactoryTestHelper):
+
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ FactoryTestHelper.setUp(self, QueueConfig)
+ self.queue = DownloadQueue()
+ self.backend = self.factory.getBackend('test_queue')
+ def testInit(self):
+ self.assertEquals(len(self.queue.queue), 0)
+ self.assertEquals(self.queue.fetcher, None)
+ self.assertEquals(self.queue.activeFile, None)
+ def testAddFile(self):
+ entry = DummyCacheEntry(self.cache_dir, 'test_queue', 'test.deb')
+ entry.backend = DummyBackend()
+ self.queue.addFile(entry)
+ self.assertEquals(len(self.queue.queue), 0)
+ self.assertEquals(self.queue.activeFile, entry)
+ self.queue.stop() # Cancel timeout CB
+ def testDownloadComplete(self):
+ entry = DummyCacheEntry(self.cache_dir, 'test_queue', 'test.deb')
+ entry.backend = DummyBackend()
+ self.queue.addFile(entry)
+ self.assertEquals(self.queue.activeFile, entry)
+ self.queue.downloadFinished([True, 'Test complete'])
+ self.assertEquals(self.queue.activeFile, None)
+ self.queue.stop() # Cancel timeout CB
+
+class DummyRequest:
+ def __init__(self, fileno=0):
+ self.fileno=fileno
+ self.finished = False
+ self.streamed = 0
+ def finishCode(self, code, reason):
+ self.finished = True
+ def start_streaming(self, file_size, file_mtime):
+ self.streamed = self.streamed + 1
+ def getFileno(self):
+ return self.fileno
+
+class DownloadQueuePerClientTest(FactoryTestHelper):
+ def setUp(self):
+ """
+ Set up a factory using the additional config given
+ """
+ FactoryTestHelper.setUp(self, QueueConfig)
+ self.queue = DownloadQueuePerClient()
+ self.backend = self.factory.getBackend('test_queue')
+ def testInit(self):
+ self.assertEquals(len(self.queue.queues), 0)
+ def testSeparateFilesAndClients(self):
+ req1 = DummyRequest(123)
+ req2 = DummyRequest(234)
+
+ entry1 = DummyCacheEntry(self.cache_dir, 'test_queue', 'test1.deb')
+ entry1.requests = [req1]
+ entry1.backend = DummyBackend()
+
+ entry2 = DummyCacheEntry(self.cache_dir, 'test_queue', 'test2.deb')
+ entry2.requests = [req2]
+ entry2.backend = entry1.backend
+
+ self.assertEquals(len(self.queue.queues.keys()), 0)
+ self.assertNotEquals(self.queue.queues.has_key(req1.fileno), True)
+ self.queue.addFile(entry1)
+ self.assertEquals(len(self.queue.queues.keys()), 1)
+ self.assertEquals(self.queue.queues[req1.fileno].activeFile, entry1)
+
+ self.queue.addFile(entry2)
+ self.assertEquals(len(self.queue.queues.keys()), 2)
+ self.assertEquals(self.queue.queues[req2.fileno].activeFile, entry2)
+
+ self.queue.stop() # Cancel timeout CB
+
+ def testSeparateFiles(self):
+ req1 = DummyRequest(123)
+ req2 = DummyRequest(123)
+
+ entry1 = DummyCacheEntry(self.cache_dir, 'test_queue', 'test1.deb')
+ entry1.requests = [req1]
+ entry1.backend = DummyBackend()
+
+ entry2 = DummyCacheEntry(self.cache_dir, 'test_queue', 'test2.deb')
+ entry2.requests = [req2]
+ entry2.backend = entry1.backend
+
+ self.assertEquals(len(self.queue.queues.keys()), 0)
+ self.assertNotEquals(self.queue.queues.has_key(req1.fileno), True)
+ self.queue.addFile(entry1)
+ self.assertEquals(len(self.queue.queues.keys()), 1)
+ self.assertEquals(self.queue.queues[req1.fileno].activeFile, entry1)
+
+ self.queue.addFile(entry2)
+ self.assertEquals(len(self.queue.queues.keys()), 1)
+ # Entry 2 should have been added to the first queue, and entry1 will
+ # still be active
+ self.assertEquals(self.queue.queues[req2.fileno].activeFile, entry1)
+ self.assertEquals(self.queue.queues[req2.fileno].queue[0], entry2)
+
+ self.queue.stop() # Cancel timeout CB
+
+ def testSeparateClients(self):
+ # 2 clients requesting 1 file
+ req1 = DummyRequest(123)
+ req2 = DummyRequest(234)
+
+ entry1 = DummyCacheEntry(self.cache_dir, 'test_queue', 'test1.deb')
+ entry1.requests = [req1]
+ entry1.backend = DummyBackend()
+
+ self.assertEquals(len(self.queue.queues.keys()), 0)
+ self.assertNotEquals(self.queue.queues.has_key(req1.fileno), True)
+ self.queue.addFile(entry1)
+ self.assertEquals(len(self.queue.queues.keys()), 1)
+ self.assertEquals(self.queue.queues[req1.fileno].activeFile, entry1)
+
+ entry2 = entry1
+ entry2.requests.append(req2)
+
+ # Entry 2 will have been added to a second queue, but will be immediately
+ # dequeued because it is on entry 1's queue
+ self.queue.addFile(entry2)
+ self.assertEquals(len(self.queue.queues.keys()), 2)
+ self.assertEquals(self.queue.queues[req2.fileno].activeFile, None)
+
+ self.queue.stop() # Cancel timeout CB
+
+ def testDownloadComplete(self):
+ req = DummyRequest(678)
+ entry = DummyCacheEntry(self.cache_dir, 'test_queue', 'test.deb')
+ entry.backend = DummyBackend()
+ entry.requests = [req]
+ self.queue.addFile(entry)
+ self.assertEquals(len(self.queue.queues.keys()), 1)
+ self.queue.queues[req.fileno].closeFetcher()
+ # Check that queue for this client has been removed
+ self.assertEquals(len(self.queue.queues.keys()), 0)
+ #self.queue.stop() # Cancel timeout CB
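The three testSeparate* cases pin down DownloadQueuePerClient's keying rule: queues live in a dict keyed by the requesting client's file descriptor, so one client's files are serialised while distinct clients download in parallel. The lookup in miniature (an illustrative sketch, not the class's actual method):

    queues = {}

    def queue_for(client_id):
        # one DownloadQueue per client; created lazily, removed when empty
        if client_id not in queues:
            queues[client_id] = DownloadQueue()
        return queues[client_id]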
Modified: trunk/apt_proxy/test/test_packages.py
==============================================================================
--- trunk/apt_proxy/test/test_packages.py (original)
+++ trunk/apt_proxy/test/test_packages.py Thu Aug 3 23:54:46 2006
@@ -16,7 +16,8 @@
"""Unit test for packages.py"""
-from apt_proxy.packages import AptPackages, PackageFileList, get_mirror_versions, AptDpkgInfo
+from apt_proxy import packages
+#import AptPackages, PackageFileList, get_mirror_versions, AptDpkgInfo, cleanup
from apt_proxy.apt_proxy import Factory
from apt_proxy.test.test_apt_proxy import apTestHelper
from apt_proxy.apt_proxy_conf import apConfig
@@ -24,10 +25,20 @@
from twisted.trial import unittest
import shutil, os, re, glob
+class DummyCacheEntry:
+ """
+ Class that provides basic CacheEntry information
+ """
+ def __init__(self, cache_dir, backend, file):
+ self.filename = os.path.basename(file)
+ self.path = file
+ self.cache_path = backend + os.sep + file
+ self.file_path = cache_dir + os.sep + self.cache_path
+
class PackageFileListTest(apTestHelper):
def setUp(self):
apTestHelper.setUp(self)
- self.f = PackageFileList('test', self.cache_dir)
+ self.f = packages.PackageFileList('test', self.cache_dir)
def tearDown(self):
del(self.f) # Needed otherwise we'll get a database exception when cache dir is removed
apTestHelper.tearDown(self)
@@ -35,9 +46,9 @@
self.assertEqual(self.f.get_files(),[])
def testAddPackages(self):
shutil.copytree('../test_data/packages', self.cache_dir+'/packages')
- self.f.update_file('packages/Packages')
+ self.f.update_file(DummyCacheEntry(self.cache_dir, 'packages','Packages'))
self.assertEqual(self.f.get_files(),['packages/Packages'])
- self.f.update_file('packages/Packages.gz') # This file should not be added
+ self.f.update_file(DummyCacheEntry(self.cache_dir, 'packages','Packages.gz')) # This file should not be added
self.assertNotIn('packages/Packages.gz', self.f.get_files())
# Remove packages file and check that it is removed from database
@@ -47,7 +58,7 @@
class PackagesCacheTest(apTestHelper):
def setUp(self):
apTestHelper.setUp(self)
- self.p = AptPackages('test', self.cache_dir)
+ self.p = packages.AptPackages('test', self.cache_dir)
def tearDown(self):
del(self.p)
apTestHelper.tearDown(self)
@@ -56,7 +67,7 @@
def testReLoadEmpty(self):
self.failIfEqual(self.p.load(),True)
del(self.p)
- self.p = AptPackages('test', self.cache_dir)
+ self.p = packages.AptPackages('test', self.cache_dir)
class PackagesTestHelper(apTestHelper):
"""
@@ -64,9 +75,9 @@
"""
def setUp(self):
apTestHelper.setUp(self)
- self.p = AptPackages('test1', self.cache_dir)
+ self.p = packages.AptPackages('test1', self.cache_dir)
shutil.copytree('../test_data/packages', self.cache_dir+'/packages')
- self.p.file_updated('packages/Packages')
+ self.p.file_updated(DummyCacheEntry(self.cache_dir, 'packages','Packages'))
#print "Cache dir:", self.cache_dir, '\n'
def tearDown(self):
del(self.p)
@@ -83,11 +94,11 @@
def get_test_deb_name():
"Return filename of test deb file"
debs = glob.glob('../test_data/packages/apt_*_*.deb')
- return debs[0]
+ return debs[-1]
def get_test_deb_info():
"Return an AptDpkgInfo for our test deb"
- return AptDpkgInfo(get_test_deb_name())
+ return packages.AptDpkgInfo(get_test_deb_name())
class AptDpkgInfoTest(unittest.TestCase):
def testGetInfo(self):
@@ -109,19 +120,23 @@
self.factory = Factory(self.c)
self.factory.createBackends()
# Register test package files in db
- self.factory.getBackend('packages').get_packages_db().file_updated('packages/Packages')
+ entry = DummyCacheEntry(self.cache_dir,'packages','Packages')
+ self.factory.getBackend('packages').get_packages_db().file_updated(entry)
# Get version of apt used for testing
self.aptinfo = get_test_deb_info()
#print self.cache_dir
def tearDown(self):
+ packages.cleanup(self.factory)
del(self.factory)
apTestHelper.tearDown(self)
def testGetAllMirrorVersions(self):
- aptversions = get_mirror_versions(self.factory, 'apt')
+ aptversions = packages.get_mirror_versions(self.factory, 'apt')
self.assertEquals(self.aptinfo['Version'], aptversions[0][0])
testdeb_name = get_test_deb_name().replace('../test_data/','') # strip test data directory
self.assertEquals(testdeb_name, aptversions[0][1])
- self.assertEquals([], get_mirror_versions(self.factory, 'unknown'))
+ self.assertEquals([], packages.get_mirror_versions(self.factory, 'unknown'))
def testGetEmptyMirrorVersions(self):
- self.assertEquals([], get_mirror_versions(self.factory, 'unknown'))
+ self.assertEquals([], packages.get_mirror_versions(self.factory, 'unknown'))
+ #def testGetMirrorPath(self):
+ #self.assertEquals([], packages.get_mirror_path(self.factory, 'unknown'))
Added: trunk/apt_proxy/test/test_requests.py
==============================================================================
--- (empty file)
+++ trunk/apt_proxy/test/test_requests.py Thu Aug 3 23:54:46 2006
@@ -0,0 +1,524 @@
+#
+# Copyright (C) 2006 Chris Halls <halls at debian.org>
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of version 2.1 of the GNU Lesser General Public
+# License as published by the Free Software Foundation.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+"""This module tests the client protocol itself"""
+
+import os, time
+from twisted.trial import unittest
+from twisted.internet import protocol, reactor, defer
+from twisted.python import failure
+from twisted import web
+from twisted.web import http
+from StringIO import StringIO
+
+from apt_proxy.apt_proxy_conf import apConfig
+from apt_proxy.cache import CacheEntry
+from apt_proxy.apt_proxy import Factory
+from apt_proxy.misc import log
+from apt_proxy.fetchers import DownloadQueue
+from apt_proxy.test.test_fetchers import RsyncServer
+from apt_proxy.test.test_apt_proxy import apTestHelper
+
+class uriData:
+ """
+ class containing test data for a request
+ """
+ def __init__(self, filename, expectedResponse, if_modified_since=None, expectedSize=None, filePath=None, abortTransfer=False):
+ self.filename = filename
+ self.expectedResponse = expectedResponse
+ self.if_modified_since = if_modified_since
+ self.filePath = filePath
+ self.abortTransfer = abortTransfer
+
+ if expectedSize is not None:
+ self.expectedSize = expectedSize # If not none, the file sent should have this size
+ elif filePath is not None:
+ # Get size of actual file
+ self.expectedSize = os.path.getsize(filePath)
+ else:
+ self.expectedSize = None
+
+class uriRequester(http.HTTPClient):
+ """
+ class to request files and parse responses
+ """
+ class requestFactory(protocol.ClientFactory):
+ """
+ Helper factory to connect to apt-proxy and send
+ HTTP requests using uriRequester
+ """
+ #def startedConnecting(self, connector):
+ # print 'Started to connect.'
+ def __init__(self, request):
+ self.request = request
+ def buildProtocol(self, addr):
+ "Pass incoming connection to our request"
+ return self.request
+ def clientConnectionLost(self, connector, reason):
+ log.debug('Lost connection. Reason:'+ str(reason))
+ def clientConnectionFailed(self, connector, reason):
+ log.err('Connection failed. Reason:', reason, 'requestFactory')
+ self.request.failed()
+
+ def __init__(self, host, *testData):
+ """
+ Start test. *testData holds classes of uriData
+ """
+ self.factory = self.requestFactory(self)
+ self.host = host
+ self.deferred = defer.Deferred() # Deferred that returns result of test
+ self.http_status = None
+ self.received_len = 0
+ self.tests=testData
+
+ def connectionMade(self):
+ """
+ Http connection made
+ """
+ log.debug("connection made to test apt-proxy server", 'uriRequester')
+ for i in range(0,len(self.tests)):
+ test = self.tests[i]
+ log.debug("requesting: %s" %(test.filename), 'uriRequester')
+ #self.sendCommand("GET", test.filename)
+ self.transport.write('%s %s HTTP/1.1\r\n' % ("GET", test.filename))
+
+ self.sendHeader('Host', self.host)
+ if len(self.tests)>1 and i != len(self.tests)-1: # keep-alive for all but the last request
+ self.sendHeader('Connection','keep-alive')
+ else:
+ self.sendHeader('Connection','close')
+ if test.if_modified_since is not None:
+ datetime = http.datetimeToString(test.if_modified_since)
+ self.sendHeader('if-modified-since', datetime)
+ self.sendHeader("User-Agent", "apt-proxy test suite test_requests.py")
+
+ self.endHeaders()
+ self.getNextTest() # Start first test
+
+ def getNextTest(self):
+ # Ready for next status code
+ self.firstLine = 1
+ #self.length = None
+ self.__buffer = ''
+
+ if len(self.tests):
+ self.nextTest = self.tests[0]
+ self.tests = self.tests[1:]
+ log.debug("waiting for test results for: " + self.nextTest.filename, 'uriRequester')
+ else:
+ log.debug('test passed', 'uriRequester')
+ self.deferred.callback(None)
+
+ #def handleStatus(self, version, code, message):
+ def handleStatus(self, version, code, message):
+ log.debug('handleStatus: (%s) %s - %s, expected:%s' %
+ (version, code, message, self.nextTest.expectedResponse), 'uriRequester')
+ self.http_status = int(code)
+
+ def dataReceived(self, data):
+ self.received_len = self.received_len + len(data)
+ log.debug("data received, len: %s" % (self.received_len), 'uriRequester')
+        if not self.nextTest.abortTransfer:
+            http.HTTPClient.dataReceived(self, data)
+        else:
+            self.passed() # Trigger disconnection of the connection
+
+    class ResponseError(Exception):
+        pass
+    class SizeError(Exception):
+        pass
+
+ def handleResponse(self, buffer):
+ received_len = len(buffer)
+ log.debug('data received: %s bytes, expected:%s' % (received_len, self.nextTest.expectedSize), 'uriRequester')
+ if self.http_status != self.nextTest.expectedResponse:
+ log.debug('test FAILED: response code (%s) is not %s' %
+ (self.http_status, self.nextTest.expectedResponse), 'uriRequester')
+ self.failed(self.ResponseError())
+ elif self.nextTest.expectedSize is not None and received_len != self.nextTest.expectedSize:
+ log.debug('test FAILED: received %s bytes, but expected %s' %
+ (received_len, self.nextTest.expectedSize), 'uriRequester')
+ self.failed(self.SizeError())
+ else:
+ self.passed()
+
+ def passed(self):
+ self.getNextTest()
+ def failed(self, data):
+ log.debug('test failed', 'uriRequester')
+ self.deferred.errback(data)
+
+
+class TestRequestHelper(apTestHelper):
+ def setUp(self, config):
+ apTestHelper.setUp(self)
+ config = self.config + '\n' + config
+ log.debug("config:\n" + config, 'TestRequestHelper')
+ self.c = apConfig(StringIO(config))
+ self.factory = Factory(self.c)
+ #self.factory.configurationChanged()
+ self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
+
+ def tearDown(self):
+ self.port.stopListening()
+ self.factory.stopFactory()
+        del self.factory
+ apTestHelper.tearDown(self)
+ self.assertRaises(OSError, os.stat, self.cache_dir)
+
+ def doRequest(self, *data):
+ portno = self.port.getHost().port
+ host = "127.0.0.1:%s" % (portno)
+ for d in data:
+ log.debug("Starting test connection to %s, file:%s:" %(host, d.filename), 'uriRequesterTest')
+ client = uriRequester(host, *data)
+ connection = reactor.connectTCP("127.0.0.1", portno, client.factory)
+ self.connection = connection
+
+ client.deferred.addBoth(lambda x: connection.disconnect())
+ self.lastRequestFactory = client
+ return client.deferred
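+
+    # Typical use from a test method (sketch; checkResult is hypothetical):
+    #   d = self.doRequest(uriData('/files/Packages.gz', http.OK))
+    #   d.addCallback(self.checkResult)
+    #   return d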
+
+class FileBackendTest(TestRequestHelper):
+ def setUp(self):
+ """
+ Make a configuration with a single backend
+ [files]
+ backends=file:///<path to test packages directory>
+ """
+ self.filedir = os.path.normpath(os.getcwd()+"/../test_data/packages")
+ config = ("dynamic_backends=off\n"+
+ "[files]\n" +
+ "backends=file://" + self.filedir)
+ #print "config: " + config
+ TestRequestHelper.setUp(self, config)
+
+ def testNotFound(self):
+ return self.doRequest(uriData('/files/test.gz', http.NOT_FOUND))
+ testNotFound.timeout = 1
+ def testPackagesFile(self):
+ file = 'Packages.gz'
+ d = uriData('/files/'+file, http.OK, filePath=self.filedir+os.sep+file)
+ return self.doRequest(d).addCallback(self.PackagesFile2)
+ testPackagesFile.timeout = 1
+ def PackagesFile2(self, x):
+ backend = self.factory.getBackend('files')
+ # Check that request was deleted from backend
+ self.assertEquals(len(backend.entries), 0)
+
+ def testForbidden(self):
+ d = self.doRequest(uriData('/notbackend/Release', http.NOT_FOUND))
+ return d
+ testForbidden.timeout = 1
+
+class WebServer:
+ def start(self):
+ """
+ Start web server, serving test data
+
+        @return: port number that the server listens on
+ """
+ root = web.static.File("../test_data")
+ #self.application = service.Application('web')
+ site = web.server.Site(root)
+ #self.port = reactor.listenTCP(0, self.application, interface="127.0.0.1")
+ self.port = reactor.listenTCP(0, site, interface="127.0.0.1")
+
+ return self.port.getHost().port
+
+ def stop(self):
+ self.port.stopListening()
+
+class BackendTestBase:
+ """
+ Class to perform a series of requests against a test backend.
+ Derived classes should arrange for a local server to serve
+    files from the test_data directory.
+ """
+
+ # Name of test backend
+ backendName = 'test_data'
+
+ def setUp(self, backend_uri):
+ """
+ Make a configuration with a single backend
+
+ @param backend_uri: backend server uri e.g. http://127.0.0.1:1234
+ """
+ config = ("dynamic_backends=off\n" +
+ "[test_data]\n" +
+ "backends=" + str(backend_uri))
+ TestRequestHelper.setUp(self, config)
+ self.testfilesdir = os.path.normpath(os.getcwd()+"/../test_data")
+ def tearDown(self):
+ log.debug("tearDown", self.debugname)
+ TestRequestHelper.tearDown(self)
+ def testNotFound(self):
+ return self.doRequest(uriData('/test_data/NotHere.gz', http.NOT_FOUND))
+ testNotFound.timeout = 2
+
+ def downloadFile(self, file='/packages/Packages.gz'):
+ """
+        Download a file to the cache.
+        self.filename is set to the last requested filename and
+        self.filepaths to the list of cached file paths
+ """
+ return self.downloadFiles(file)
+
+ def getFilePaths(self, file):
+ """
+        Given a filename, return the request path and the corresponding
+        source and cache destination paths
+ """
+ filename = '/' + self.backendName + file
+ sourcepath = self.testfilesdir+file
+ destpath = self.cache_dir + filename
+ # File should not be in cache
+ self.assertRaises(OSError, os.stat, destpath)
+ return filename, sourcepath, destpath
+
+ def downloadFiles(self, *files):
+ """
+ Download a number of files to cache
+ """
+ data = []
+ self.filepaths = []
+ for f in files:
+ self.filename, sourcepath, destpath = self.getFilePaths(f)
+ self.filepaths.append(destpath)
+ data.append(uriData(self.filename, http.OK, filePath=sourcepath))
+ d = self.doRequest(*data)
+ def checkPath(x):
+ # Check that files were really placed in cache
+ for f in self.filepaths:
+ os.stat(f)
+ d.addCallback(checkPath)
+ return d
+
+ def testPackagesFile(self):
+ return self.downloadFile().addCallback(self.PackagesFile2)
+ def PackagesFile2(self, x):
+        # Check the access time database was updated
+ self.assertApproximates(self.factory.access_times[self.filename], time.time(), 6)
+ testPackagesFile.timeout = 2
+
+ def testNotModifiedGreater(self):
+ "Check http not modified is sent for new file"
+ d = self.downloadFile()
+ self.testResult = defer.Deferred()
+ d.addCallback(self.NotModifiedGreater2)
+ d.addErrback(lambda x: self.testResult.errback(failure.Failure()))
+ return self.testResult
+ def NotModifiedGreater2(self, x):
+ log.debug("testNotModifiedGreater: starting second client", self.debugname)
+ d = self.doRequest(uriData(self.filename, http.NOT_MODIFIED, time.time()))
+ d.chainDeferred(self.testResult)
+ testNotModifiedGreater.timeout = 3
+
+ def testNotModifiedExact(self):
+        d = self.downloadFile()
+        self.testResult = defer.Deferred()
+        d.addCallback(self.NotModifiedExact2)
+ d.addErrback(lambda x: self.testResult.errback(failure.Failure()))
+ return self.testResult
+ def NotModifiedExact2(self, x):
+        d = self.doRequest(uriData(self.filename, http.NOT_MODIFIED, os.path.getmtime(self.filepaths[0])))
+ d.chainDeferred(self.testResult)
+ testNotModifiedExact.timeout = 2
+
+ def testCloseFetcherImmediately(self):
+ DownloadQueue.closeTimeout = 0 # Close fetcher immediately
+ return self.downloadFile().addCallback(self.CloseFetcherImmediately2)
+ def CloseFetcherImmediately2(self, x):
+ queues = self.factory.getBackend(self.backendName).queue.queues.values()
+ self.assertEquals(len(queues), 0)
+ testCloseFetcherImmediately.timeout = 2
+
+ def testLeaveFetcherOpen(self):
+ DownloadQueue.closeTimeout = 2 # 2 second delay to close
+ return self.downloadFile().addCallback(self.LeaveFetcherOpen2)
+ def LeaveFetcherOpen2(self, x):
+ queues = self.factory.getBackend(self.backendName).queue.queues.values()
+ self.assertNotEquals(len(queues), 0)
+ testLeaveFetcherOpen.timeout = 4
+
+ def testAutoCloseFetcher(self):
+ DownloadQueue.closeTimeout = 0.1
+ d = self.downloadFile()
+ self.autoclosedeferred = defer.Deferred()
+ d.addCallback(self.AutoCloseFetcher2)
+ d.addErrback(lambda x: self.autoclosedeferred.errback(failure.Failure()))
+ return self.autoclosedeferred
+ def AutoCloseFetcher2(self, x):
+ # File is downloaded, now check fetcher state
+ self.f = self.factory.getBackend(self.backendName).queue.queues.values()[0].fetcher
+ reactor.callLater(0.2, self.AutoCloseFetcher3)
+ def AutoCloseFetcher3(self):
+ queues = self.factory.getBackend(self.backendName).queue.queues.values()
+ self.assertEquals(len(queues), 0)
+ self.autoclosedeferred.callback(None)
+ testAutoCloseFetcher.timeout = 2
+
+ def testCached(self):
+ self.testResult = defer.Deferred()
+ d = self.downloadFile()
+ d.addCallback(self.Cached2)
+ d.addErrback(self.CachedError)
+ return self.testResult
+ def Cached2(self, x):
+ d = self.doRequest(uriData(self.filename, http.OK, filePath=self.filepaths[0]))
+ d.addCallback(self.Cached3)
+ d.addErrback(self.CachedError)
+ def Cached3(self, x):
+ log.debug("Downloading second copy", self.debugname)
+ self.factory.config.min_refresh_delay = 0
+ d = self.doRequest(uriData(self.filename, http.OK, filePath=self.filepaths[0]))
+ d.addCallback(self.CachedPass)
+ d.addErrback(self.CachedError)
+ def CachedPass(self, x):
+ self.testResult.callback(None)
+ def CachedError(self, x):
+ log.debug("testCached ERROR", self.debugname)
+ self.testResult.errback(failure.Failure())
+ testCached.timeout = 2
+
+ def testBwLimit(self):
+ "Bandwidth limiting"
+ b = self.factory.getBackend(self.backendName)
+ b.config.bandwidth_limit = 10000000
+ # We're not testing here that limiting is applied, just that the code runs
+ return self.downloadFile(file='/packages/apt_0.0.1_test.deb')
+ testBwLimit.timeout = 2
+
+ def testAbort(self):
+ "Abort with complete_clientless_downloads=off"
+ import twisted
+ twisted.internet.base.DelayedCall.debug = True
+ b = self.factory.getBackend(self.backendName)
+ b.config.bandwidth_limit = 10
+        # Use a very low limit so the transfer is still in progress when we abort
+ filename, sourcepath, destpath = self.getFilePaths('/packages/apt_0.0.1_test.deb')
+ d = self.doRequest(uriData(filename, http.OK, filePath=sourcepath, abortTransfer=True))
+ d.addCallback(self.Abort2)
+ return d
+    testAbort.timeout = 2
+ def Abort2(self, x):
+ "Connection was aborted, check that fetchers were closed"
+
+
+ # This test does not work with current twisted http client :(
+ #def testPipeline(self):
+ #"Test pipelined GETs"
+ #return self.downloadFiles('/packages/Packages.gz', '/packages/Packages', '/packages/Packages.bz2')
+ #testPipeline.timeout = 2
+
+ def testEmpty(self):
+ "Test download of empty file"
+ return self.downloadFiles('/packages/empty.txt')
+ testEmpty.timeout = 2
+
+ #def testTimeout(self):
+ #pass
+ #testTimeout.todo = True
+
+ #def setFileTime(self):
+ #"cache file modification time is set to same time as server time"
+ #pass
+ #setFileTime.todo = True
+
+ #def doubleDownload(self):
+ #"download, delete from cache, re-request file"
+ #pass
+ #doubleDownload.todo = True
+
+ # More TODO tests:
+ # - file mtime is same as server mtime
+ # - correct file path is entered in databases after download
+
+class HttpBackendTest(TestRequestHelper, BackendTestBase):
+ def setUp(self):
+ """
+        Make a configuration with a single http backend
+        [test_data]
+        backends=http://127.0.0.1:<port of the local test web server>
+ """
+ self.debugname = 'HttpBackendTest'
+ self.httpserver = WebServer()
+ port = self.httpserver.start()
+ uri = "http://127.0.0.1:" + str(port)
+ BackendTestBase.setUp(self, uri)
+ def tearDown(self):
+ self.httpserver.stop()
+ BackendTestBase.tearDown(self)
+
+class FtpBackendTest(TestRequestHelper, BackendTestBase):
+ def setUp(self):
+ """
+        Make a configuration with a single ftp backend
+        [test_data]
+        backends=ftp://127.0.0.1:<port of the local test ftp server>
+ """
+ import test_fetchers
+ import twisted
+ self.debugname = 'FtpBackendTest'
+ self.ftpserver = test_fetchers.FtpServer()
+ port = self.ftpserver.start()
+ uri = "ftp://127.0.0.1:" + str(port)
+ BackendTestBase.setUp(self, uri)
+ def tearDown(self):
+ self.ftpserver.stop()
+ BackendTestBase.tearDown(self)
+
+ # The ftp classes use callLater(0, ...) several times, so allow
+ # those calls to complete
+ reactor.iterate()
+ reactor.iterate()
+
+class RsyncBackendTest(TestRequestHelper, BackendTestBase):
+ def setUp(self):
+ """
+        Make a configuration with a single rsync backend
+        [test_data]
+        backends=rsync://127.0.0.1:<port of the local rsync server>/apt-proxy
+ """
+ self.debugname = 'RsyncBackendTest'
+ self.rsyncserver = RsyncServer()
+ port = self.rsyncserver.start()
+ uri = "rsync://127.0.0.1:" + str(port) + '/apt-proxy'
+ BackendTestBase.setUp(self, uri)
+ def tearDown(self):
+ self.rsyncserver.stop()
+ BackendTestBase.tearDown(self)
+ def testTempFile(self):
+ "rysnc Tempfile is detected"
+ b = self.factory.getBackend(self.backendName)
+ b.config.bandwidth_limit = 100000
+ self.downloadFile(file='/packages/apt_0.0.1_test.deb')
+ reactor.callLater(0.5, self.TempFile2)
+ self.testResult = defer.Deferred()
+ return self.testResult
+ def TempFile2(self):
+ fetcher = self.factory.getBackend(self.backendName).queue.queues.values()[0].fetcher.fetcher
+ fetcher.findRsyncTempFile()
+ file = fetcher.rsyncTempFile
+ log.debug("rsync TempFile is %s" % (file), self.debugname)
+ fetcher.disconnect()
+ self.connection.disconnect()
+ if file is not None:
+ self.testResult.callback("Tempfile is %s" %(file))
+ else:
+ self.testResult.errback(failure.Failure())
+    testTempFile.timeout = 2
\ No newline at end of file
Modified: trunk/debian/changelog
==============================================================================
--- trunk/debian/changelog (original)
+++ trunk/debian/changelog Thu Aug 3 23:54:46 2006
@@ -1,3 +1,68 @@
+apt-proxy (1.9.33+svn) unstable; urgency=low
+
+ * Acknowledge NMU by Luk Claes, thanks! (Closes: #359798)
+ * Change maintainer to myself and add Otavio to Uploaders, at
+ Otavio's request. Thanks Otavio for all your work.
+ * Fix breakage caused by new twisted (Closes: #375677)
+ * http_proxy can now be set in each [backend] section
+ * Add support for username and password in http_proxy parameter.
+ Thanks to Thomas Champagne for the patch
+ (Closes: #323147, #327239)
+ * Move fetchers and cache management into separate files
+ * Add bandwidth_limit configuration parameter to limit download
+ rates (Closes: #306095, #259011)
+ * Add support for rsync port specification
+ * Always check cache directory and logfile permissions when package
+ is installed, thanks Ben Hutchings for the patch (Closes: #312969)
+ * Add more unit tests
+ * Remove obsolete debian/TODO from source package
+ * Update doc/TODO, removing fixed items
+ * Recognise apt package diff files (*.diff/Index). Thanks
+ Florian Weimer for the patch (Closes: #336433)
+ * Add debhelper to Build-Depends, needed for dh_clean in clean target
+ * Remove http scheme, host and port from requested URL (Closes: #374405)
+ * Add download queueing mechanism. Clients can now use HTTP pipelining to
+ request files, and each file will be queued at the corresponding backend.
+ Each separate apt client connection generates a connection to the
+ backend. (Closes: #261802)
+ * HTTP pipelining now works and is enabled by default
+ (Closes: #272206, #141312)
+ * Fix shutdown code (Closes: #359805)
+ * Remove reference to v1 in description (Closes: #337966)
+ * Give a meaningful error message if an empty time is given in the
+ configuration file (Closes: #304611)
+ * Reorganise download process to be more logical, fixing several problems
+ (Closes: #329764)
+ * Remove references to -i parameter in apt-proxy.conf manpage.
+ (Closes: #328983)
+ * In example apt-proxy.conf, remove references to non-US archive
+ (Closes: #329935)
+ * Support updated Python policy, thanks Matthias Klose. (Closes: #377322)
+
+ -- Chris Halls <halls at debian.org> Thu, 3 Aug 2006 18:12:27 +0100
+
+apt-proxy (1.9.33-0.1) unstable; urgency=high
+
+ * Non-maintainer upload.
+ * Update for bsddb module (Closes: #352917).
+ * Add comma in depends (Closes: #353386, #350551, #354668, #355228).
+ * Updated Vietnamese debconf translation (Closes: #313121).
+ * Complete manpage translation with po4a (Closes: #334380).
+ * Fix typos in apt-proxy.conf.5 (Closes: #355225).
+ * Remove extra 'by default' from apt-proxy.8 (Closes: #355229).
+ * Add German debconf translation (Closes: #352484).
+ * Updated French debconf translation (Closes: #328689).
+ * Add Portuguese debconf translation (Closes: #330202).
+ * Add Swedish debconf translation (Closes: #331515).
+ * Add Catalan debconf translation (Closes: #336384).
+ * Updated Danish debconf translation (Closes: #340132).
+ * Updated Dutch debconf translation (Closes: #356210).
+ * Add Spanish debconf translation (Closes: #333874).
+ * Updated Czech debconf translation (Closes: #335361).
+ * Updated French manpage translation (Closes: #332304).
+
+ -- Luk Claes <luk at debian.org> Wed, 29 Mar 2006 00:05:51 +0200
+
apt-proxy (1.9.33) unstable; urgency=low
[ Chris Halls ]
Modified: trunk/debian/control
==============================================================================
--- trunk/debian/control (original)
+++ trunk/debian/control Thu Aug 3 23:54:46 2006
@@ -1,20 +1,21 @@
Source: apt-proxy
Section: admin
Priority: extra
-Maintainer: Otavio Salvador <otavio at debian.org>
-Uploaders: Chris Halls <halls at debian.org>
+Maintainer: Chris Halls <halls at debian.org>
+Uploaders: Otavio Salvador <otavio at debian.org>
Standards-Version: 3.6.2
-Build-Depends-Indep: debhelper (>= 4.1.13), po-debconf, help2man, python-twisted (>= 1.0.0), python, python-apt, po4a (>= 0.18.1)
+XS-Python-Version: current
+Build-Depends: debhelper
+Build-Depends-Indep: debhelper (>= 4.1.13), po-debconf, help2man, python-twisted (>= 2.4), python (>= 2.3.5-1), python-apt, po4a (>= 0.18.1), python-central (>= 0.5)
Package: apt-proxy
Architecture: all
-Depends: debconf (>= 0.5.00) | debconf-2.0, ${python:Depends}, python-twisted (>= 1.3.0-7) | python2.3 (<< 2.3.5-1), python-twisted (>= 1.0.0), python-twisted-web | python-twisted (<< 2.1.0) python-apt (>= 0.5.8), python-bsddb3, bzip2, logrotate, adduser
+Depends: debconf (>= 0.5.00) | debconf-2.0, ${python:Depends}, python-twisted-web (>= 0.6) | python-twisted (<< 2.1.0), python-apt (>= 0.5.8), bzip2, logrotate, adduser
Conflicts: apt-proxy-v2 (<= 1.9.5)
Replaces: apt-proxy-v2 (<= 1.9.5)
Suggests: rsync
-Description: Debian archive proxy and partial mirror builder development
- This is version 2 of apt-proxy which will, when ready, replace apt-proxy v1.
- .
+XB-Python-Version: ${python:Versions}
+Description: Debian archive proxy and partial mirror builder
apt-proxy automatically builds a Debian HTTP mirror based
on requests which pass through the proxy. It's great for
multiple Debian machines on the same network with a slower
Modified: trunk/debian/po/cs.po
==============================================================================
--- trunk/debian/po/cs.po (original)
+++ trunk/debian/po/cs.po Thu Aug 3 23:54:46 2006
@@ -16,9 +16,9 @@
"Project-Id-Version: apt-proxy\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2005-08-18 12:19-0300\n"
-"PO-Revision-Date: 2004-08-13 15:32+0200\n"
+"PO-Revision-Date: 2005-10-23 11:25+0200\n"
"Last-Translator: Jan Outrata <outrataj at upcase.inf.upol.cz>\n"
-"Language-Team: Czech <provoz at debian.cz>\n"
+"Language-Team: Czech <debian-l10n-czech at debian.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=ISO-8859-2\n"
"Content-Transfer-Encoding: 8bit\n"
@@ -49,7 +49,6 @@
#. Type: note
#. Description
#: ../templates:3
-#, fuzzy
msgid ""
"I will build /etc/apt-proxy/apt-proxy-v2.conf based on your old settings if "
"you didn't already have such file. In any case, a backup file will be "
Modified: trunk/debian/po/da.po
==============================================================================
--- trunk/debian/po/da.po (original)
+++ trunk/debian/po/da.po Thu Aug 3 23:54:46 2006
@@ -16,7 +16,7 @@
"Project-Id-Version: apt-proxy 1.9.18\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2005-08-18 12:19-0300\n"
-"PO-Revision-Date: 2004-10-15 18:08+0200\n"
+"PO-Revision-Date: 2005-11-21 07:09+0200\n"
"Last-Translator: Morten Brix Pedersen <morten at wtf.dk>\n"
"Language-Team: Danish <dansk at klid.dk>\n"
"MIME-Version: 1.0\n"
@@ -51,15 +51,15 @@
#. Type: note
#. Description
#: ../templates:3
-#, fuzzy
msgid ""
"I will build /etc/apt-proxy/apt-proxy-v2.conf based on your old settings if "
"you didn't already have such file. In any case, a backup file will be "
"written to /etc/apt-proxy/apt-proxy-v2.conf.backup"
msgstr ""
"Jeg vil bygge /etc/apt-proxy/apt-proxy-v2.conf baseret på dine gamle "
-"indstillinger hvis du allerede har en sådan fil. I hvert tilfælde, vil en "
-"sikkerhedskopi blive skrevet til /etc/apt-proxy/apt-proxy-v2.conf.backup"
+"indstillinger hvis du ikke allerede har en sådan fil. I hvert tilfælde, "
+"vil en sikkerhedskopi blive skrevet til "
+"/etc/apt-proxy/apt-proxy-v2.conf.backup"
#. Type: note
#. Description
Modified: trunk/debian/po/fr.po
==============================================================================
--- trunk/debian/po/fr.po (original)
+++ trunk/debian/po/fr.po Thu Aug 3 23:54:46 2006
@@ -12,16 +12,16 @@
#
msgid ""
msgstr ""
-"Project-Id-Version: apt-proxy_1.9.15\n"
+"Project-Id-Version: apt-proxy_1.9.32\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2005-08-18 12:19-0300\n"
-"PO-Revision-Date: 2004-07-23 15:00+0200\n"
+"PO-Revision-Date: 2005-09-16 20:40+0200\n"
"Last-Translator: Olivier Trichet <olivier.trichet at freesurf.fr>\n"
"Language-Team: French <debian-l10n-french at lists.debian.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=ISO-8859-15\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Generator: KBabel 1.3.1\n"
+"X-Generator: KBabel 1.10.2\n"
#. Type: note
#. Description
@@ -51,7 +51,6 @@
#. Type: note
#. Description
#: ../templates:3
-#, fuzzy
msgid ""
"I will build /etc/apt-proxy/apt-proxy-v2.conf based on your old settings if "
"you didn't already have such file. In any case, a backup file will be "
@@ -65,8 +64,7 @@
#. Type: note
#. Description
#: ../templates:3
-msgid ""
-"There are also other issues documented in /usr/share/doc/apt-proxy/UPGRADING"
+msgid "There are also other issues documented in /usr/share/doc/apt-proxy/UPGRADING"
msgstr ""
"D'autres problèmes liés à cette mise à niveau sont documentés dans /usr/"
"share/doc/apt-proxy/UPGRADING."
@@ -97,3 +95,4 @@
"Il est recommandé de lire ces avertissements ainsi que le fichier /usr/share/"
"doc/apt-proxy/UPGRADING et d'adapter la configuration qui se trouve dans le "
"fichier /etc/apt-proxy/apt-proxy-v2.conf."
+
Modified: trunk/debian/po/nl.po
==============================================================================
--- trunk/debian/po/nl.po (original)
+++ trunk/debian/po/nl.po Thu Aug 3 23:54:46 2006
@@ -16,7 +16,7 @@
"Project-Id-Version: apt-proxy\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2005-08-18 12:19-0300\n"
-"PO-Revision-Date: 2004-11-09 01:09+0100\n"
+"PO-Revision-Date: 2006-03-10 14:00+0100\n"
"Last-Translator: Bart Cornelis <cobaco at linux.be>\n"
"Language-Team: debian-l10n-duth <debian-l10n-dutch at lists.debian.org>\n"
"MIME-Version: 1.0\n"
@@ -38,35 +38,20 @@
#. Type: note
#. Description
#: ../templates:3
-msgid ""
-"apt-proxy has been rewritten in python and the new configuration file format "
-"is incompatible with previous version. Hopefully you will like the new "
-"format better :)"
-msgstr ""
-"apt-proxy is herschreven in python en het nieuwe bestandsformaat van het "
-"configuratiebestand is incompatibel met de de vorige versie. Hopelijk vindt "
-"u het nieuwe formaat beter :-)"
+msgid "apt-proxy has been rewritten in python and the new configuration file format is incompatible with previous version. Hopefully you will like the new format better :)"
+msgstr "apt-proxy is herschreven in python en het nieuwe bestandsformaat van het configuratiebestand is incompatibel met de de vorige versie. Hopelijk vindt u het nieuwe formaat beter :-)"
#. Type: note
#. Description
#: ../templates:3
-#, fuzzy
-msgid ""
-"I will build /etc/apt-proxy/apt-proxy-v2.conf based on your old settings if "
-"you didn't already have such file. In any case, a backup file will be "
-"written to /etc/apt-proxy/apt-proxy-v2.conf.backup"
-msgstr ""
-"Als u dit bestand nog niet heeft, wordt /etc/apt-proxy/apt-proxy-v2.conf "
-"opgebouwd aan de hand van uw oude instellingen. Zowieso wordt er een "
-"reservekopie opgeslagen onder de naam /etc/apt-proxy/apt-proxy-v2.conf.backup"
+msgid "I will build /etc/apt-proxy/apt-proxy-v2.conf based on your old settings if you didn't already have such file. In any case, a backup file will be written to /etc/apt-proxy/apt-proxy-v2.conf.backup"
+msgstr "Wanneer /etc/apt-proxy/apt-proxy-v2.conf nog niet bestaat wordt deze opgebouwd aan de hand van uw oude instellingen. Er wordt altijd een reservekopie opgeslagen onder de naam /etc/apt-proxy/apt-proxy-v2.conf.backup"
#. Type: note
#. Description
#: ../templates:3
-msgid ""
-"There are also other issues documented in /usr/share/doc/apt-proxy/UPGRADING"
-msgstr ""
-"Er zijn verdere issues gedocumenteerd in /usr/share/doc/apt-proxy/UPGRADING"
+msgid "There are also other issues documented in /usr/share/doc/apt-proxy/UPGRADING"
+msgstr "Er zijn verdere issues gedocumenteerd in /usr/share/doc/apt-proxy/UPGRADING"
#. Type: note
#. Description
@@ -77,20 +62,12 @@
#. Type: note
#. Description
#: ../templates:19
-msgid ""
-"The upgrading script dumped some warnings and they have been mailed to "
-"root at localhost."
-msgstr ""
-"Het opwaarderingsscript gaf enkele waarschuwingen; deze zijn naar "
-"root at localhost gemaild."
+msgid "The upgrading script dumped some warnings and they have been mailed to root at localhost."
+msgstr "Het opwaarderingsscript gaf enkele waarschuwingen; deze zijn naar root at localhost gemaild."
#. Type: note
#. Description
#: ../templates:19
-msgid ""
-"You should read those warnings and /usr/share/doc/apt-proxy/UPGRADING and "
-"revise your configuration (/etc/apt-proxy/apt-proxy-v2.conf)"
-msgstr ""
-"U kunt deze waarschuwingen en /usr/share/doc/apt-proxy/UPGRADING best "
-"nalezen en vervolgens uw configuratie herzien (/etc/apt-prox/apt-proxy-v2."
-"conf)"
+msgid "You should read those warnings and /usr/share/doc/apt-proxy/UPGRADING and revise your configuration (/etc/apt-proxy/apt-proxy-v2.conf)"
+msgstr "U kunt deze waarschuwingen en /usr/share/doc/apt-proxy/UPGRADING best nalezen en vervolgens uw configuratie herzien (/etc/apt-prox/apt-proxy-v2.conf)"
+
Modified: trunk/debian/po/vi.po
==============================================================================
--- trunk/debian/po/vi.po (original)
+++ trunk/debian/po/vi.po Thu Aug 3 23:54:46 2006
@@ -4,10 +4,10 @@
#
msgid ""
msgstr ""
-"Project-Id-Version: apt-proxy 1.9.28\n"
+"Project-Id-Version: apt-proxy 1.9.30\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2005-08-18 12:19-0300\n"
-"PO-Revision-Date: 2005-05-12 23:46+0930\n"
+"PO-Revision-Date: 2005-06-12 12:04+0930\n"
"Last-Translator: Clytie Siddall <clytie at riverland.net.au>\n"
"Language-Team: Vietnamese <gnomevi-list at lists.sourceforge.net>\n"
"MIME-Version: 1.0\n"
@@ -19,7 +19,7 @@
#. Description
#: ../templates:3
msgid "Upgrading from pre-v1.9 packages."
-msgstr "Cáºp nhât từ các gói tin trưá»c phiên bản 1.9..."
+msgstr "Cáºp nhât từ các gói tin trưá»c phiên bản 1.9."
#. Type: note
#. Description
@@ -47,9 +47,9 @@
"you didn't already have such file. In any case, a backup file will be "
"written to /etc/apt-proxy/apt-proxy-v2.conf.backup"
msgstr ""
-"Sẽ xây dụng /etc/apt-proxy/apt-proxy-v2.conf Äá»±a và o các thiết láºp cÅ© cá»§a "
+"Sẽ xây dụng «/etc/apt-proxy/apt-proxy-v2.conf» Äá»±a và o các thiết láºp cÅ© cá»§a "
"bạn nếu bạn chưa có táºp tin như váºy. Trong bất cứ trưá»ng hợp nà o, sẽ ghi má»t "
-"táºp tin lưu trữ và o /etc/apt-proxy/apt-proxy-v2.conf.backup"
+"táºp tin lưu trữ và o «/etc/apt-proxy/apt-proxy-v2.conf.backup»."
#. Type: note
#. Description
@@ -57,8 +57,8 @@
msgid ""
"There are also other issues documented in /usr/share/doc/apt-proxy/UPGRADING"
msgstr ""
-"CÅ©ng có má»t sá» vấn Äá» khác ÄÆ°á»£c diá»
n tả trong tà i liá»u /usr/share/doc/apt-"
-"proxy/UPGRADING (cáºp nháºt)"
+"CÅ©ng có má»t sá» vấn Äá» khác ÄÆ°á»£c diá»
n tả trong tà i liá»u «/usr/share/doc/apt-"
+"proxy/UPGRADING» (cáºp nháºt)"
#. Type: note
#. Description
@@ -73,7 +73,7 @@
"The upgrading script dumped some warnings and they have been mailed to "
"root at localhost."
msgstr ""
-"Táºp lá»nh cáºp nháºt Äã Äá» má»t sá» cảnh báo mÃ ÄÆ°á»£c gá»i cho root at localhost."
+"Táºp lá»nh cáºp nháºt Äã Äá» má»t sá» cảnh báo mÃ ÄÆ°á»£c gá»i cho «root at localhost»."
#. Type: note
#. Description
@@ -82,5 +82,6 @@
"You should read those warnings and /usr/share/doc/apt-proxy/UPGRADING and "
"revise your configuration (/etc/apt-proxy/apt-proxy-v2.conf)"
msgstr ""
-"Bạn hãy Äá»c những cảnh báo ấy và tà i liá»u /usr/share/doc/apt-proxy/UPGRADING "
-"và cÅ©ng sá»a Äá»i cấu hình cá»§a bạn (/etc/apt-proxy/apt-proxy-v2.conf)."
+"Bạn hãy Äá»c những cảnh báo ấy và tà i liá»u «/usr/share/doc/apt-proxy/"
+"UPGRADING» và cÅ©ng sá»a Äá»i cấu hình cá»§a bạn («/etc/apt-proxy/apt-proxy-v2."
+"conf»)."
Modified: trunk/debian/postinst
==============================================================================
--- trunk/debian/postinst (original)
+++ trunk/debian/postinst Thu Aug 3 23:54:46 2006
@@ -11,63 +11,63 @@
set -e
case "$1" in
configure)
- # Check if the user exist and do what is needed
- if ! id $APTPROXY_USER > /dev/null 2>&1; then
- # Create user
- echo creating $APTPROXY_USER user...
- adduser --quiet --system --ingroup nogroup \
- --home $CACHEDIR --no-create-home $APTPROXY_USER
-
- # Make apt-proxy user own cache directory
- chown -R $APTPROXY_USER $CACHEDIR
- # Create a blank logfile owned by apt-proxy user
- touch $APTPROXY_LOGFILE
- chown $APTPROXY_USER:adm $APTPROXY_LOGFILE
- chmod 640 $APTPROXY_LOGFILE
- fi
-
- PREV="$2"
- db_fget $NAME/upgrading-v2 had_v2_conf || true
- had_v2_conf=$RET
-
- if dpkg --compare-versions "$PREV" lt-nl 1.9; then
- if [ -x /usr/sbin/update-inetd ]; then
- echo "Disabling inetd's apt-proxy v1 line."
- update-inetd --comment-chars "#<apt-proxy-v2># " --disable 9999
- invoke-rc.d inetd restart
- fi
- db_fset $NAME/upgrading-v2 seen false || true
- db_input medium $NAME/upgrading-v2 || true
- db_go
- fi
- if
- dpkg --compare-versions "$PREV" lt-nl 1.9 && ( ! $had_v2_conf ) \
- && [ -r /etc/apt-proxy/apt-proxy.conf ]
- then
- echo Customising configuration file based on old settings.
- cp -a --backup=numbered /etc/apt-proxy/apt-proxy-v2.conf \
- /etc/apt-proxy/apt-proxy-v2.conf.backup
- OUTPUT=$(/usr/sbin/apt-proxy-v1tov2 /etc/apt-proxy/apt-proxy.conf \
- /etc/apt-proxy/apt-proxy-v2.conf.backup\
- 2>&1 \
- > /etc/apt-proxy/apt-proxy-v2.conf )
- echo "$OUTPUT" > /var/log/apt-proxy-v1tov2.log
- chown aptproxy:adm /var/log/apt-proxy-v1tov2.log
- chmod 640 /var/log/apt-proxy-v1tov2.log
- echo "The log of conversion was save in /var/log/apt-proxy-v1tov2.log."
- if [ -n "$OUTPUT" ]; then
- db_fset $NAME/upgrading-v2-result seen false
- db_input high $NAME/upgrading-v2-result || true
- db_go
- fi
- fi
- # Older versions got database permissions wrong #288829
- if dpkg --compare-versions "$PREV" lt-nl 1.9.27; then
- if [ -d /var/cache/apt-proxy/.apt-proxy ]; then
- echo "Fixing database owner."
- chown -R aptproxy.nogroup /var/cache/apt-proxy/.apt-proxy
- fi
- fi
+        # Check if the user exists and do what is needed
+ if ! id $APTPROXY_USER > /dev/null 2>&1; then
+ # Create user
+ echo creating $APTPROXY_USER user...
+ adduser --quiet --system --ingroup nogroup \
+ --home $CACHEDIR --no-create-home $APTPROXY_USER
+ fi
+
+ # Make apt-proxy user own cache directory
+ chown -R $APTPROXY_USER $CACHEDIR
+ # Create a blank logfile owned by apt-proxy user
+ touch $APTPROXY_LOGFILE
+ chown $APTPROXY_USER:adm $APTPROXY_LOGFILE
+ chmod 640 $APTPROXY_LOGFILE
+
+ PREV="$2"
+ db_fget $NAME/upgrading-v2 had_v2_conf || true
+ had_v2_conf=$RET
+
+ if dpkg --compare-versions "$PREV" lt-nl 1.9; then
+ if [ -x /usr/sbin/update-inetd ]; then
+ echo "Disabling inetd's apt-proxy v1 line."
+ update-inetd --comment-chars "#<apt-proxy-v2># " --disable 9999
+ invoke-rc.d inetd restart
+ fi
+ db_fset $NAME/upgrading-v2 seen false || true
+ db_input medium $NAME/upgrading-v2 || true
+ db_go
+ fi
+ if
+ dpkg --compare-versions "$PREV" lt-nl 1.9 && ( ! $had_v2_conf ) \
+ && [ -r /etc/apt-proxy/apt-proxy.conf ]
+ then
+ echo Customising configuration file based on old settings.
+ cp -a --backup=numbered /etc/apt-proxy/apt-proxy-v2.conf \
+ /etc/apt-proxy/apt-proxy-v2.conf.backup
+ OUTPUT=$(/usr/sbin/apt-proxy-v1tov2 /etc/apt-proxy/apt-proxy.conf \
+ /etc/apt-proxy/apt-proxy-v2.conf.backup\
+ 2>&1 \
+ > /etc/apt-proxy/apt-proxy-v2.conf )
+ echo "$OUTPUT" > /var/log/apt-proxy-v1tov2.log
+ chown aptproxy:adm /var/log/apt-proxy-v1tov2.log
+ chmod 640 /var/log/apt-proxy-v1tov2.log
+ echo "The log of conversion was save in /var/log/apt-proxy-v1tov2.log."
+ if [ -n "$OUTPUT" ]; then
+ db_fset $NAME/upgrading-v2-result seen false
+ db_input high $NAME/upgrading-v2-result || true
+ db_go
+ fi
+ fi
+ # Older versions got database permissions wrong #288829
+ if dpkg --compare-versions "$PREV" lt-nl 1.9.27; then
+ if [ -d /var/cache/apt-proxy/.apt-proxy ]; then
+ echo "Fixing database owner."
+ chown -R aptproxy.nogroup /var/cache/apt-proxy/.apt-proxy
+ fi
+ fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
Modified: trunk/debian/rules
==============================================================================
--- trunk/debian/rules (original)
+++ trunk/debian/rules Thu Aug 3 23:54:46 2006
@@ -6,8 +6,8 @@
build-stamp:
dh_testdir
- po4a doc/po4a.cfg
$(MAKE) -C doc apt-proxy-import.8
+ po4a doc/po4a.cfg
touch build-stamp
clean:
@@ -34,6 +34,7 @@
dh_installman
dh_installchangelogs
dh_installlogrotate
+ dh_pycentral
dh_python
dh_installinit
Modified: trunk/doc/TODO
==============================================================================
--- trunk/doc/TODO (original)
+++ trunk/doc/TODO Thu Aug 3 23:54:46 2006
@@ -1,8 +1,6 @@
File permision's on the cache are too strict, there is nothing wrong in all
users reading the debs and Packages files.
-it looks like it is leaking FileVerifiers
-
Verification times out with big files and considers them corrupted even if
they are OK.
- We could have three levels of checking.
@@ -13,10 +11,6 @@
- when we find that stat doesn't match, we schedule a check and serve
anyway. We don't check before serving.
-To get streaming from rsync without the LD_PRELOAD hack:
- - Since we know the pid, we can read from /proc/$pid/fd/3 and get the
- streaming from there ala 'tail -f'
-
---------------- "Not so urgent" mark ---------------------------
Handle database corruption in a more conservative way.
@@ -24,17 +18,11 @@
Support /etc/init.d/apt-proxy reload as kill -HUP
- auto reload apt-proxy.conf support too?
-Report an error at /etc/init.d/apt-proxy start if something is already
-listening on the port (current behaviour is no error and message in logfile only)
-
Add a configuration parameter which says 'never delete the final version of
a package, even if older than max_age'
Maybe it should be "never delete files which still appear in current
Packages.gz listings"
-When a file transfer is finished, move the temporary file instead of coping
-it.
-
Mirror house-keeping:
MAX_VERSIONS: keep track of versions per distro (potato/woody/sid)
python-apt should help on that
@@ -56,14 +44,8 @@
Maybe we should write an AptProxyClientFile so it can be added to
factory.runningClients to prevent that.
-Cleanup orphan entries in packages.db also
-
Implement HEAD method
-Parse options:
- --conf
- ...
-
per backend 'min_age'
we won't bother to update a Packages file before it is "min_age" old
@@ -87,8 +69,6 @@
sources.list wouldn't need to be modified and apt could be forced to ignore
http_proxy environment variable.
-Add support to make apt-proxy request to a another proxy like squid.
-
consider doing file integrity checks after downloading instead of before
serving to improve performance.
@@ -100,9 +80,6 @@
- to access mirrors which may have it.
- sincronizing two caches
-Consider using apt-build to get "auto recompiled for your processor" packages.
- - warning apt-build changes your system compiler.. ugh. Chris
-
Deal with permissions problems in the cache gracefully, not like this:
22/08/2002 17:23 [AptProxy,1,192.168.60.24] [debug:9]CHECKING_CACHED
@@ -126,22 +103,5 @@
Accept numeric ports in backend configuration
----------------- Clean up of naming conventions -------------
-
-To help Chris get his head around the code :)
-
-Some suggestions that were hashed out:
-
-- Reduce imports at top of file -
-16:54 < ranty> but I don't understand how you plan to fix the namespace problem.
-16:56 < haggai> oh, not really. My only suggestion is to avoid excessibe use of 'import', or maybe to split into smaller source files
-move 'import' to the place where it is needed if posible.
-
----------------- OLD LIST from v1 ---------------------------
-TODO list for apt-proxy
-
-- Return a better error message if the backend servers do not exist, rather
- than 'directory does not exist'
-
- Add debconf questions for default user and cache directory
Modified: trunk/doc/apt-proxy-import.8.inc
==============================================================================
--- trunk/doc/apt-proxy-import.8.inc (original)
+++ trunk/doc/apt-proxy-import.8.inc Thu Aug 3 23:54:46 2006
@@ -19,14 +19,14 @@
.PP
2. Import files from apt's cache:
.nf
- apt\-proxy\-import \-i /var/cache/apt/archives
+ apt\-proxy\-import /var/cache/apt/archives
.fi
[IMPORTING APT\-MOVE CACHE]
You can import the apt\-move generated cache into apt\-proxy using the following command:
.PP
.nf
- apt\-proxy\-import \-r \-i /var/cache/apt\-move
+ apt\-proxy\-import \-r /var/cache/apt\-move
.fi
.PP
This tells apt\-proxy\-import to recuse over each directory in the apt\-move cache.
Modified: trunk/doc/apt-proxy.8
==============================================================================
--- trunk/doc/apt-proxy.8 (original)
+++ trunk/doc/apt-proxy.8 Thu Aug 3 23:54:46 2006
@@ -11,7 +11,7 @@
\fBapt\-proxy\fP is a python program designed to be run as an stand alone
server via twistd, and provides a clean, caching, intelligent proxy for
\fBapt\-get\fP, which speaks HTTP to apt\-get clients, and http, ftp or rsync to
-the backend server(s)\&. apt-proxy listens by default on port 9999 by default\&.
+the backend server(s)\&. apt-proxy listens by default on port 9999\&.
.PP
.TP
\fB\-h\fR, \fB\-\-help\fR
Modified: trunk/doc/apt-proxy.conf
==============================================================================
--- trunk/doc/apt-proxy.conf (original)
+++ trunk/doc/apt-proxy.conf Thu Aug 3 23:54:46 2006
@@ -44,6 +44,9 @@
;; Use HTTP proxy?
;http_proxy = host:port
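+;; http_proxy can also carry credentials for an authenticating upstream
+;; proxy; going by this release's changelog the form is assumed to be
+;; user:password@host:port (not verified here)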
+;; Limit download rate from backend servers (http and rsync only), in bytes/sec
+;bandwidth_limit = 100000
+
;; Enable HTTP pipelining within apt-proxy (for test purposes)
;disable_pipelining=0
@@ -85,15 +88,6 @@
http://ftp2.de.debian.org/debian
ftp://ftp.uk.debian.org/debian
-
-[debian-non-US]
-;; Debian debian-non-US archive
-;timeout will be the global value
-backends =
- http://ftp.uk.debian.org/debian-non-US
- http://ftp.de.debian.org/debian-non-US
- ftp://ftp.uk.debian.org/debian
-
[security]
;; Debian security archive
backends =
Modified: trunk/doc/apt-proxy.conf.5
==============================================================================
--- trunk/doc/apt-proxy.conf.5 (original)
+++ trunk/doc/apt-proxy.conf.5 Thu Aug 3 23:54:46 2006
@@ -11,7 +11,7 @@
make upgrading from v1 easier.
The configuration file is divided up into several sections, where each \fI[resource]\fP
-section defines a seperate resource. The \fBDEFAULT\fP section applies to all resources.
+section defines a separate resource. The \fBDEFAULT\fP section applies to all resources.
The supplied \fIapt\-proxy\&.conf\fP will work out of the box, but it is best to
change the backends you use to a mirror closer to you. There are some in the
@@ -77,7 +77,7 @@
.TP
.B dynamic_backends
-By default apt\-proxy will add HTTP backends dynamicaly if not already
+By default apt\-proxy will add HTTP backends dynamically if not already
defined. Specify \fBoff\fP to restrict the available backends to those
listed in the configuration file. Default: on
@@ -88,6 +88,12 @@
disabled by default until this is fixed. Set to \fB0\fP to enable experimental
http pipelining. Default: 1
+.TP
+.B bandwidth_limit = \fIamount\fR
+When downloading from a backend server, limit the download speed to
+\fIamount\fR bytes per second. Note this applies to \fBhttp\fP and \fBrsync\fP
+backends only. Default: no limit
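+For example, to cap each backend download at 100 kilobytes per second:
+.nf
+  bandwidth_limit = 102400
+.fi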
+
.PP
.SH RESOURCES
All other sections in the configuration file will be interpreted as resource
@@ -112,6 +118,11 @@
.B passive_ftp
Override the global setting of passive_ftp
+.TP
+.B bandwidth_limit
+Set a bandwidth limit for downloads for this resource, overriding the global
+bandwidth_limit
+
.SH CONFIGURATION EXAMPLES
To access a resource that's listed under a specific section name, simply append
Modified: trunk/doc/po/apt-proxy.pot
==============================================================================
--- trunk/doc/po/apt-proxy.pot (original)
+++ trunk/doc/po/apt-proxy.pot Thu Aug 3 23:54:46 2006
@@ -6,7 +6,7 @@
msgid ""
msgstr ""
"Project-Id-Version: PACKAGE VERSION\n"
-"POT-Creation-Date: 2005-08-19 14:21-0300\n"
+"POT-Creation-Date: 2006-03-29 0:11+0200\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL at ADDRESS>\n"
"Language-Team: LANGUAGE <LL at li.org>\n"
@@ -40,7 +40,7 @@
msgstr ""
# type: SH
-#: doc/apt-proxy.8:5 doc/apt-proxy-v1tov2.8:4
+#: doc/apt-proxy.8:5 doc/apt-proxy-v1tov2.8:4 doc/apt-proxy-import.8:5
#, no-wrap
msgid "SYNOPSIS"
msgstr ""
@@ -51,7 +51,7 @@
msgstr ""
# type: SH
-#: doc/apt-proxy.8:10 doc/apt-proxy-v1tov2.8:8 doc/apt-proxy.conf.5:6
+#: doc/apt-proxy.8:10 doc/apt-proxy-v1tov2.8:8 doc/apt-proxy-import.8:8 doc/apt-proxy.conf.5:6
#, no-wrap
msgid "DESCRIPTION"
msgstr ""
@@ -62,11 +62,11 @@
"B<apt-proxy> is a python program designed to be run as an stand alone server "
"via twistd, and provides a clean, caching, intelligent proxy for B<apt-get>, "
"which speaks HTTP to apt-get clients, and http, ftp or rsync to the backend "
-"server(s)\\&. apt-proxy listens by default on port 9999 by default\\&."
+"server(s)\\&. apt-proxy listens by default on port 9999\\&."
msgstr ""
# type: TP
-#: doc/apt-proxy.8:16
+#: doc/apt-proxy.8:16 doc/apt-proxy-import.8:29
#, no-wrap
msgid "B<-h>, B<--help>"
msgstr ""
@@ -77,7 +77,7 @@
msgstr ""
# type: TP
-#: doc/apt-proxy.8:19
+#: doc/apt-proxy.8:19 doc/apt-proxy-import.8:32
#, no-wrap
msgid "B<-c>, B<--config-file=>"
msgstr ""
@@ -161,7 +161,7 @@
msgstr ""
# type: SH
-#: doc/apt-proxy.8:56 doc/apt-proxy-v1tov2.8:18 doc/apt-proxy.conf.5:148
+#: doc/apt-proxy.8:56 doc/apt-proxy-v1tov2.8:18 doc/apt-proxy-import.8:66 doc/apt-proxy.conf.5:186
#, no-wrap
msgid "FILES"
msgstr ""
@@ -172,7 +172,7 @@
msgstr ""
# type: SH
-#: doc/apt-proxy.8:61 doc/apt-proxy-v1tov2.8:24 doc/apt-proxy.conf.5:152
+#: doc/apt-proxy.8:61 doc/apt-proxy-v1tov2.8:24 doc/apt-proxy-import.8:77 doc/apt-proxy.conf.5:190
#, no-wrap
msgid "SEE ALSO"
msgstr ""
@@ -183,7 +183,7 @@
msgstr ""
# type: SH
-#: doc/apt-proxy.8:68 doc/apt-proxy.conf.5:161
+#: doc/apt-proxy.8:68 doc/apt-proxy-import.8:68 doc/apt-proxy.conf.5:199
#, no-wrap
msgid "BUGS"
msgstr ""
@@ -197,7 +197,7 @@
msgstr ""
# type: SH
-#: doc/apt-proxy.8:73 doc/apt-proxy-v1tov2.8:22
+#: doc/apt-proxy.8:73 doc/apt-proxy-v1tov2.8:22 doc/apt-proxy-import.8:75
#, no-wrap
msgid "AUTHORS"
msgstr ""
@@ -221,20 +221,22 @@
msgid "November 2002"
msgstr ""
+#. DO NOT MODIFY THIS FILE! It was generated by help2man 1.36.
# type: TH
-#: doc/apt-proxy-v1tov2.8:1
+#: doc/apt-proxy-v1tov2.8:1 doc/apt-proxy-import.8:2
#, no-wrap
msgid "Debian GNU/Linux"
msgstr ""
+#. DO NOT MODIFY THIS FILE! It was generated by help2man 1.36.
# type: TH
-#: doc/apt-proxy-v1tov2.8:1
+#: doc/apt-proxy-v1tov2.8:1 doc/apt-proxy-import.8:2
#, no-wrap
-msgid " "
+msgid " "
msgstr ""
# type: SH
-#: doc/apt-proxy-v1tov2.8:2 doc/apt-proxy.conf.5:3
+#: doc/apt-proxy-v1tov2.8:2 doc/apt-proxy-import.8:3 doc/apt-proxy.conf.5:3
#, no-wrap
msgid "NAME"
msgstr ""
@@ -275,12 +277,12 @@
msgstr ""
# type: Plain text
-#: doc/apt-proxy-v1tov2.8:20 doc/apt-proxy.conf.5:150
+#: doc/apt-proxy-v1tov2.8:20 doc/apt-proxy.conf.5:188
msgid "/etc/apt-proxy/apt-proxy\\&.conf"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-v1tov2.8:22 doc/apt-proxy.conf.5:152
+#: doc/apt-proxy-v1tov2.8:22 doc/apt-proxy.conf.5:190
msgid "/etc/apt-proxy/apt-proxy-v2\\&.conf"
msgstr ""
@@ -290,106 +292,223 @@
msgstr ""
# type: Plain text
-#: doc/apt-proxy-v1tov2.8:29 doc/apt-proxy-import.8.inc:42
+#: doc/apt-proxy-v1tov2.8:29 doc/apt-proxy-import.8:82
msgid "B<apt-proxy>(8), B<apt-proxy.conf>(5)"
msgstr ""
-#. Man page was originaly copied from apt-proxy man page.
+#. DO NOT MODIFY THIS FILE! It was generated by help2man 1.36.
+# type: TH
+#: doc/apt-proxy-import.8:2
+#, no-wrap
+msgid "APT-PROXY-IMPORT"
+msgstr ""
+
+#. DO NOT MODIFY THIS FILE! It was generated by help2man 1.36.
+# type: TH
+#: doc/apt-proxy-import.8:2
+#, no-wrap
+msgid "March 2006"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:5
+msgid "apt-proxy-import - Import packages into the apt-proxy cache."
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:8
+msgid "B<apt-proxy-import> [I<options>] I<E<lt>filenameE<gt> >..."
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:11
+msgid ""
+"WARNING: apt-proxy has not been tested under this version of twisted "
+"(2.2.0). WARNING: although it should work without problem."
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:13
+msgid "apt-proxy-import B<-r> [options] E<lt>directoryE<gt> ..."
+msgstr ""
+
+# type: SH
+#: doc/apt-proxy-import.8:13
+#, no-wrap
+msgid "OPTIONS"
+msgstr ""
+
+# type: TP
+#: doc/apt-proxy-import.8:14
+#, no-wrap
+msgid "B<-V>, B<--version>"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:17
+msgid "print version and quit"
+msgstr ""
+
+# type: TP
+#: doc/apt-proxy-import.8:17
+#, no-wrap
+msgid "B<-v>, B<--verbose>"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:20
+msgid "give verbose output"
+msgstr ""
+
+# type: TP
+#: doc/apt-proxy-import.8:20
+#, no-wrap
+msgid "B<-d>, B<--debug>"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:23
+msgid "debug output"
+msgstr ""
+
+# type: TP
+#: doc/apt-proxy-import.8:23
+#, no-wrap
+msgid "B<-q>, B<--quiet>"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:26
+msgid "try not to write messages to stdout"
+msgstr ""
+
+# type: TP
+#: doc/apt-proxy-import.8:26
+#, no-wrap
+msgid "B<-r>, B<--recursive>"
+msgstr ""
+
# type: Plain text
-#: doc/apt-proxy-import.8.inc:4
-msgid "[NAME] apt-proxy-import - Import packages into the apt-proxy cache."
+#: doc/apt-proxy-import.8:29
+msgid "recurse into subdirectories"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:9
+#: doc/apt-proxy-import.8:32
+msgid "Display this help and exit."
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:35
+msgid "Configuration file"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:37
+msgid "apt-proxy-import imports .deb files into the apt-proxy cache."
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:41
msgid ""
-"/apt-proxy-import imports / It uses the package lists to determine where "
-"each file should be placed, so you should run B<'apt-get update'> to allow "
-"apt-proxy to update the package lists before running apt-proxy-import."
+"It uses the package lists to determine where each file should be placed, so "
+"you should run B<'apt-get update'> to allow apt-proxy to update the package "
+"lists before running apt-proxy-import."
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:44
+msgid "WARNING: although it should work without problem. apt-proxy-import 1.9.x"
+msgstr ""
+
+# type: SH
+#: doc/apt-proxy-import.8:44
+#, no-wrap
+msgid "USING TO BOOTSTRAP A NEW APT-PROXY CACHE"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:14
+#: doc/apt-proxy-import.8:48
msgid ""
-"[USING TO BOOTSTRAP A NEW APT-PROXY CACHE] If you have been using apt "
-"standalone, you probably have built up a large collection of .debs or .udebs "
-"in apt's cache directory. You can import these files into apt-proxy as "
-"follows:"
+"If you have been using apt standalone, you probably have built up a large "
+"collection of .debs or .udebs in apt's cache directory. You can import "
+"these files into apt-proxy as follows:"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:16
+#: doc/apt-proxy-import.8:50
msgid "1. Update apt-proxy's filelists:"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:18
+#: doc/apt-proxy-import.8:52
#, no-wrap
msgid " apt-get update\n"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:21
+#: doc/apt-proxy-import.8:55
msgid "2. Import files from apt's cache:"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:23
+#: doc/apt-proxy-import.8:57
#, no-wrap
msgid " apt-proxy-import -i /var/cache/apt/archives\n"
msgstr ""
+# type: SH
+#: doc/apt-proxy-import.8:58
+#, no-wrap
+msgid "IMPORTING APT-MOVE CACHE"
+msgstr ""
+
# type: Plain text
-#: doc/apt-proxy-import.8.inc:27
+#: doc/apt-proxy-import.8:60
msgid ""
-"[IMPORTING APT-MOVE CACHE] You can import the apt-move generated cache into "
-"apt-proxy using the following command:"
+"You can import the apt-move generated cache into apt-proxy using the "
+"following command:"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:30
+#: doc/apt-proxy-import.8:63
#, no-wrap
msgid " apt-proxy-import -r -i /var/cache/apt-move\n"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:33
+#: doc/apt-proxy-import.8:66
msgid ""
"This tells apt-proxy-import to recuse over each directory in the apt-move "
"cache."
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:36
-msgid "[FILES] \\ /etc/apt-proxy/apt-proxy\\&.conf"
+#: doc/apt-proxy-import.8:68
+msgid "\\ /etc/apt-proxy/apt-proxy\\&.conf"
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:38
-msgid "[SEE ALSO]"
-msgstr ""
-
-# type: Plain text
-#: doc/apt-proxy-import.8.inc:48
+#: doc/apt-proxy-import.8:71
msgid ""
-"[BUGS] apt-proxy-import does not use I<max_age> or I<max_versions> to clean "
-"the cache directory on import."
+"apt-proxy-import does not use I<max_age> or I<max_versions> to clean the "
+"cache directory on import."
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:50
+#: doc/apt-proxy-import.8:73
msgid "It does not yet import source.tar.gz or Packages files."
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:52
+#: doc/apt-proxy-import.8:75
msgid "You must run it as the apt-proxy user or as root."
msgstr ""
# type: Plain text
-#: doc/apt-proxy-import.8.inc:54
+#: doc/apt-proxy-import.8:77
msgid ""
-"[AUTHORS] Chris Halls E<lt>halls at debian.orgE<gt>, Manuel Estrada Sainz "
+"Chris Halls E<lt>halls at debian.orgE<gt>, Manuel Estrada Sainz "
"E<lt>ranty at debian.orgE<gt>"
msgstr ""
@@ -404,7 +523,7 @@
# type: TH
#: doc/apt-proxy.conf.5:2
#, no-wrap
-msgid "21 Nov 2002"
+msgid "5 Jan 2006"
msgstr ""
# type: Plain text
@@ -416,72 +535,89 @@
#: doc/apt-proxy.conf.5:9
msgid ""
"B<apt-proxy\\&.conf> is the configuration file for apt-proxy. When "
-"apt-proxy starts up, it will read B</etc/apt-proxy/apt-proxy\\&.conf>\\&."
+"apt-proxy starts up, it will read I</etc/apt-proxy/apt-proxy\\&.conf>\\&."
msgstr ""
# type: Plain text
#: doc/apt-proxy.conf.5:12
msgid ""
-"B</etc/apt-proxy/apt-proxy-v2\\&.conf> will be read instead if it exists to "
+"I</etc/apt-proxy/apt-proxy-v2\\&.conf> will be read instead if it exists to "
"make upgrading from v1 easier."
msgstr ""
+# type: Plain text
+#: doc/apt-proxy.conf.5:15
+msgid ""
+"The configuration file is divided up into several sections, where each "
+"I<[resource]> section defines a separate resource. The B<DEFAULT> section "
+"applies to all resources."
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:19
+msgid ""
+"The supplied I<apt-proxy\\&.conf> will work out of the box, but it is best "
+"to change the backends you use to a mirror closer to you. There are some in "
+"the default file, and it may be enough just to reorder the lines in the "
+"file\\&."
+msgstr ""
+
# type: SH
-#: doc/apt-proxy.conf.5:13
+#: doc/apt-proxy.conf.5:21
#, no-wrap
msgid "[DEFAULT]"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:15
+#: doc/apt-proxy.conf.5:23
msgid "This section holds options global to the whole apt-proxy:"
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:16
+#: doc/apt-proxy.conf.5:24
#, no-wrap
msgid "B<address>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:20
+#: doc/apt-proxy.conf.5:28
msgid ""
"IP address on which apt-proxy will listen for requests. Multiple addresses "
"have a empty space between it."
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:21
+#: doc/apt-proxy.conf.5:29
#, no-wrap
msgid "B<port>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:24
+#: doc/apt-proxy.conf.5:32
msgid "TCP port on which apt-proxy will listen for requests."
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:25
+#: doc/apt-proxy.conf.5:33
#, no-wrap
msgid "B<min_refresh_delay>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:30
+#: doc/apt-proxy.conf.5:38
msgid ""
"If different from B<off>, means that Packages and other control files will "
"not be refreshed more frequently than this number of seconds\\&."
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:31 doc/apt-proxy.conf.5:92
+#: doc/apt-proxy.conf.5:39 doc/apt-proxy.conf.5:96
#, no-wrap
msgid "B<timeout>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:36
+#: doc/apt-proxy.conf.5:44
msgid ""
"Maximum I/O timeout in seconds for backend transfers. Default: 30 seconds. "
"If no response is received from a backend server in this time, apt-proxy "
@@ -489,24 +625,24 @@
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:37
+#: doc/apt-proxy.conf.5:45
#, no-wrap
msgid "B<cache_dir>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:40
+#: doc/apt-proxy.conf.5:48
msgid "Cache directory. Default: /var/cache/apt-proxy"
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:41
+#: doc/apt-proxy.conf.5:49
#, no-wrap
msgid "B<cleanup_freq>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:46
+#: doc/apt-proxy.conf.5:54
msgid ""
"If different from B<off>, indicates the time between housekeeping attempts: "
"delete files that have not been accessed in max_age, scan cache directories "
@@ -514,26 +650,26 @@
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:47
+#: doc/apt-proxy.conf.5:55
#, no-wrap
msgid "B<max_age>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:51
+#: doc/apt-proxy.conf.5:59
msgid ""
"If different from B<off>, indicates the maximum age of files before deletion "
"from the cache."
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:52
+#: doc/apt-proxy.conf.5:60
#, no-wrap
msgid "B<max_versions>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:59
+#: doc/apt-proxy.conf.5:67
msgid ""
"If different from B<off>, indicates the maximum number of versions of a "
"\\&.deb to keep. This is the number of versions per distribution, for "
@@ -543,13 +679,13 @@
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:60 doc/apt-proxy.conf.5:106
+#: doc/apt-proxy.conf.5:68 doc/apt-proxy.conf.5:111
#, no-wrap
msgid "B<passive_ftp>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:65
+#: doc/apt-proxy.conf.5:73
msgid ""
"Specify B<on> to use passive FTP, which works from behind a firewall, but "
"may not be supported on all servers. Specify B<off> to use active FTP "
@@ -557,38 +693,38 @@
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:66
+#: doc/apt-proxy.conf.5:74
#, no-wrap
msgid "B<http_proxy>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:69
+#: doc/apt-proxy.conf.5:77
msgid "Specify B<hostname:port> to use an upstream proxy."
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:70
+#: doc/apt-proxy.conf.5:78
#, no-wrap
msgid "B<dynamic_backends>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:75
+#: doc/apt-proxy.conf.5:83
msgid ""
-"By default apt-proxy will add HTTP backends dynamicaly if not already "
+"By default apt-proxy will add HTTP backends dynamically if not already "
"defined. Specify B<off> to restrict the available backends to those listed "
"in the configuration file. Default: on"
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:76
+#: doc/apt-proxy.conf.5:84
#, no-wrap
msgid "B<disable_pipelining>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:82
+#: doc/apt-proxy.conf.5:90
msgid ""
"apt-proxy can use HTTP pipelining to fetch several files at once (up to 10), "
"but this can generate multiple connections to each backend server. "
@@ -597,70 +733,83 @@
msgstr ""
# type: SH
-#: doc/apt-proxy.conf.5:84
+#: doc/apt-proxy.conf.5:92
#, no-wrap
-msgid "BACKENDS"
-msgstr ""
-
-# type: Plain text
-#: doc/apt-proxy.conf.5:87
-msgid ""
-"All other sections will be interpreted as backend names, and the options "
-"specified within are local to the backend."
+msgid "RESOURCES"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:91
+#: doc/apt-proxy.conf.5:95
msgid ""
-"The supplied apt-proxy\\&.conf will work out of the box, but I suggest you "
-"look for a mirror closer to you\\&. There are some in the default "
-"apt-proxy\\&.conf and it may be enough just to reorder the lines in the "
-"file\\&."
+"All other sections in the configuration file will be interpreted as resource "
+"names. The options in the section apply to this resource only."
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:95
+#: doc/apt-proxy.conf.5:99
msgid "Overrides the global timeout"
msgstr ""
# type: TP
-#: doc/apt-proxy.conf.5:96
+#: doc/apt-proxy.conf.5:100
#, no-wrap
-msgid "B<backends = E<lt>protocolE<gt>://E<lt>serverE<gt>/E<lt>directoryE<gt>>"
+msgid ""
+"B<backends = "
+">I<E<lt>protocolE<gt>>B<://>I<E<lt>serverE<gt>>B</>I<E<lt>directoryE<gt>>B< "
+"[...]>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:99
-msgid "A list of backend URLs\\&."
+#: doc/apt-proxy.conf.5:103
+msgid "A list one or more URLs referring to servers which hold debian packages\\&."
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:101
-msgid "Protocol - internet protocol to use: http, ftp or rsync"
+#: doc/apt-proxy.conf.5:106
+msgid "I<protocol>: internet protocol to use: http, ftp or rsync"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:103
-msgid "Server - hostname of the backend server to contact"
+#: doc/apt-proxy.conf.5:108
+msgid "I<server>: hostname of the backend server to contact"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:105
-msgid "Directory - directory name to prepend requests to for this server"
+#: doc/apt-proxy.conf.5:110
+msgid "I<directory>: directory name to prepend requests to for this server"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:109
+#: doc/apt-proxy.conf.5:114
msgid "Override the global setting of passive_ftp"
msgstr ""
+# type: SH
+#: doc/apt-proxy.conf.5:115
+#, no-wrap
+msgid "CONFIGURATION EXAMPLES"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:120
+msgid ""
+"To access a resource that's listed under a specific section name, simply "
+"append the section name (without the brackets) to the end of your deb source "
+"line in /etc/apt/sources.list"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:122
+msgid "B<Debian main>"
+msgstr ""
+
# type: Plain text
-#: doc/apt-proxy.conf.5:113
+#: doc/apt-proxy.conf.5:125
msgid "This example shows how to give clients access to the main Debian archive:"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:117
+#: doc/apt-proxy.conf.5:129
#, no-wrap
msgid ""
"[debian]\n"
@@ -669,25 +818,25 @@
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:122
-msgid "Using this configuration, the client would use a B<sources.list> entry like:"
+#: doc/apt-proxy.conf.5:134
+msgid "Using this configuration, the client would use a I<sources.list> entry like:"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:125
+#: doc/apt-proxy.conf.5:137
#, no-wrap
-msgid " deb http://server:9999/debian/ woody main\n"
+msgid " deb http://server:9999/debian woody main\n"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:130
+#: doc/apt-proxy.conf.5:142
msgid ""
"And so the file request `/debian/woody/main/binary-i386/x11/foo_1-1.deb' "
"would turn into a backend request of first"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:133
+#: doc/apt-proxy.conf.5:145
#, no-wrap
msgid ""
" "
@@ -695,12 +844,12 @@
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:136
+#: doc/apt-proxy.conf.5:148
msgid "and if that failed,"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:139
+#: doc/apt-proxy.conf.5:151
#, no-wrap
msgid ""
" "
@@ -708,13 +857,12 @@
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:142
-#, no-wrap
-msgid "and the file would be placed in\n"
+#: doc/apt-proxy.conf.5:154
+msgid "and apt-proxy will place the downloaded package in"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:144
+#: doc/apt-proxy.conf.5:156
#, no-wrap
msgid ""
" "
@@ -722,31 +870,76 @@
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:147
+#: doc/apt-proxy.conf.5:159
+msgid "B<backports.org>"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:161
+msgid "The backports.org website tells you to use this I<sources.list> line:"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:164
+#, no-wrap
+msgid " deb http://www.backports.org/debian sarge-backports main\n"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:168
+msgid ""
+"You can add this to apt-proxy by creating a new section in "
+"I<apt-proxy\\&.conf>\\&. In the new section, add a backends entry for the "
+"URL:"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:172
+#, no-wrap
+msgid ""
+" [backports]\n"
+" backends = http://www.backports.org/debian\n"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:177
+msgid ""
+"On the clients, replace the URL with one pointing to the apt-proxy resource "
+"name, in the form I<http://hostname:port/backend>. If your apt-proxy "
+"hostname is I<proxy> and it is running on port 9999, you would write:"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:180
#, no-wrap
+msgid " deb http://proxy:9999/backports sarge-backports main\n"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:185
msgid ""
"For many more examples, see the supplied "
-"/etc/apt-proxy/apt-proxy\\&.conf\\&.\n"
+"/etc/apt-proxy/apt-proxy\\&.conf\\&."
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:158
+#: doc/apt-proxy.conf.5:196
msgid "B<apt-proxy(8),> B</usr/share/doc/apt-proxy/README,> B<apt-proxy-import(8)>"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:163
+#: doc/apt-proxy.conf.5:201
msgid "Plenty sure. Please report."
msgstr ""
# type: SH
-#: doc/apt-proxy.conf.5:164
+#: doc/apt-proxy.conf.5:202
#, no-wrap
msgid "AUTHOR"
msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:165
+#: doc/apt-proxy.conf.5:203
msgid ""
"apt-proxy v2 was written by Manuel Estrada Sainz "
"E<lt>ranty at debian.orgE<gt>\\&."
Modified: trunk/doc/po/fr.po
==============================================================================
--- trunk/doc/po/fr.po (original)
+++ trunk/doc/po/fr.po Thu Aug 3 23:54:46 2006
@@ -1,19 +1,21 @@
-# Translation of fr.po to french
+# translation of fr.po to French
# Raphaël 'SurcouF' Bordet <surcouf at debianfr.net>, 2004.
# <surcouf at gmx.fr>, 2004.
-#
-#
+# Sylvain Archenault <sylvain.archenault at laposte.net>, 2005.
+#
+#
msgid ""
msgstr ""
"Project-Id-Version: apt-proxy 1.3.6.1\n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2005-08-18 11:55-0300\n"
-"PO-Revision-Date: 2005-02-17 01:20+0100\n"
-"Last-Translator: Raphaël 'SurcouF' Bordet <surcouf at debianfr.net>\n"
-"Language-Team: French <debian-l10n-french at lists.debian.org>\n"
+"POT-Creation-Date: 2006-03-29 0:11+0200\n"
+"PO-Revision-Date: 2005-10-18 19:14+0200\n"
+"Last-Translator: Sylvain Archenault <sylvain.archenault at laposte.net>\n"
+"Language-Team: French <French <debian-l10n-french at lists.debian.org>>\n"
"MIME-Version: 1.0\n"
-"Content-Type: text/plain; charset=iso-8859-1\n"
+"Content-Type: text/plain; charset=ISO-8859-1\n"
"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: KBabel 1.10.2\n"
# type: TH
#. Man page copied from apt.conf man page.
@@ -27,7 +29,7 @@
#: doc/apt-proxy.8:2
#, no-wrap
msgid "15 Jul 2005"
-msgstr ""
+msgstr "15 jul 2005"
# type: SH
#: doc/apt-proxy.8:3
@@ -39,11 +41,11 @@
#: doc/apt-proxy.8:5
msgid "apt-proxy - A proxy for saving bandwidth to Debian servers"
msgstr ""
-"apt\\-proxy \\- Un mandataire pour économiser de la bande passante sur les "
+"apt-proxy - Un mandataire pour économiser de la bande passante sur les "
"serveurs Debian"
# type: SH
-#: doc/apt-proxy.8:5 doc/apt-proxy-v1tov2.8:4
+#: doc/apt-proxy.8:5 doc/apt-proxy-v1tov2.8:4 doc/apt-proxy-import.8:5
#, no-wrap
msgid "SYNOPSIS"
msgstr "SYNOPSIS"
@@ -51,53 +53,53 @@
# type: Plain text
#: doc/apt-proxy.8:8
msgid "B<apt-proxy> I<[options] [logfile]>"
-msgstr "B<apt\\-proxy> I<[options] [fichier de log]>"
+msgstr "B<apt-proxy> I<[options] [fichier de journal]>"
# type: SH
-#: doc/apt-proxy.8:10 doc/apt-proxy-v1tov2.8:8 doc/apt-proxy.conf.5:6
+#: doc/apt-proxy.8:10 doc/apt-proxy-v1tov2.8:8 doc/apt-proxy-import.8:8
+#: doc/apt-proxy.conf.5:6
#, no-wrap
msgid "DESCRIPTION"
msgstr "DESCRIPTION"
# type: Plain text
#: doc/apt-proxy.8:15
-#, fuzzy
msgid ""
"B<apt-proxy> is a python program designed to be run as an stand alone server "
"via twistd, and provides a clean, caching, intelligent proxy for B<apt-get>, "
"which speaks HTTP to apt-get clients, and http, ftp or rsync to the backend "
-"server(s)\\&. apt-proxy listens by default on port 9999 by default\\&."
+"server(s)\\&. apt-proxy listens by default on port 9999\\&."
msgstr ""
-"B<apt\\-proxy> est un logiciel écrit en python, conçu pour tourner de lui-"
+"B<apt-proxy> est un logiciel écrit en python, conçu pour fonctionner de lui-"
"même via twistd et qui fournit un serveur mandataire (« proxy ») propre, "
-"cachant et intelligent pour B<apt\\-get>.Il communique via HTTP avec les "
-"clients apt\\-get et HTTP ou FTP aux serveurs\\&. Normalement, il est "
-"configuré sur le port TCP 9999, principalement parce que c'est la "
-"configuration par défaut, et que les gens sont paresseux\\&."
+"cachant et intelligent pour B<apt-get>. Il communique via HTTP avec les "
+"clients apt-get et via HTTP, FTP et rsync avec les serveur(s) finaux. apt-"
+"proxy écoute par défaut sur le port 9999."
# type: TP
-#: doc/apt-proxy.8:16
+#: doc/apt-proxy.8:16 doc/apt-proxy-import.8:29
#, no-wrap
msgid "B<-h>, B<--help>"
-msgstr ""
+msgstr "B<-h>, B<--help>"
# type: Plain text
#: doc/apt-proxy.8:19
msgid "Display usage information\\&."
-msgstr ""
+msgstr "Affiche les informations d'utilisation"
# type: TP
-#: doc/apt-proxy.8:19
+#: doc/apt-proxy.8:19 doc/apt-proxy-import.8:32
#, no-wrap
msgid "B<-c>, B<--config-file=>"
-msgstr ""
+msgstr "B<-c>, B<--config-file=>"
# type: Plain text
#: doc/apt-proxy.8:22
-#, fuzzy
msgid ""
"Configuration file. This defaults to /etc/apt-proxy/apt-proxy-v2\\&.conf"
-msgstr "/etc/apt\\-proxy/apt\\-proxy-v2\\&.conf"
+msgstr ""
+"Fichier de configuration. Le fichier par défaut est /etc/apt-proxy/apt-proxy-"
+"v2.conf."
# type: SH
#: doc/apt-proxy.8:23
@@ -107,14 +109,13 @@
# type: Plain text
#: doc/apt-proxy.8:27
-#, fuzzy
msgid ""
"Once B<apt-proxy> is configured on a host SERVER, users then edit their "
"B<sources\\&.list> file to point to the proxy (which uses the http protocol "
"to serve clients), like so:"
msgstr ""
-"Une fois qu'B<apt\\-proxy> est configuré, les utilisateurs doivent éditer "
-"leur fichier B<sources\\&.list> pour utiliser le serveur mandataire ou "
+"Une fois qu'B<apt-proxy> est configuré, les utilisateurs doivent éditer leur "
+"fichier B<sources.list> pour utiliser le serveur mandataire ou "
"« proxy » (qui utilise le protocole HTTP pour servir les clients), comme "
"suit :"
@@ -125,8 +126,8 @@
"deb http://SERVER:9999/debian stable main contrib non-free\n"
"deb-src http://SERVER:9999/debian stable main contrib non-free\n"
msgstr ""
-"deb http://SERVER:9999/main stable main contrib non\\-free\n"
-"deb\\-src http://SERVER:9999/main stable main contrib non\\-free\n"
+"deb http://SERVEUR:9999/debian stable main contrib non-free\n"
+"deb-src http://SERVEUR:9999/debian stable main contrib non-free\n"
# type: Plain text
#: doc/apt-proxy.8:33
@@ -136,7 +137,6 @@
# type: Plain text
#: doc/apt-proxy.8:39
-#, fuzzy
msgid ""
"What path should be specified after the server name and port number depends "
"on the configuration of B<apt-proxy> (which can restrict paths and send "
@@ -144,14 +144,12 @@
"\\&."
msgstr ""
"Le chemin devant être spécifié après le nom du serveur et le numéro de port "
-"dépendent de la configuration d'B<apt\\-proxy> (ce qui peut restreindre les "
-"chemins et envoyer des chemins différents à différents serveurs)\\&. Dans "
-"cet exemple, non\\-US/ et helixcode/ récupèrent actuellement des fichiers de "
-"différents serveurs\\&."
+"dépend de la configuration d'B<apt-proxy> (ce qui peut restreindre les "
+"chemins et envoyer des chemins différents à différents serveurs). Voir B< "
+"CONFIGURATION DU SERVEUR>."
# type: Plain text
#: doc/apt-proxy.8:43
-#, fuzzy
msgid ""
"Note that you can also use the nicknames `unstable', `frozen' etc, but "
"Packages/Sources files may get duplicated, so it is best to use either the "
@@ -159,14 +157,14 @@
msgstr ""
"Notez que vous pouvez aussi utiliser les saveurs « unstable », « frozen », "
"etc., mais les fichiers Packages/Sources seraient dupliqués, aussi il est "
-"conseillé d'utiliser soit le lien symbolique soit le nom de code mais de s'y "
+"conseillé d'utiliser soit le nom symbolique soit le nom de code et de s'y "
"tenir."
# type: SH
#: doc/apt-proxy.8:44
#, no-wrap
msgid "SERVER CONFIGURATION"
-msgstr "CONFIGURATION DU SERVER"
+msgstr "CONFIGURATION DU SERVEUR"
# type: Plain text
#: doc/apt-proxy.8:48
@@ -174,8 +172,8 @@
"See B<apt-proxy.conf>(5) for details of how to set up apt-proxy to use "
"backends near to you."
msgstr ""
-"Voir B<apt\\-proxy.conf>(5) pour les détails sur comment configurer apt-"
-"proxy afin d'utiliser les dorsaux proches de vous."
+"Voir B<apt-proxy.conf>(5) pour les détails sur comment configurer apt-proxy "
+"afin d'utiliser les serveurs proches de vous."
# type: SH
#: doc/apt-proxy.8:49
@@ -191,26 +189,27 @@
"from the back end and only doing a single fetch for any file, how ever many "
"users request it from the proxy."
msgstr ""
-"B<apt\\-proxy> réduit les besoins en bande passante des miroirs Debian en "
+"B<apt-proxy> réduit les besoins en bande passante des miroirs Debian en "
"restreignant la fréquence des mises à jour des fichiers Packages, Releases "
-"et Sources depuis le serveur et en téléchargeant une seule fois pour tout "
+"et Sources depuis le serveur et en téléchargeant une seule fois chaque "
"fichier, sans tenir compte du nombre d'utilisateurs qui en font la requête "
"au mandataire (« proxy »)."
# type: SH
-#: doc/apt-proxy.8:56 doc/apt-proxy-v1tov2.8:18 doc/apt-proxy.conf.5:148
+#: doc/apt-proxy.8:56 doc/apt-proxy-v1tov2.8:18 doc/apt-proxy-import.8:66
+#: doc/apt-proxy.conf.5:186
#, no-wrap
msgid "FILES"
msgstr "FICHIERS"
# type: Plain text
#: doc/apt-proxy.8:60
-#, fuzzy
msgid "/etc/apt-proxy/apt-proxy\\&.conf or /etc/apt-proxy/apt-proxy-v2\\&.conf"
-msgstr "/etc/apt\\-proxy/apt\\-proxy-v2\\&.conf"
+msgstr "/etc/apt-proxy/apt-proxy.conf ou /etc/apt-proxy/apt-proxy-v2.conf"
# type: SH
-#: doc/apt-proxy.8:61 doc/apt-proxy-v1tov2.8:24 doc/apt-proxy.conf.5:152
+#: doc/apt-proxy.8:61 doc/apt-proxy-v1tov2.8:24 doc/apt-proxy-import.8:77
+#: doc/apt-proxy.conf.5:190
#, no-wrap
msgid "SEE ALSO"
msgstr "VOIR AUSSI"
@@ -218,10 +217,10 @@
# type: Plain text
#: doc/apt-proxy.8:65
msgid "B<apt-proxy.conf>(5),B<apt-proxy-import>(8)"
-msgstr "B<apt-proxy.conf>(5),B<apt-proxy-import>(8)"
+msgstr "B<apt-proxy.conf>(5), B<apt-proxy-import>(8)"
# type: SH
-#: doc/apt-proxy.8:68 doc/apt-proxy.conf.5:161
+#: doc/apt-proxy.8:68 doc/apt-proxy-import.8:68 doc/apt-proxy.conf.5:199
#, no-wrap
msgid "BUGS"
msgstr "ANOMALIES"
@@ -233,15 +232,15 @@
"reduction in bytes transferred for binary packages, and much greater for "
"source and other packages."
msgstr ""
-"Les paquets ne sont pas compressés en utilisant l'option \\-\\-rsyncable de "
-"gzip, ce qui octroie une réduction de 30 % pour les paquets binaires, et "
+"Les paquets ne sont pas compressés en utilisant l'option --rsyncable de "
+"gzip, qui octroie une réduction de 30 % pour les paquets binaires, et "
"beaucoup plus pour les paquets sources et autres."
# type: SH
-#: doc/apt-proxy.8:73 doc/apt-proxy-v1tov2.8:22
+#: doc/apt-proxy.8:73 doc/apt-proxy-v1tov2.8:22 doc/apt-proxy-import.8:75
#, no-wrap
msgid "AUTHORS"
-msgstr "AUTEUR"
+msgstr "AUTEURS"
# type: Plain text
#: doc/apt-proxy.8:75
@@ -249,7 +248,7 @@
"apt-proxy v2 was written by Manuel Estrada Sainz and is maintained by Otavio "
"Salvador and Chris Halls."
msgstr ""
-"apt-proxy·v2 a été écrit par Manuel Estrada Sainz et est maintenu par Chris "
+"apt-proxy v2 a été écrit par Manuel Estrada Sainz et est maintenu par Chris "
"Halls."
# type: TH
@@ -262,22 +261,24 @@
#: doc/apt-proxy-v1tov2.8:1
#, no-wrap
msgid "November 2002"
-msgstr "Novembre 2002"
+msgstr "novembre 2002"
# type: TH
-#: doc/apt-proxy-v1tov2.8:1
+#. DO NOT MODIFY THIS FILE! It was generated by help2man 1.36.
+#: doc/apt-proxy-v1tov2.8:1 doc/apt-proxy-import.8:2
#, no-wrap
msgid "Debian GNU/Linux"
msgstr "Debian GNU/Linux"
# type: TH
-#: doc/apt-proxy-v1tov2.8:1
-#, no-wrap
-msgid " "
-msgstr "."
+#. DO NOT MODIFY THIS FILE! It was generated by help2man 1.36.
+#: doc/apt-proxy-v1tov2.8:1 doc/apt-proxy-import.8:2
+#, fuzzy, no-wrap
+msgid " "
+msgstr " "
# type: SH
-#: doc/apt-proxy-v1tov2.8:2 doc/apt-proxy.conf.5:3
+#: doc/apt-proxy-v1tov2.8:2 doc/apt-proxy-import.8:3 doc/apt-proxy.conf.5:3
#, no-wrap
msgid "NAME"
msgstr "NOM"
@@ -286,13 +287,13 @@
#: doc/apt-proxy-v1tov2.8:4
msgid "apt-proxy-v1tov2 - Updates apt-proxy configuration to the new format."
msgstr ""
-"apt-proxy-v1tov2·-·Convertit la configuration d'apt-proxy vers le nouveau "
+"apt-proxy-v1tov2 - Convertit la configuration d'apt-proxy vers le nouveau "
"format."
# type: Plain text
#: doc/apt-proxy-v1tov2.8:7
msgid "B<apt-proxy-v1tov2> [v1_conf [v2_sample_conf]] E<gt> v2_conf"
-msgstr "B<apt-proxy-v1tov2>·[v1_conf·[v2_sample_conf]]·E<gt>·v2_conf"
+msgstr "B<apt-proxy-v1tov2> [v1_conf [v2_exemple_conf]] E<gt> v2_conf"
# type: Plain text
#: doc/apt-proxy-v1tov2.8:11
@@ -300,8 +301,8 @@
"apt-proxy-v1tov2 tries to update I<v2_sample_conf> with the configuration "
"found in I<v1_conf> and writes the result to I<stdout>."
msgstr ""
-"apt-proxy-v1tov2 tente de convertir I<v2_sample_conf> avec la configuration "
-"trouvée dans le fichier I<v1_conf> et écrira le résultat vers I<stdout>."
+"apt-proxy-v1tov2 tente de convertir I<v2_exemple_conf> avec la configuration "
+"trouvée dans le fichier I<v1_conf> et écrira le résultat sur I<stdout>."
# type: Plain text
#: doc/apt-proxy-v1tov2.8:14
@@ -310,7 +311,7 @@
"etc/apt-proxy/apt-proxy-v2.conf for I<v2_sample_conf>."
msgstr ""
"Par défaut, il utilisera /etc/apt-proxy/apt-proxy.conf pour I<v1_conf> et /"
-"etc/apt-proxy/apt-proxy-v2.conf pour I<v2_sample_conf>."
+"etc/apt-proxy/apt-proxy-v2.conf pour I<v2_example_conf>."
# type: SH
#: doc/apt-proxy-v1tov2.8:15
@@ -326,96 +327,217 @@
"fait pour vous."
# type: Plain text
-#: doc/apt-proxy-v1tov2.8:20 doc/apt-proxy.conf.5:150
+#: doc/apt-proxy-v1tov2.8:20 doc/apt-proxy.conf.5:188
msgid "/etc/apt-proxy/apt-proxy\\&.conf"
-msgstr "/etc/apt\\-proxy/apt\\-proxy\\&.conf"
+msgstr "/etc/apt-proxy/apt-proxy.conf"
# type: Plain text
-#: doc/apt-proxy-v1tov2.8:22 doc/apt-proxy.conf.5:152
+#: doc/apt-proxy-v1tov2.8:22 doc/apt-proxy.conf.5:190
msgid "/etc/apt-proxy/apt-proxy-v2\\&.conf"
-msgstr "/etc/apt\\-proxy/apt\\-proxy-v2\\&.conf"
+msgstr "/etc/apt-proxy/apt-proxy-v2.conf"
# type: Plain text
#: doc/apt-proxy-v1tov2.8:24
msgid "Manuel Estrada Sainz E<lt>ranty at debian.orgE<gt>"
-msgstr "Manuel·Estrada·Sainz·E<lt>ranty at debian.orgE<gt>"
+msgstr "Manuel Estrada Sainz E<lt>ranty at debian.orgE<gt>"
# type: Plain text
-#: doc/apt-proxy-v1tov2.8:29 doc/apt-proxy-import.8.inc:42
+#: doc/apt-proxy-v1tov2.8:29 doc/apt-proxy-import.8:82
msgid "B<apt-proxy>(8), B<apt-proxy.conf>(5)"
-msgstr "B<ap\\-proxy>(8), B<apt\\-proxy\\&.conf>(5)"
+msgstr "B<apt-proxy>(8), B<apt-proxy.conf>(5)"
+
+# type: TH
+#. DO NOT MODIFY THIS FILE! It was generated by help2man 1.36.
+#: doc/apt-proxy-import.8:2
+#, no-wrap
+msgid "APT-PROXY-IMPORT"
+msgstr "APT-PROXY-IMPORT"
+
+# type: TH
+#. DO NOT MODIFY THIS FILE! It was generated by help2man 1.36.
+#: doc/apt-proxy-import.8:2
+#, no-wrap
+msgid "March 2006"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:5
+msgid "apt-proxy-import - Import packages into the apt-proxy cache."
+msgstr "apt-proxy-import - Importe les paquets dans le cache d'apt-proxy."
+
+# type: Plain text
+#: doc/apt-proxy-import.8:8
+msgid "B<apt-proxy-import> [I<options>] I<E<lt>filenameE<gt> >..."
+msgstr "B<apt-proxy-import> [I<options>] I<E<lt>[fichier de journalL<gt> >..."
+
+# type: Plain text
+#: doc/apt-proxy-import.8:11
+msgid ""
+"WARNING: apt-proxy has not been tested under this version of twisted "
+"(2.2.0). WARNING: although it should work without problem."
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy-import.8:13
+msgid "apt-proxy-import B<-r> [options] E<lt>directoryE<gt> ..."
+msgstr "apt-proxy-import B<-r> [options] E<lt>directoryE<gt> ..."
+
+# type: SH
+#: doc/apt-proxy-import.8:13
+#, no-wrap
+msgid "OPTIONS"
+msgstr "OPTIONS"
+
+# type: TP
+#: doc/apt-proxy-import.8:14
+#, no-wrap
+msgid "B<-V>, B<--version>"
+msgstr "B<-V>, B<--version>"
+
+# type: Plain text
+#: doc/apt-proxy-import.8:17
+msgid "print version and quit"
+msgstr "Affiche la version"
+
+# type: TP
+#: doc/apt-proxy-import.8:17
+#, no-wrap
+msgid "B<-v>, B<--verbose>"
+msgstr "B<-v>, B<--verbose>"
+
+# type: Plain text
+#: doc/apt-proxy-import.8:20
+msgid "give verbose output"
+msgstr "Sortie verbeuse"
+
+# type: TP
+#: doc/apt-proxy-import.8:20
+#, no-wrap
+msgid "B<-d>, B<--debug>"
+msgstr "B<-d>, B<--debug>"
# type: Plain text
-#. Man page was originaly copied from apt-proxy man page.
-#: doc/apt-proxy-import.8.inc:4
-msgid "[NAME] apt-proxy-import - Import packages into the apt-proxy cache."
+#: doc/apt-proxy-import.8:23
+msgid "debug output"
+msgstr "Sortie debug"
+
+# type: TP
+#: doc/apt-proxy-import.8:23
+#, no-wrap
+msgid "B<-q>, B<--quiet>"
+msgstr "B<-q>, B<--quiet>"
+
+# type: Plain text
+#: doc/apt-proxy-import.8:26
+msgid "try not to write messages to stdout"
+msgstr "N'essaye pas d'écrire des messages dans stdout"
+
+# type: TP
+#: doc/apt-proxy-import.8:26
+#, no-wrap
+msgid "B<-r>, B<--recursive>"
+msgstr "B<-r>, B<--recursive>"
+
+# type: Plain text
+#: doc/apt-proxy-import.8:29
+msgid "recurse into subdirectories"
+msgstr "Récursif dans les sous répertoires"
+
+# type: Plain text
+#: doc/apt-proxy-import.8:32
+msgid "Display this help and exit."
+msgstr "Affiche cette aide."
+
+# type: Plain text
+#: doc/apt-proxy-import.8:35
+msgid "Configuration file"
+msgstr "Fichier de configuration"
+
+# type: Plain text
+#: doc/apt-proxy-import.8:37
+msgid "apt-proxy-import imports .deb files into the apt-proxy cache."
+msgstr " apt-proxy-import - Importe les paquets dans le cache d'apt-proxy."
+
+# type: Plain text
+#: doc/apt-proxy-import.8:41
+msgid ""
+"It uses the package lists to determine where each file should be placed, so "
+"you should run B<'apt-get update'> to allow apt-proxy to update the package "
+"lists before running apt-proxy-import."
msgstr ""
-"[NOM] apt-proxy-import - Importe les paquets dans le cache d'apt-proxy."
+"Il utilise la liste de paquets pour déterminer où sera placé chaque fichier, "
+"aussi vous devriez lancer la commande B<'apt-get update'> pour permettre à "
+"apt-proxy de mettre à jour la liste de paquets avant de lancer apt-proxy-"
+"import."
# type: Plain text
-#: doc/apt-proxy-import.8.inc:9
+#: doc/apt-proxy-import.8:44
msgid ""
-"/apt-proxy-import imports / It uses the package lists to determine where "
-"each file should be placed, so you should run B<'apt-get update'> to allow "
-"apt-proxy to update the package lists before running apt-proxy-import."
+"WARNING: although it should work without problem. apt-proxy-import 1.9.x"
msgstr ""
-"/apt-proxy-import / Il utilise la liste de paquets pour déterminer où sera "
-"placé chaque fichier, aussi vous devriez lancer la commande B<'apt-get "
-"update'> pour permettre à apt-proxy de mettre à jour la liste de paquets "
-"avant de lancer apt-proxy-import."
+
+# type: SH
+#: doc/apt-proxy-import.8:44
+#, no-wrap
+msgid "USING TO BOOTSTRAP A NEW APT-PROXY CACHE"
+msgstr "À UTILISER POUR REMPLIR UN NOUVEAU CACHE POUR APT-PROXY"
# type: Plain text
-#: doc/apt-proxy-import.8.inc:14
+#: doc/apt-proxy-import.8:48
msgid ""
-"[USING TO BOOTSTRAP A NEW APT-PROXY CACHE] If you have been using apt "
-"standalone, you probably have built up a large collection of .debs or .udebs "
-"in apt's cache directory. You can import these files into apt-proxy as "
-"follows:"
+"If you have been using apt standalone, you probably have built up a large "
+"collection of .debs or .udebs in apt's cache directory. You can import "
+"these files into apt-proxy as follows:"
msgstr ""
-"[À UTILISER POUR REMPLIR UN NOUVEAU CACHE POUR APT-PROXY] Si vous aviez déjà "
-"utilisé apt, vous avez probablement créé une importante collection de "
-"fichiers .deb ou .udeb dans le répertoire cache d'apt. Vous pouvez importer "
-"ces fichiers dans apt-proxy comme suit:"
+"Si vous aviez déjà utilisé apt, vous avez probablement créé une importante "
+"collection de fichiers .deb ou .udeb dans le répertoire cache d'apt. Vous "
+"pouvez importer ces fichiers dans apt-proxy comme suit :"
# type: Plain text
-#: doc/apt-proxy-import.8.inc:16
+#: doc/apt-proxy-import.8:50
msgid "1. Update apt-proxy's filelists:"
-msgstr "1. Mettre à jour la liste de fichiers d'apt-proxy:"
+msgstr "1. Mettre à jour la liste de fichiers d'apt-proxy :"
# type: Plain text
-#: doc/apt-proxy-import.8.inc:18
+#: doc/apt-proxy-import.8:52
#, no-wrap
msgid " apt-get update\n"
msgstr " apt-get update\n"
# type: Plain text
-#: doc/apt-proxy-import.8.inc:21
+#: doc/apt-proxy-import.8:55
msgid "2. Import files from apt's cache:"
-msgstr "2. Importer des fichiers depuis le cache d'apt:"
+msgstr "2. Importer des fichiers depuis le cache d'apt :"
# type: Plain text
-#: doc/apt-proxy-import.8.inc:23
+#: doc/apt-proxy-import.8:57
#, no-wrap
msgid " apt-proxy-import -i /var/cache/apt/archives\n"
msgstr " apt-proxy-import -i /var/cache/apt/archives\n"
+# type: SH
+#: doc/apt-proxy-import.8:58
+#, no-wrap
+msgid "IMPORTING APT-MOVE CACHE"
+msgstr "IMPORTER LE CACHE D'APT-MOVE"
+
# type: Plain text
-#: doc/apt-proxy-import.8.inc:27
+#: doc/apt-proxy-import.8:60
msgid ""
-"[IMPORTING APT-MOVE CACHE] You can import the apt-move generated cache into "
-"apt-proxy using the following command:"
+"You can import the apt-move generated cache into apt-proxy using the "
+"following command:"
msgstr ""
-"[IMPORTER LE CACHE D'APT-MOVE] Vous pouvez importer le cache généré par apt-"
-"move dans apt-proxy en utilisant la commande suivante:"
+"Vous pouvez importer le cache généré par apt-move dans apt-proxy en "
+"utilisant la commande suivante :"
# type: Plain text
-#: doc/apt-proxy-import.8.inc:30
+#: doc/apt-proxy-import.8:63
#, no-wrap
msgid " apt-proxy-import -r -i /var/cache/apt-move\n"
msgstr " apt-proxy-import -r -i /var/cache/apt-move\n"
# type: Plain text
-#: doc/apt-proxy-import.8.inc:33
+#: doc/apt-proxy-import.8:66
msgid ""
"This tells apt-proxy-import to recuse over each directory in the apt-move "
"cache."
@@ -424,44 +546,36 @@
"dans le cache d'apt-move."
# type: Plain text
-#: doc/apt-proxy-import.8.inc:36
-#, fuzzy
-msgid "[FILES] \\ /etc/apt-proxy/apt-proxy\\&.conf"
-msgstr "[FICHIERS] /etc/apt\\-proxy/apt\\-proxy\\&.conf"
-
-# type: SH
-#: doc/apt-proxy-import.8.inc:38
-msgid "[SEE ALSO]"
-msgstr "VOIR AUSSI"
+#: doc/apt-proxy-import.8:68
+msgid "\\ /etc/apt-proxy/apt-proxy\\&.conf"
+msgstr "\\ /etc/apt-proxy/apt-proxy.conf"
# type: Plain text
-#: doc/apt-proxy-import.8.inc:48
+#: doc/apt-proxy-import.8:71
msgid ""
-"[BUGS] apt-proxy-import does not use I<max_age> or I<max_versions> to clean "
-"the cache directory on import."
+"apt-proxy-import does not use I<max_age> or I<max_versions> to clean the "
+"cache directory on import."
msgstr ""
-"[BOGUES] apt-proxy-import n'utilise pas les options I<max_age> ou "
-"I<max_versions> pour purger le répertoire cache pendant l'import."
+"apt-proxy-import n'utilise pas les options I<max_age> ou I<max_versions> "
+"pour purger le répertoire cache pendant l'import."
# type: Plain text
-#: doc/apt-proxy-import.8.inc:50
+#: doc/apt-proxy-import.8:73
msgid "It does not yet import source.tar.gz or Packages files."
msgstr "Il n'importe pas encore les fichiers source.tar.gz ou Packages."
# type: Plain text
-#: doc/apt-proxy-import.8.inc:52
+#: doc/apt-proxy-import.8:75
msgid "You must run it as the apt-proxy user or as root."
-msgstr ""
-"Vous devez le lancer en tant que l'utilisateur d'apt-proxy ou en tant que "
-"root."
+msgstr "Vous devez le lancer en tant qu'utilisateur apt-proxy ou root."
# type: Plain text
-#: doc/apt-proxy-import.8.inc:54
+#: doc/apt-proxy-import.8:77
msgid ""
-"[AUTHORS] Chris Halls E<lt>halls at debian.orgE<gt>, Manuel Estrada Sainz "
+"Chris Halls E<lt>halls at debian.orgE<gt>, Manuel Estrada Sainz "
"E<lt>ranty at debian.orgE<gt>"
msgstr ""
-"[AUTEURS] Chris Halls E<lt>halls at debian.orgE<gt>, Manuel Estrada Sainz "
+"Chris Halls E<lt>halls at debian.orgE<gt>, Manuel Estrada Sainz "
"E<lt>ranty at debian.orgE<gt>"
# type: TH
@@ -469,157 +583,184 @@
#: doc/apt-proxy.conf.5:2
#, no-wrap
msgid "apt-proxy\\&.conf"
-msgstr "apt\\-proxy\\&.conf"
+msgstr "apt-proxy.conf"
# type: TH
#. Man page copied from apt.conf man page.
#: doc/apt-proxy.conf.5:2
-#, no-wrap
-msgid "21 Nov 2002"
-msgstr "21 Novembre 2002"
+#, fuzzy, no-wrap
+msgid "5 Jan 2006"
+msgstr "15 jul 2005"
# type: Plain text
#: doc/apt-proxy.conf.5:5
msgid "apt-proxy\\&.conf - configuration file for apt-proxy"
-msgstr "apt-proxy\\&.conf - fichier de configuration pour apt-proxy"
+msgstr "apt-proxy.conf - fichier de configuration pour apt-proxy"
# type: Plain text
#: doc/apt-proxy.conf.5:9
+#, fuzzy
msgid ""
"B<apt-proxy\\&.conf> is the configuration file for apt-proxy. When apt-"
-"proxy starts up, it will read B</etc/apt-proxy/apt-proxy\\&.conf>\\&."
+"proxy starts up, it will read I</etc/apt-proxy/apt-proxy\\&.conf>\\&."
msgstr ""
-"B<apt-proxy\\&.conf>·est le fichier de configuration pour apt-proxy. Au "
-"démarrage d'apt-proxy, il lira le fichier B</etc/apt-proxy/apt-proxy\\&.conf>"
-"\\&."
+"B<apt-proxy.conf> est le fichier de configuration pour apt-proxy. Au "
+"démarrage d'apt-proxy, il lira le fichier B</etc/apt-proxy/apt-proxy.conf>."
# type: Plain text
#: doc/apt-proxy.conf.5:12
+#, fuzzy
msgid ""
-"B</etc/apt-proxy/apt-proxy-v2\\&.conf> will be read instead if it exists to "
+"I</etc/apt-proxy/apt-proxy-v2\\&.conf> will be read instead if it exists to "
"make upgrading from v1 easier."
msgstr ""
-"B</etc/apt-proxy/apt-proxy-v2\\&.conf> sera lu à la place s'il existe afin "
-"de rendre la mise à jour depuis la v1 mieux transparente."
+"B</etc/apt-proxy/apt-proxy-v2.conf> sera lu à la place s'il existe afin de "
+"rendre la mise à jour depuis la version 1 plus transparente."
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:15
+msgid ""
+"The configuration file is divided up into several sections, where each I<"
+"[resource]> section defines a separate resource. The B<DEFAULT> section "
+"applies to all resources."
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:19
+#, fuzzy
+msgid ""
+"The supplied I<apt-proxy\\&.conf> will work out of the box, but it is best "
+"to change the backends you use to a mirror closer to you. There are some in "
+"the default file, and it may be enough just to reorder the lines in the file"
+"\\&."
+msgstr ""
+"Le fichier apt-proxy.conf fourni devrait fonctionner tel quel, mais il est "
+"suggéré de choisir un miroir plus proche de vous. Certains sont dans le "
+"fichier apt-proxy.conf par défaut et il devrait suffire de réordonner les "
+"lignes dans ce fichier."
# type: SH
-#: doc/apt-proxy.conf.5:13
+#: doc/apt-proxy.conf.5:21
#, no-wrap
msgid "[DEFAULT]"
msgstr "[DÉFAUT]"
# type: Plain text
-#: doc/apt-proxy.conf.5:15
+#: doc/apt-proxy.conf.5:23
msgid "This section holds options global to the whole apt-proxy:"
-msgstr "Cette section contient les options globles à tout apt-proxy:"
+msgstr "Cette section contient les options globales d'apt-proxy :"
# type: TP
-#: doc/apt-proxy.conf.5:16
+#: doc/apt-proxy.conf.5:24
#, no-wrap
msgid "B<address>"
msgstr "B<address>"
# type: Plain text
-#: doc/apt-proxy.conf.5:20
+#: doc/apt-proxy.conf.5:28
msgid ""
"IP address on which apt-proxy will listen for requests. Multiple addresses "
"have a empty space between it."
msgstr ""
-"L'adresse IP sur laquelle apt-proxy sera à l'écoute des requêtes. Pour "
-"plusieurs adresses il faut avoir un espace entre elles."
+"L'adresse IP sur laquelle apt-proxy sera à l'écoute des requêtes. S'il y a "
+"plusieurs adresses, les séparer par un espace."
# type: TP
-#: doc/apt-proxy.conf.5:21
+#: doc/apt-proxy.conf.5:29
#, no-wrap
msgid "B<port>"
msgstr "B<port>"
# type: Plain text
-#: doc/apt-proxy.conf.5:24
+#: doc/apt-proxy.conf.5:32
msgid "TCP port on which apt-proxy will listen for requests."
msgstr "Le port TCP sur lequel apt-proxy sera à l'écoute des requêtes."
# type: TP
-#: doc/apt-proxy.conf.5:25
+#: doc/apt-proxy.conf.5:33
#, no-wrap
msgid "B<min_refresh_delay>"
msgstr "B<min_refresh_delay>"
# type: Plain text
-#: doc/apt-proxy.conf.5:30
+#: doc/apt-proxy.conf.5:38
msgid ""
"If different from B<off>, means that Packages and other control files will "
"not be refreshed more frequently than this number of seconds\\&."
msgstr ""
-"Si différent de B<off>, il signifie que les fichiers Packages et autres "
-"control ne seront pas rafraîchis plus fréquemment que ce nombre de secondes"
-"\\&."
+"Si différent de B<off>, cela signifie que les fichiers Packages et les "
+"autres fichiers de contrôle ne seront pas rafraîchis plus fréquemment que ce "
+"nombre de secondes."
# type: TP
-#: doc/apt-proxy.conf.5:31 doc/apt-proxy.conf.5:92
+#: doc/apt-proxy.conf.5:39 doc/apt-proxy.conf.5:96
#, no-wrap
msgid "B<timeout>"
msgstr "B<timeout>"
# type: Plain text
-#: doc/apt-proxy.conf.5:36
+#: doc/apt-proxy.conf.5:44
msgid ""
"Maximum I/O timeout in seconds for backend transfers. Default: 30 seconds. "
"If no response is received from a backend server in this time, apt-proxy "
"will try the next server in the list. Y"
msgstr ""
+"Délai d'I/O dépassé pour les transferts. Défaut : 30 secondes. Si aucune "
+"réponse n'est reçue du serveur dans ce laps de temps, apt-proxy essaiera le "
+"serveur suivant de la liste."
# type: TP
-#: doc/apt-proxy.conf.5:37
+#: doc/apt-proxy.conf.5:45
#, no-wrap
msgid "B<cache_dir>"
msgstr "B<cache_dir>"
# type: Plain text
-#: doc/apt-proxy.conf.5:40
+#: doc/apt-proxy.conf.5:48
msgid "Cache directory. Default: /var/cache/apt-proxy"
-msgstr "Répertoire de cache: Défaut: /var/cache/apt-proxy"
+msgstr "Répertoire de cache. Défaut : /var/cache/apt-proxy"
# type: TP
-#: doc/apt-proxy.conf.5:41
+#: doc/apt-proxy.conf.5:49
#, no-wrap
msgid "B<cleanup_freq>"
msgstr "B<cleanup_freq>"
# type: Plain text
-#: doc/apt-proxy.conf.5:46
+#: doc/apt-proxy.conf.5:54
msgid ""
"If different from B<off>, indicates the time between housekeeping attempts: "
"delete files that have not been accessed in max_age, scan cache directories "
"and update internal tables, ..."
msgstr ""
-"Si différent de B<off>, il indique la période entre les tentatives de "
-"nettoyage: les fichiers effacés qui n'ont pas été accédés depuis max_age, "
-"les scan de répertoires de cache et les mises à jour des tables internes, ..."
+"Si différent de B<off>, cela indique la période entre les tentatives de "
+"nettoyage : suppression des fichiers qui n'ont pas été accédés depuis "
+"max_age, analyse des répertoires de cache, mise à jour des tables "
+"internes, ..."
# type: TP
-#: doc/apt-proxy.conf.5:47
+#: doc/apt-proxy.conf.5:55
#, no-wrap
msgid "B<max_age>"
msgstr "B<max_age>"
# type: Plain text
-#: doc/apt-proxy.conf.5:51
+#: doc/apt-proxy.conf.5:59
msgid ""
"If different from B<off>, indicates the maximum age of files before deletion "
"from the cache."
msgstr ""
-"Si différent de B<off>, il indique l'âge maximal des fichiers avant "
+"Si différent de B<off>, cela indique l'âge maximal des fichiers avant leur "
"effacement du cache."
# type: TP
-#: doc/apt-proxy.conf.5:52
+#: doc/apt-proxy.conf.5:60
#, no-wrap
msgid "B<max_versions>"
msgstr "B<max_versions>"
# type: Plain text
-#: doc/apt-proxy.conf.5:59
+#: doc/apt-proxy.conf.5:67
msgid ""
"If different from B<off>, indicates the maximum number of versions of a \\&."
"deb to keep. This is the number of versions per distribution, for example "
@@ -627,66 +768,65 @@
"kept: the last 2 stable versions, the last 2 testing versions and the last 2 "
"unstable versions."
msgstr ""
-"Si différent de B<off>, il indique le nombre maximum de version d'un "
-"\\&paquet debian à conserver. Il y a un nombre de versions par "
-"distribution, par exemple mettre max_versions à 2 assurera qu'un maximum de "
-"6 paquets sera conservé: les deux dernières versions de stable, les deux "
-"dernières versions de testing et les deux dernières versions d'unstable."
+"Si différent de B<off>, cela indique le nombre maximum de version d'un "
+"paquet Debian à conserver. Il s'agit du nombre de versions par distribution. "
+"Par exemple, mettre max_versions à 2 assurera qu'un maximum de 6 paquets "
+"sera conservé : les deux dernières versions de stable, les deux dernières "
+"versions de testing et les deux dernières versions d'unstable."
# type: TP
-#: doc/apt-proxy.conf.5:60 doc/apt-proxy.conf.5:106
+#: doc/apt-proxy.conf.5:68 doc/apt-proxy.conf.5:111
#, no-wrap
msgid "B<passive_ftp>"
msgstr "B<passive_ftp>"
# type: Plain text
-#: doc/apt-proxy.conf.5:65
+#: doc/apt-proxy.conf.5:73
msgid ""
"Specify B<on> to use passive FTP, which works from behind a firewall, but "
"may not be supported on all servers. Specify B<off> to use active FTP "
"instead. Default: on"
msgstr ""
"Spécifiez B<on> pour utiliser le FTP passif, qui fonctionne à travers un "
-"firewall, mais n'est pas supporté sur tous les serveurs. Spécifiez B<off> "
-"pour utiliser le FTP actif à la place. Défaut; on"
+"pare-feu, mais n'est pas géré sur tous les serveurs. Spécifiez B<off> pour "
+"utiliser le FTP actif à la place. Défaut : on"
# type: TP
-#: doc/apt-proxy.conf.5:66
+#: doc/apt-proxy.conf.5:74
#, no-wrap
msgid "B<http_proxy>"
msgstr "B<http_proxy>"
# type: Plain text
-#: doc/apt-proxy.conf.5:69
+#: doc/apt-proxy.conf.5:77
msgid "Specify B<hostname:port> to use an upstream proxy."
-msgstr ""
-"Spécifiez B<nom d'hôte:numéro de port> pour utiliser un proxy en amont."
+msgstr "Indiquez B<nom d'hôte:port> pour utiliser un proxy amont."
# type: TP
-#: doc/apt-proxy.conf.5:70
+#: doc/apt-proxy.conf.5:78
#, no-wrap
msgid "B<dynamic_backends>"
msgstr "B<dynamic_backends>"
# type: Plain text
-#: doc/apt-proxy.conf.5:75
+#: doc/apt-proxy.conf.5:83
msgid ""
-"By default apt-proxy will add HTTP backends dynamicaly if not already "
+"By default apt-proxy will add HTTP backends dynamically if not already "
"defined. Specify B<off> to restrict the available backends to those listed "
"in the configuration file. Default: on"
msgstr ""
"Par défaut, apt-proxy ajoutera les dorsaux HTTP dynamiquement s'ils ne sont "
"pas déjà définis. Mettre à B<off> pour restreindre les dorsaux disponibles à "
-"ceux listés dans le fichier de configuration. Défaut: on"
+"ceux listés dans le fichier de configuration. Défaut : on"
# type: TP
-#: doc/apt-proxy.conf.5:76
+#: doc/apt-proxy.conf.5:84
#, no-wrap
msgid "B<disable_pipelining>"
msgstr "B<disable_pipelining>"
# type: Plain text
-#: doc/apt-proxy.conf.5:82
+#: doc/apt-proxy.conf.5:90
msgid ""
"apt-proxy can use HTTP pipelining to fetch several files at once (up to 10), "
"but this can generate multiple connections to each backend server. "
@@ -695,85 +835,97 @@
msgstr ""
"apt-proxy peut utiliser la canalisation HTTP pour récupérer plusieurs "
"fichiers en une fois (jusqu'à 10), mais ceci génère de multiples connexions "
-"sur chaque dorsaux. La canalisation est désactivée par défaut jusqu'à ce "
-"qu'elle soit fixée. Mettre à B<0> pour activer la canalisation "
-"expérimentale HTTP. Défaut: 1"
+"sur chaque dorsal. La canalisation est désactivée par défaut jusqu'à ce que "
+"ce problème soit corrigé. Mettre à B<0> pour activer la canalisation "
+"expérimentale HTTP. Défaut : 1"
# type: SH
-#: doc/apt-proxy.conf.5:84
+#: doc/apt-proxy.conf.5:92
#, no-wrap
-msgid "BACKENDS"
-msgstr "DORSAUX"
-
-# type: Plain text
-#: doc/apt-proxy.conf.5:87
-msgid ""
-"All other sections will be interpreted as backend names, and the options "
-"specified within are local to the backend."
+msgid "RESOURCES"
msgstr ""
-"Toutes les autres sections devront être interprétées comme des noms de "
-"miroirs, et les options spécifiées avec seront locales à ce miroir."
# type: Plain text
-#: doc/apt-proxy.conf.5:91
+#: doc/apt-proxy.conf.5:95
+#, fuzzy
msgid ""
-"The supplied apt-proxy\\&.conf will work out of the box, but I suggest you "
-"look for a mirror closer to you\\&. There are some in the default apt-proxy"
-"\\&.conf and it may be enough just to reorder the lines in the file\\&."
+"All other sections in the configuration file will be interpreted as resource "
+"names. The options in the section apply to this resource only."
msgstr ""
-"Le fichier apt-proxy\\&.conf fourni devrait fonctionner tel quel, mais je "
-"vous suggère de choisir un miroir plus proche de vous\\&. Nombreux sont "
-"dans le fichier apt-proxy.conf par défaut et il devrait être suffisant de ré-"
-"ordonner les lignes dans ce fichier\\&."
+"Toutes les autres sections devront être interprétées comme des noms de "
+"miroir, et les options qui y seront spécifiées seront spécifiques à ce "
+"miroir."
# type: Plain text
-#: doc/apt-proxy.conf.5:95
+#: doc/apt-proxy.conf.5:99
msgid "Overrides the global timeout"
-msgstr "Surcharger le temps de réponse global"
+msgstr "Supplanter le temps global d'expiration"
# type: TP
-#: doc/apt-proxy.conf.5:96
-#, no-wrap
-msgid "B<backends = E<lt>protocolE<gt>://E<lt>serverE<gt>/E<lt>directoryE<gt>>"
+#: doc/apt-proxy.conf.5:100
+#, fuzzy, no-wrap
+msgid "B<backends = >I<E<lt>protocolE<gt>>B<://>I<E<lt>serverE<gt>>B</>I<E<lt>directoryE<gt>>B< [...]>"
msgstr "B<miroirs = E<lt>protocoleE<gt>://E<lt>serveurE<gt>/E<lt>répertoireE<gt>>"
# type: Plain text
-#: doc/apt-proxy.conf.5:99
-msgid "A list of backend URLs\\&."
-msgstr "Une liste d'URLs de dorsaux\\&."
+#: doc/apt-proxy.conf.5:103
+msgid ""
+"A list one or more URLs referring to servers which hold debian packages\\&."
+msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:101
-msgid "Protocol - internet protocol to use: http, ftp or rsync"
-msgstr "Protocole - protocole à utiliser: http, ftp ou rsync"
+#: doc/apt-proxy.conf.5:106
+#, fuzzy
+msgid "I<protocol>: internet protocol to use: http, ftp or rsync"
+msgstr "Protocole - protocole à utiliser : http, ftp ou rsync"
# type: Plain text
-#: doc/apt-proxy.conf.5:103
-msgid "Server - hostname of the backend server to contact"
-msgstr "Serveur - nom d'hote du miroir à contacter"
+#: doc/apt-proxy.conf.5:108
+#, fuzzy
+msgid "I<server>: hostname of the backend server to contact"
+msgstr "Serveur - nom d'hôte du miroir à contacter"
# type: Plain text
-#: doc/apt-proxy.conf.5:105
-msgid "Directory - directory name to prepend requests to for this server"
-msgstr ""
-"Répertoire - nom du répertoire où ajouter des demandes au début pour ce "
-"serveur"
+#: doc/apt-proxy.conf.5:110
+#, fuzzy
+msgid "I<directory>: directory name to prepend requests to for this server"
+msgstr "Répertoire - nom du répertoire où ajouter des demandes pour ce serveur"
# type: Plain text
-#: doc/apt-proxy.conf.5:109
+#: doc/apt-proxy.conf.5:114
msgid "Override the global setting of passive_ftp"
-msgstr "Surcharger la directive globale de passive_ftp"
+msgstr "Supplanter la configuration globale de passive_ftp"
+
+# type: SH
+#: doc/apt-proxy.conf.5:115
+#, fuzzy, no-wrap
+msgid "CONFIGURATION EXAMPLES"
+msgstr "CONFIGURATION DU CLIENT"
# type: Plain text
-#: doc/apt-proxy.conf.5:113
+#: doc/apt-proxy.conf.5:120
+msgid ""
+"To access a resource that's listed under a specific section name, simply "
+"append the section name (without the brackets) to the end of your deb source "
+"line in /etc/apt/sources.list"
+msgstr ""
+
+# type: TH
+#: doc/apt-proxy.conf.5:122
+#, fuzzy
+msgid "B<Debian main>"
+msgstr "Debian GNU/Linux"
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:125
msgid ""
"This example shows how to give clients access to the main Debian archive:"
msgstr ""
-"Cet exemple montre comment donner aux clients accès à l'archive debian "
-"principale:"
+"Cet exemple montre comment donner aux clients l'accès à l'archive Debian "
+"principale :"
# type: Plain text
-#: doc/apt-proxy.conf.5:117
+#: doc/apt-proxy.conf.5:129
#, no-wrap
msgid ""
"[debian]\n"
@@ -781,138 +933,157 @@
" http://ftp.de.debian.org/debian/\n"
msgstr ""
"[debian]\n"
-"backends·=·http://ftp.us.debian.org/debian/\n"
-" http://ftp.de.debian.org/debian/\n"
+"backends = http://ftp.us.debian.org/debian/\n"
+" http://ftp.fr.debian.org/debian/\n"
# type: Plain text
-#: doc/apt-proxy.conf.5:122
+#: doc/apt-proxy.conf.5:134
+#, fuzzy
msgid ""
-"Using this configuration, the client would use a B<sources.list> entry like:"
+"Using this configuration, the client would use a I<sources.list> entry like:"
msgstr ""
"En utilisant cette configuration, le client utilisera une entrée B<sources."
-"list> comme ceci:"
+"list> comme ceci :"
# type: Plain text
-#: doc/apt-proxy.conf.5:125
-#, no-wrap
-msgid " deb http://server:9999/debian/ woody main\n"
-msgstr " deb http://SERVER:9999/debian/ woody main\n"
+#: doc/apt-proxy.conf.5:137
+#, fuzzy, no-wrap
+msgid " deb http://server:9999/debian woody main\n"
+msgstr " deb http://SERVEUR:9999/debian/ woody main\n"
# type: Plain text
-#: doc/apt-proxy.conf.5:130
-#, fuzzy
+#: doc/apt-proxy.conf.5:142
msgid ""
"And so the file request `/debian/woody/main/binary-i386/x11/foo_1-1.deb' "
"would turn into a backend request of first"
msgstr ""
-"Et ainsi la demande du fichier `/debian/woody/main/binary-i386/x11/foo_1-1."
-"deb' sera convertie en une demande prioritaire"
+"Ainsi, la demande du fichier « /debian/woody/main/binary-i386/x11/foo_1-1."
+"deb » sera convertie en une demande d'abord de"
# type: Plain text
-#: doc/apt-proxy.conf.5:133
-#, fuzzy, no-wrap
+#: doc/apt-proxy.conf.5:145
+#, no-wrap
msgid " `http://ftp.us.debian.org/debian/woody/main/binary-i386/x11/foo_1-1.deb'\n"
-msgstr ""
-" `http://ftp.us.debian.org/debian/woody/main/binary-i386/x11/foo_1-1.deb'\n"
-" \n"
+msgstr " « http://ftp.us.debian.org/debian/woody/main/binary-i386/x11/foo_1-1.deb »\n"
# type: Plain text
-#: doc/apt-proxy.conf.5:136
+#: doc/apt-proxy.conf.5:148
msgid "and if that failed,"
-msgstr "et s'il a échoué,"
+msgstr "et, en cas d'échec, de"
# type: Plain text
-#: doc/apt-proxy.conf.5:139
-#, fuzzy, no-wrap
+#: doc/apt-proxy.conf.5:151
+#, no-wrap
msgid " `http://ftp.de.debian.org/debian/woody/main/binary-i386/x11/foo_1-1.deb'\n"
+msgstr " « http://ftp.fr.debian.org/debian/woody/main/binary-i386/x11/foo_1-1.deb »\n"
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:154
+msgid "and apt-proxy will place the downloaded package in"
msgstr ""
-" `http://ftp.de.debian.org/debian/woody/main/binary-i386/x11/foo_1-1.deb'\n"
-" \n"
# type: Plain text
-#: doc/apt-proxy.conf.5:142
+#: doc/apt-proxy.conf.5:156
#, no-wrap
-msgid "and the file would be placed in\n"
-msgstr "et le fichier devrait être placé dans\n"
+msgid " `/var/cache/apt-proxy/debian/debian/woody/main/binary-i386/x11/foo_1-1.deb'\\&.\n"
+msgstr " « /var/cache/apt-proxy/debian/debian/woody/main/binary-i386/x11/foo_1-1.deb ».\n"
+
+# type: TP
+#: doc/apt-proxy.conf.5:159
+#, fuzzy
+msgid "B<backports.org>"
+msgstr "B<port>"
# type: Plain text
-#: doc/apt-proxy.conf.5:144
+#: doc/apt-proxy.conf.5:161
+msgid "The backports.org website tells you to use this I<sources.list> line:"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:164
#, no-wrap
-msgid " `/var/cache/apt-proxy/debian/debian/woody/main/binary-i386/x11/foo_1-1.deb'\\&.\n"
-msgstr "····`/var/cache/apt-proxy/debian/debian/woody/main/binary-i386/x11/foo_1-1.deb'\\&.\n"
+msgid " deb http://www.backports.org/debian sarge-backports main\n"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:168
+msgid ""
+"You can add this to apt-proxy by creating a new section in I<apt-proxy\\&."
+"conf>\\&. In the new section, add a backends entry for the URL:"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:172
+#, no-wrap
+msgid ""
+" [backports]\n"
+" backends = http://www.backports.org/debian\n"
+msgstr ""
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:177
+msgid ""
+"On the clients, replace the URL with one pointing to the apt-proxy resource "
+"name, in the form I<http://hostname:port/backend>. If your apt-proxy "
+"hostname is I<proxy> and it is running on port 9999, you would write:"
+msgstr ""
# type: Plain text
-#: doc/apt-proxy.conf.5:147
+#: doc/apt-proxy.conf.5:180
#, fuzzy, no-wrap
-msgid "For many more examples, see the supplied /etc/apt-proxy/apt-proxy\\&.conf\\&.\n"
-msgstr "Pour d'autres exemples, voir le fichier /etc/apt-proxy/apt-proxy\\&.conf\\& fourni."
+msgid " deb http://proxy:9999/backports sarge-backports main\n"
+msgstr " deb http://SERVEUR:9999/debian/ woody main\n"
# type: Plain text
-#: doc/apt-proxy.conf.5:158
+#: doc/apt-proxy.conf.5:185
+#, fuzzy
+msgid ""
+"For many more examples, see the supplied /etc/apt-proxy/apt-proxy\\&.conf\\&."
+msgstr ""
+"Pour d'autres exemples, voir le fichier /etc/apt-proxy/apt-proxy.conf.\n"
+
+# type: Plain text
+#: doc/apt-proxy.conf.5:196
msgid ""
"B<apt-proxy(8),> B</usr/share/doc/apt-proxy/README,> B<apt-proxy-import(8)>"
msgstr ""
"B<apt-proxy>(8), B</usr/share/doc/apt-proxy/README,>B<apt-proxy-import>(8)"
# type: Plain text
-#: doc/apt-proxy.conf.5:163
+#: doc/apt-proxy.conf.5:201
msgid "Plenty sure. Please report."
-msgstr "Totalement sûr. Prière de rapporter."
+msgstr "Certainement. Veuillez nous en faire part."
# type: SH
-#: doc/apt-proxy.conf.5:164
+#: doc/apt-proxy.conf.5:202
#, no-wrap
msgid "AUTHOR"
msgstr "AUTEUR"
# type: Plain text
-#: doc/apt-proxy.conf.5:165
+#: doc/apt-proxy.conf.5:203
msgid ""
"apt-proxy v2 was written by Manuel Estrada Sainz E<lt>ranty at debian.orgE<gt>"
"\\&."
msgstr ""
-"apt\\-proxy·v2 a été écrit par Manuel Estrada Sainz E<lt>ranty at debian."
-"orgE<gt>\\&."
+"apt-proxy v2 a été écrit par Manuel Estrada Sainz E<lt>ranty at debian.orgE<gt>."
# type: TH
-#~ msgid "03 Dec 2004"
-#~ msgstr "03 Décembre 2004"
+#~ msgid "October 2005"
+#~ msgstr "Octobre 2005"
-# type: Plain text
-#, fuzzy
-#~ msgid ""
-#~ "deb http://SERVER:9999/debian-non-US stable/non-US main contrib non-free\n"
-#~ "deb-src http://SERVER:9999/debian-non-US stable/non-US main contrib non-free\n"
-#~ msgstr ""
-#~ "deb http://SERVER:9999/main stable main contrib non\\-free\n"
-#~ "deb\\-src http://SERVER:9999/main stable main contrib non\\-free\n"
-
-# type: Plain text
-#~ msgid "Maximum I/O timeout in seconds for backend transfers."
-#~ msgstr ""
-#~ "Temps de réponse maximal des E/S en secondes pour les transferts dorsaux."
-
-# type: Plain text
-#~ msgid "deb http://SERVER:9999/helixcode/ woody main\n"
-#~ msgstr "deb http://SERVER:9999/helixcode/ woody main\n"
-
-# type: Plain text
-#~ msgid ""
-#~ "NOTE: v2 doesn't officially support rsync backends, so for now the above "
-#~ "does not apply."
-#~ msgstr ""
-#~ "NOTE : la seconde version ne gère pas officiellement rsync, donc, ce "
-#~ "qui suit ne s'applique pas."
+# type: TH
+#~ msgid "21 Nov 2002"
+#~ msgstr "21 novembre 2002"
-# type: Plain text
-#~ msgid "\n"
-#~ msgstr "\n"
+# type: SH
+#~ msgid "BACKENDS"
+#~ msgstr "DORSAUX"
# type: Plain text
-#~ msgid "/etc/apt\\-proxy/apt\\-proxy\\&.conf"
-#~ msgstr "/etc/apt\\-proxy/apt\\-proxy\\&.conf"
+#~ msgid "A list of backend URLs\\&."
+#~ msgstr "Une liste de liens de dorsaux."
# type: Plain text
-#, fuzzy
-#~ msgid "B<apt\\-proxy>(8), B<apt\\-proxy.conf>(5)"
-#~ msgstr "/etc/apt\\-proxy/apt\\-proxy\\&.conf"
+#~ msgid "and the file would be placed in\n"
+#~ msgstr "et le fichier devrait être placé dans\n"
Modified: trunk/doc/po4a.cfg
==============================================================================
--- trunk/doc/po4a.cfg (original)
+++ trunk/doc/po4a.cfg Thu Aug 3 23:54:46 2006
@@ -1,5 +1,5 @@
[po4a_paths] doc/po/apt-proxy.pot fr:doc/po/fr.po
[type: man] doc/apt-proxy.8 fr:doc/apt-proxy.fr.8 add_fr:doc/apt-proxy.add.fr
-[type: man] doc/apt-proxy.conf.5
-[type: man] doc/apt-proxy-import.8.inc
-[type: man] doc/apt-proxy-v1tov2.8
+[type: man] doc/apt-proxy.conf.5 fr:doc/apt-proxy.conf.fr.5 add_fr:doc/apt-proxy.add.fr
+[type: man] doc/apt-proxy-import.8 fr:doc/apt-proxy-import.fr.8 add_fr:doc/apt-proxy.add.fr
+[type: man] doc/apt-proxy-v1tov2.8 fr:doc/apt-proxy-v1tov2.fr.8 add_fr:doc/apt-proxy.add.fr
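With this configuration the translated man pages would typically be regenerated with a command along these lines (a sketch; the real invocation lives in the package build rules):

    po4a --verbose doc/po4a.cfg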
Modified: trunk/runtests
==============================================================================
--- trunk/runtests (original)
+++ trunk/runtests Thu Aug 3 23:54:46 2006
@@ -2,21 +2,81 @@
set -e
-make_packages_dir()
+testfiles="packages/Packages packages/Packages.gz packages/Packages.bz2
+ verify/invalid-gzip.gz
+ packages/apt_0.0.1_test.deb packages/empty.txt
+ apt/apt_0.0.1_test.deb apt/apt_0.0.2_test.deb apt/apt_0.0.3_test.deb"
+rootdir="$(cd $(dirname $0); pwd)"
+testdir="$rootdir/test_data"
+
+make_pkg()
+{
+ dir=$1
+ version=$2
+ pkgdir=${dir}_${version}_test
+ cp -ra $dir $pkgdir
+ sed "s/^Version: .*/Version: ${version}/" < $dir/DEBIAN/control > $pkgdir/DEBIAN/control
+ dpkg --build $pkgdir
+ rm -r $pkgdir
+}
+
+make_test_data()
{
- echo Creating test data
- mkdir -p test_data/packages
- cd test_data/packages
- echo Creating apt package from system
- fakeroot -u dpkg-repack apt
+ mkdir -p $testdir
+ cd $testdir
+ make_data=
+ for f in $testfiles; do
+ if [ ! -f "$f" ]; then
+ make_data=1
+ break
+ fi
+ done
+
+ if [ -z "$make_data" ]; then
+ return
+ fi
+
+ echo "Creating test data"
+ [ ! -d $testdir/apt ] || rm -r $testdir/apt
+ mkdir -p $testdir/apt
+ cd $testdir/apt
+ echo Creating apt packages from system
+ fakeroot -u dpkg-repack --generate apt
+ mv dpkg-repack* apt
+ make_pkg apt 0.0.1
+ make_pkg apt 0.0.2
+ make_pkg apt 0.0.3
+ rm -r apt
+
+ echo Creating Packages file for apt directory
+ dpkg-scanpackages . /dev/null | tee Packages | gzip -c > Packages.gz
+ bzip2 -c < Packages > Packages.bz2
+ cd ..
+
+ [ ! -d $testdir/packages ] || rm -r $testdir/packages
+ mkdir -p $testdir/packages
+ cd $testdir/packages
+ cp ../apt/apt_0.0.1_test.deb .
echo Creating Packages file for package
dpkg-scanpackages . /dev/null | tee Packages | gzip -c > Packages.gz
+ bzip2 -c < Packages > Packages.bz2
+ touch empty.txt
+ cd ..
+
+ mkdir -p $testdir/verify
+ cd $testdir/verify
+ touch empty-unknown-file
+ echo "blah" > nonempty-unknown-file
+ gzip -c < nonempty-unknown-file > gzip.gz
+ cp nonempty-unknown-file invalid-gzip.gz
}
-[ -f test_data/packages/Packages ] || make_packages_dir
+make_test_data
-pwd
+cd $rootdir
if [ $# -eq 0 ]; then
set -- apt_proxy.test
fi
-PYTHONPATH="`pwd`" trial --verbose --logfile `pwd`/unitttests.log $@
+rm -f `pwd`/unittests.log
+set -x
+PYTHONPATH="`pwd`" trial --logfile `pwd`/unittests.log $@
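For reference, the reworked script can run the whole suite or a single module, e.g. (the module name is an example based on the test files added in this commit):

    ./runtests                             # defaults to apt_proxy.test
    ./runtests apt_proxy.test.test_cache   # run a single test module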