Ticket #1409: remove-contrib.darcs.patch

File remove-contrib.darcs.patch, 216.8 KB (added by zooko at 2011-06-22T22:51:23Z)
1 patch for repository http://tahoe-lafs.org/source/tahoe-lafs/trunk:

Wed Jun 22 16:47:55 MDT 2011  zooko@zooko.com
  * contrib: remove the contributed fuse modules and the entire contrib/ directory, which is now empty
  Also remove a couple of vestigial references to figleaf, which is long gone.
  fixes #1409 (remove contrib/fuse)

New patches:

[contrib: remove the contributed fuse modules and the entire contrib/ directory, which is now empty
zooko@zooko.com**20110622224755
 Ignore-this: 85f78620f33ec01c8e93c795a3d04ef7
 Also remove a couple of vestigial references to figleaf, which is long gone.
 fixes #1409 (remove contrib/fuse)
] {
16hunk ./contrib/fuse/impl_c/blackmatch.py 1
17-#!/usr/bin/env python
18-
19-#-----------------------------------------------------------------------------------------------
20-from allmydata.uri import CHKFileURI, DirectoryURI, LiteralFileURI, is_literal_file_uri
21-from allmydata.scripts.common_http import do_http as do_http_req
22-from allmydata.util.hashutil import tagged_hash
23-from allmydata.util.assertutil import precondition
24-from allmydata.util import base32, fileutil, observer
25-from allmydata.scripts.common import get_aliases
26-
27-from twisted.python import usage
28-from twisted.python.failure import Failure
29-from twisted.internet.protocol import Factory, Protocol
30-from twisted.internet import reactor, defer, task
31-from twisted.web import client
32-
33-import base64
34-import errno
35-import heapq
36-import sha
37-import socket
38-import stat
39-import subprocess
40-import sys
41-import os
42-import weakref
43-#import pprint
44-
45-# one needs either python-fuse to have been installed in sys.path, or
46-# suitable affordances to be made in the build or runtime environment
47-import fuse
48-
49-import time
50-import traceback
51-import simplejson
52-import urllib
53-
54-VERSIONSTR="0.7"
55-
56-USAGE = 'usage: tahoe fuse [dir_cap_name] [fuse_options] mountpoint'
57-DEFAULT_DIRECTORY_VALIDITY=26
58-
59-if not hasattr(fuse, '__version__'):
60-    raise RuntimeError, \
61-        "your fuse-py doesn't know of fuse.__version__, probably it's too old."
62-
63-fuse.fuse_python_api = (0, 2)
64-fuse.feature_assert('stateful_files', 'has_init')
65-
66-class TahoeFuseOptions(usage.Options):
67-    optParameters = [
68-        ["node-directory", None, "~/.tahoe",
69-         "Look here to find out which Tahoe node should be used for all "
70-         "operations. The directory should either contain a full Tahoe node, "
71-         "or a file named node.url which points to some other Tahoe node. "
72-         "It should also contain a file named private/aliases which contains "
73-         "the mapping from alias name to root dirnode URI."
74-         ],
75-        ["node-url", None, None,
76-         "URL of the tahoe node to use, a URL like \"http://127.0.0.1:3456\". "
77-         "This overrides the URL found in the --node-directory ."],
78-        ["alias", None, None,
79-         "Which alias should be mounted."],
80-        ["root-uri", None, None,
81-         "Which root directory uri should be mounted."],
82-        ["cache-timeout", None, 20,
83-         "Time, in seconds, to cache directory data."],
84-        ]
85-    optFlags = [
86-        ['no-split', None,
87-         'run stand-alone; no splitting into client and server'],
88-        ['server', None,
89-         'server mode (should not be used by end users)'],
90-        ['server-shutdown', None,
91-         'shutdown server (should not be used by end users)'],
92-         ]
93-
94-    def __init__(self):
95-        usage.Options.__init__(self)
96-        self.fuse_options = []
97-        self.mountpoint = None
98-
99-    def opt_option(self, fuse_option):
100-        """
101-        Pass mount options directly to fuse.  See below.
102-        """
103-        self.fuse_options.append(fuse_option)
104-       
105-    opt_o = opt_option
106-
107-    def parseArgs(self, mountpoint=''):
108-        self.mountpoint = mountpoint
109-
110-    def getSynopsis(self):
111-        return "%s [options] mountpoint" % (os.path.basename(sys.argv[0]),)
112-
113-logfile = file('tfuse.log', 'ab')
114-
115-def reopen_logfile(fname):
116-    global logfile
117-    log('switching to %s' % (fname,))
118-    logfile.close()
119-    logfile = file(fname, 'ab')
120-
121-def log(msg):
122-    logfile.write("%s: %s\n" % (time.asctime(), msg))
123-    #time.sleep(0.1)
124-    logfile.flush()
125-
126-fuse.flog = log
127-
128-def unicode_to_utf8_or_str(u):
129-    if isinstance(u, unicode):
130-        return u.encode('utf-8')
131-    else:
132-        precondition(isinstance(u, str), repr(u))
133-        return u
134-
135-def do_http(method, url, body=''):
136-    resp = do_http_req(method, url, body)
137-    log('do_http(%s, %s) -> %s, %s' % (method, url, resp.status, resp.reason))
138-    if resp.status not in (200, 201):
139-        raise RuntimeError('http response (%s, %s)' % (resp.status, resp.reason))
140-    else:
141-        return resp.read()
142-
143-def flag2mode(flags):
144-    log('flag2mode(%r)' % (flags,))
145-    #md = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
146-    md = {os.O_RDONLY: 'rb', os.O_WRONLY: 'wb', os.O_RDWR: 'w+b'}
147-    m = md[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]
148-
149-    if flags & os.O_APPEND:
150-        m = m.replace('w', 'a', 1)
151-
152-    return m
153-
154-class TFSIOError(IOError):
155-    pass
156-
157-class ENOENT(TFSIOError):
158-    def __init__(self, msg):
159-        TFSIOError.__init__(self, errno.ENOENT, msg)
160-
161-class EINVAL(TFSIOError):
162-    def __init__(self, msg):
163-        TFSIOError.__init__(self, errno.EINVAL, msg)
164-
165-class EACCESS(TFSIOError):
166-    def __init__(self, msg):
167-        TFSIOError.__init__(self, errno.EACCESS, msg)
168-
169-class EEXIST(TFSIOError):
170-    def __init__(self, msg):
171-        TFSIOError.__init__(self, errno.EEXIST, msg)
172-
173-class EIO(TFSIOError):
174-    def __init__(self, msg):
175-        TFSIOError.__init__(self, errno.EIO, msg)
176-
177-def logargsretexc(meth):
178-    def inner_logargsretexc(self, *args, **kwargs):
179-        log("%s(%r, %r)" % (meth, args, kwargs))
180-        try:
181-            ret = meth(self, *args, **kwargs)
182-        except:
183-            log('exception:\n%s' % (traceback.format_exc(),))
184-            raise
185-        log("ret: %r" % (ret, ))
186-        return ret
187-    inner_logargsretexc.__name__ = '<logwrap(%s)>' % (meth,)
188-    return inner_logargsretexc
189-
190-def logexc(meth):
191-    def inner_logexc(self, *args, **kwargs):
192-        try:
193-            ret = meth(self, *args, **kwargs)
194-        except TFSIOError, tie:
195-            log('error: %s' % (tie,))
196-            raise
197-        except:
198-            log('exception:\n%s' % (traceback.format_exc(),))
199-            raise
200-        return ret
201-    inner_logexc.__name__ = '<logwrap(%s)>' % (meth,)
202-    return inner_logexc
203-
204-def log_exc():
205-    log('exception:\n%s' % (traceback.format_exc(),))
206-
207-def repr_mode(mode=None):
208-    if mode is None:
209-        return 'none'
210-    fields = ['S_ENFMT', 'S_IFBLK', 'S_IFCHR', 'S_IFDIR', 'S_IFIFO', 'S_IFLNK', 'S_IFREG', 'S_IFSOCK', 'S_IRGRP', 'S_IROTH', 'S_IRUSR', 'S_IRWXG', 'S_IRWXO', 'S_IRWXU', 'S_ISGID', 'S_ISUID', 'S_ISVTX', 'S_IWGRP', 'S_IWOTH', 'S_IWUSR', 'S_IXGRP', 'S_IXOTH', 'S_IXUSR']
211-    ret = []
212-    for field in fields:
213-        fval = getattr(stat, field)
214-        if (mode & fval) == fval:
215-            ret.append(field)
216-    return '|'.join(ret)
217-
218-def repr_flags(flags=None):
219-    if flags is None:
220-        return 'none'
221-    fields = [ 'O_APPEND', 'O_CREAT', 'O_DIRECT', 'O_DIRECTORY', 'O_EXCL', 'O_EXLOCK',
222-               'O_LARGEFILE', 'O_NDELAY', 'O_NOCTTY', 'O_NOFOLLOW', 'O_NONBLOCK', 'O_RDWR',
223-               'O_SHLOCK', 'O_SYNC', 'O_TRUNC', 'O_WRONLY', ]
224-    ret = []
225-    for field in fields:
226-        fval = getattr(os, field, None)
227-        if fval is not None and (flags & fval) == fval:
228-            ret.append(field)
229-    if not ret:
230-        ret = ['O_RDONLY']
231-    return '|'.join(ret)
232-
233-class DownloaderWithReadQueue(object):
234-    def __init__(self):
235-        self.read_heap = []
236-        self.dest_file_name = None
237-        self.running = False
238-        self.done_observer = observer.OneShotObserverList()
239-
240-    def __repr__(self):
241-        name = self.dest_file_name is None and '<none>' or os.path.basename(self.dest_file_name)
242-        return "<DWRQ(%s)> q(%s)" % (name, len(self.read_heap or []))
243-
244-    def log(self, msg):
245-        log("%r: %s" % (self, msg))
246-
247-    @logexc
248-    def start(self, url, dest_file_name, target_size, interval=0.5):
249-        self.log('start(%s, %s, %s)' % (url, dest_file_name, target_size, ))
250-        self.dest_file_name = dest_file_name
251-        file(self.dest_file_name, 'wb').close() # touch
252-        self.target_size = target_size
253-        self.log('start()')
254-        self.loop = task.LoopingCall(self._check_file_size)
255-        self.loop.start(interval)
256-        self.running = True
257-        d = client.downloadPage(url, self.dest_file_name)
258-        d.addCallbacks(self.done, self.fail)
259-        return d
260-
261-    def when_done(self):
262-        return self.done_observer.when_fired()
263-
264-    def get_size(self):
265-        if os.path.exists(self.dest_file_name):
266-            return os.path.getsize(self.dest_file_name)
267-        else:
268-            return 0
269-
270-    @logexc
271-    def _read(self, posn, size):
272-        #self.log('_read(%s, %s)' % (posn, size))
273-        f = file(self.dest_file_name, 'rb')
274-        f.seek(posn)
275-        data = f.read(size)
276-        f.close()
277-        return data
278-
279-    @logexc
280-    def read(self, posn, size):
281-        self.log('read(%s, %s)' % (posn, size))
282-        if self.read_heap is None:
283-            raise ValueError('read() called when already shut down')
284-        if posn+size > self.target_size:
285-            size -= self.target_size - posn
286-        fsize = self.get_size()
287-        if posn+size < fsize:
288-            return defer.succeed(self._read(posn, size))
289-        else:
290-            d = defer.Deferred()
291-            dread = (posn+size, posn, d)
292-            heapq.heappush(self.read_heap, dread)
293-        return d
294-
295-    @logexc
296-    def _check_file_size(self):
297-        #self.log('_check_file_size()')
298-        if self.read_heap:
299-            try:
300-                size = self.get_size()
301-                while self.read_heap and self.read_heap[0][0] <= size:
302-                    end, start, d = heapq.heappop(self.read_heap)
303-                    data = self._read(start, end-start)
304-                    d.callback(data)
305-            except Exception, e:
306-                log_exc()
307-                failure = Failure()
308-
309-    @logexc
310-    def fail(self, failure):
311-        self.log('fail(%s)' % (failure,))
312-        self.running = False
313-        if self.loop.running:
314-            self.loop.stop()
315-        # fail any reads still pending
316-        for end, start, d in self.read_heap:
317-            reactor.callLater(0, d.errback, failure)
318-        self.read_heap = None
319-        self.done_observer.fire_if_not_fired(failure)
320-        return failure
321-
322-    @logexc
323-    def done(self, result):
324-        self.log('done()')
325-        self.running = False
326-        if self.loop.running:
327-            self.loop.stop()
328-        precondition(self.get_size() == self.target_size, self.get_size(), self.target_size)
329-        self._check_file_size() # process anything left pending in heap
330-        precondition(not self.read_heap, self.read_heap, self.target_size, self.get_size())
331-        self.read_heap = None
332-        self.done_observer.fire_if_not_fired(self)
333-        return result
334-
335-
336-class TahoeFuseFile(object):
337-
338-    #def __init__(self, path, flags, *mode):
339-    def __init__(self, tfs, path, flags, *mode):
340-        log("TFF: __init__(%r, %r:%s, %r:%s)" % (path, flags, repr_flags(flags), mode, repr_mode(*mode)))
341-        self.tfs = tfs
342-        self.downloader = None
343-
344-        self._path = path # for tahoe put
345-        try:
346-            self.parent, self.name, self.fnode = self.tfs.get_parent_name_and_child(path)
347-            m = flag2mode(flags)
348-            log('TFF: flags2(mode) -> %s' % (m,))
349-            if m[0] in 'wa':
350-                # write
351-                self.fname = self.tfs.cache.tmp_file(os.urandom(20))
352-                if self.fnode is None:
353-                    log('TFF: [%s] open() for write: no file node, creating new File %s' % (self.name, self.fname, ))
354-                    self.fnode = File(0, LiteralFileURI.BASE_STRING)
355-                    self.fnode.tmp_fname = self.fname # XXX kill this
356-                    self.parent.add_child(self.name, self.fnode, {})
357-                elif hasattr(self.fnode, 'tmp_fname'):
358-                    self.fname = self.fnode.tmp_fname
359-                    log('TFF: [%s] open() for write: existing file node lists %s' % (self.name, self.fname, ))
360-                else:
361-                    log('TFF: [%s] open() for write: existing file node lists no tmp_file, using new %s' % (self.name, self.fname, ))
362-                if mode != (0600,):
363-                    log('TFF: [%s] changing mode %s(%s) to 0600' % (self.name, repr_mode(*mode), mode))
364-                    mode = (0600,)
365-                log('TFF: [%s] opening(%s) with flags %s(%s), mode %s(%s)' % (self.name, self.fname, repr_flags(flags|os.O_CREAT), flags|os.O_CREAT, repr_mode(*mode), mode))
366-                #self.file = os.fdopen(os.open(self.fname, flags|os.O_CREAT, *mode), m)
367-                self.file = os.fdopen(os.open(self.fname, flags|os.O_CREAT, *mode), m)
368-                self.fd = self.file.fileno()
369-                log('TFF: opened(%s) for write' % self.fname)
370-                self.open_for_write = True
371-            else:
372-                # read
373-                assert self.fnode is not None
374-                uri = self.fnode.get_uri()
375-
376-                # XXX make this go away
377-                if hasattr(self.fnode, 'tmp_fname'):
378-                    self.fname = self.fnode.tmp_fname
379-                    log('TFF: reopening(%s) for reading' % self.fname)
380-                else:
381-                    if is_literal_file_uri(uri) or not self.tfs.async:
382-                        log('TFF: synchronously fetching file from cache for reading')
383-                        self.fname = self.tfs.cache.get_file(uri)
384-                    else:
385-                        log('TFF: asynchronously fetching file from cache for reading')
386-                        self.fname, self.downloader = self.tfs.cache.async_get_file(uri)
387-                        # downloader is None if the cache already contains the file
388-                        if self.downloader is not None:
389-                            d = self.downloader.when_done()
390-                            def download_complete(junk):
391-                                # once the download is complete, revert to non-async behaviour
392-                                self.downloader = None
393-                            d.addCallback(download_complete)
394-
395-                self.file = os.fdopen(os.open(self.fname, flags, *mode), m)
396-                self.fd = self.file.fileno()
397-                self.open_for_write = False
398-                log('TFF: opened(%s) for read' % self.fname)
399-        except:
400-            log_exc()
401-            raise
402-
403-    def log(self, msg):
404-        log("<TFF(%s:%s)> %s" % (os.path.basename(self.fname), self.name, msg))
405-
406-    @logexc
407-    def read(self, size, offset):
408-        self.log('read(%r, %r)' % (size, offset, ))
409-        if self.downloader:
410-            # then we're busy doing an async download
411-            # (and hence implicitly, we're in an environment that supports twisted)
412-            #self.log('passing read() to %s' % (self.downloader, ))
413-            d = self.downloader.read(offset, size)
414-            def thunk(failure):
415-                raise EIO(str(failure))
416-            d.addErrback(thunk)
417-            return d
418-        else:
419-            self.log('servicing read() from %s' % (self.file, ))
420-            self.file.seek(offset)
421-            return self.file.read(size)
422-
423-    @logexc
424-    def write(self, buf, offset):
425-        self.log("write(-%s-, %r)" % (len(buf), offset))
426-        if not self.open_for_write:
427-            return -errno.EACCES
428-        self.file.seek(offset)
429-        self.file.write(buf)
430-        return len(buf)
431-
432-    @logexc
433-    def release(self, flags):
434-        self.log("release(%r)" % (flags,))
435-        self.file.close()
436-        if self.open_for_write:
437-            size = os.path.getsize(self.fname)
438-            self.fnode.size = size
439-            file_cap = self.tfs.upload(self.fname)
440-            self.fnode.ro_uri = file_cap
441-            # XXX [ ] TODO: set metadata
442-            # write new uri into parent dir entry
443-            self.parent.add_child(self.name, self.fnode, {})
444-            self.log("uploaded: %s" % (file_cap,))
445-
446-        # dbg
447-        print_tree()
448-
449-    def _fflush(self):
450-        if 'w' in self.file.mode or 'a' in self.file.mode:
451-            self.file.flush()
452-
453-    @logexc
454-    def fsync(self, isfsyncfile):
455-        self.log("fsync(%r)" % (isfsyncfile,))
456-        self._fflush()
457-        if isfsyncfile and hasattr(os, 'fdatasync'):
458-            os.fdatasync(self.fd)
459-        else:
460-            os.fsync(self.fd)
461-
462-    @logexc
463-    def flush(self):
464-        self.log("flush()")
465-        self._fflush()
466-        # cf. xmp_flush() in fusexmp_fh.c
467-        os.close(os.dup(self.fd))
468-
469-    @logexc
470-    def fgetattr(self):
471-        self.log("fgetattr()")
472-        s = os.fstat(self.fd)
473-        d = stat_to_dict(s)
474-        if self.downloader:
475-            size = self.downloader.target_size
476-            self.log("fgetattr() during async download, cache file: %s, size=%s" % (s, size))
477-            d['st_size'] = size
478-        self.log("fgetattr() -> %r" % (d,))
479-        return d
480-
481-    @logexc
482-    def ftruncate(self, len):
483-        self.log("ftruncate(%r)" % (len,))
484-        self.file.truncate(len)
485-
486-class TahoeFuseBase(object):
487-
488-    def __init__(self, tfs):
489-        log("TFB: __init__()")
490-        self.tfs = tfs
491-        self.files = {}
492-
493-    def log(self, msg):
494-        log("<TFB> %s" % (msg, ))
495-
496-    @logexc
497-    def readlink(self, path):
498-        self.log("readlink(%r)" % (path,))
499-        node = self.tfs.get_path(path)
500-        if node:
501-            raise EINVAL('Not a symlink') # nothing in tahoe is a symlink
502-        else:
503-            raise ENOENT('Invalid argument')
504-
505-    @logexc
506-    def unlink(self, path):
507-        self.log("unlink(%r)" % (path,))
508-        self.tfs.unlink(path)
509-
510-    @logexc
511-    def rmdir(self, path):
512-        self.log("rmdir(%r)" % (path,))
513-        self.tfs.unlink(path)
514-
515-    @logexc
516-    def symlink(self, path, path1):
517-        self.log("symlink(%r, %r)" % (path, path1))
518-        self.tfs.link(path, path1)
519-
520-    @logexc
521-    def rename(self, path, path1):
522-        self.log("rename(%r, %r)" % (path, path1))
523-        self.tfs.rename(path, path1)
524-
525-    @logexc
526-    def link(self, path, path1):
527-        self.log("link(%r, %r)" % (path, path1))
528-        self.tfs.link(path, path1)
529-
530-    @logexc
531-    def chmod(self, path, mode):
532-        self.log("XX chmod(%r, %r)" % (path, mode))
533-        #return -errno.EOPNOTSUPP
534-
535-    @logexc
536-    def chown(self, path, user, group):
537-        self.log("XX chown(%r, %r, %r)" % (path, user, group))
538-        #return -errno.EOPNOTSUPP
539-
540-    @logexc
541-    def truncate(self, path, len):
542-        self.log("XX truncate(%r, %r)" % (path, len))
543-        #return -errno.EOPNOTSUPP
544-
545-    @logexc
546-    def utime(self, path, times):
547-        self.log("XX utime(%r, %r)" % (path, times))
548-        #return -errno.EOPNOTSUPP
549-
550-    @logexc
551-    def statfs(self):
552-        self.log("statfs()")
553-        """
554-        Should return an object with statvfs attributes (f_bsize, f_frsize...).
555-        Eg., the return value of os.statvfs() is such a thing (since py 2.2).
556-        If you are not reusing an existing statvfs object, start with
557-        fuse.StatVFS(), and define the attributes.
558-
559-        To provide usable information (ie., you want sensible df(1)
560-        output, you are suggested to specify the following attributes:
561-
562-            - f_bsize - preferred size of file blocks, in bytes
563-            - f_frsize - fundamental size of file blcoks, in bytes
564-                [if you have no idea, use the same as blocksize]
565-            - f_blocks - total number of blocks in the filesystem
566-            - f_bfree - number of free blocks
567-            - f_files - total number of file inodes
568-            - f_ffree - nunber of free file inodes
569-        """
570-
571-        block_size = 4096 # 4k
572-        preferred_block_size = 131072 # 128k, c.f. seg_size
573-        fs_size = 8*2**40 # 8Tb
574-        fs_free = 2*2**40 # 2Tb
575-
576-        #s = fuse.StatVfs(f_bsize = preferred_block_size,
577-        s = dict(f_bsize = preferred_block_size,
578-                         f_frsize = block_size,
579-                         f_blocks = fs_size / block_size,
580-                         f_bfree = fs_free / block_size,
581-                         f_bavail = fs_free / block_size,
582-                         f_files = 2**30, # total files
583-                         f_ffree = 2**20, # available files
584-                         f_favail = 2**20, # available files (root)
585-                         f_flag = 2, # no suid
586-                         f_namemax = 255) # max name length
587-        #self.log('statfs(): %r' % (s,))
588-        return s
589-
590-    def fsinit(self):
591-        self.log("fsinit()")
592-
593-    ##################################################################
594-
595-    @logexc
596-    def readdir(self, path, offset):
597-        self.log('readdir(%r, %r)' % (path, offset))
598-        node = self.tfs.get_path(path)
599-        if node is None:
600-            return -errno.ENOENT
601-        dirlist = ['.', '..'] + node.children.keys()
602-        self.log('dirlist = %r' % (dirlist,))
603-        #return [fuse.Direntry(d) for d in dirlist]
604-        return dirlist
605-
606-    @logexc
607-    def getattr(self, path):
608-        self.log('getattr(%r)' % (path,))
609-
610-        if path == '/':
611-            # we don't have any metadata for the root (no edge leading to it)
612-            mode = (stat.S_IFDIR | 755)
613-            mtime = self.tfs.root.mtime
614-            s = TStat({}, st_mode=mode, st_nlink=1, st_mtime=mtime)
615-            self.log('getattr(%r) -> %r' % (path, s))
616-            #return s
617-            return stat_to_dict(s)
618-           
619-        parent, name, child = self.tfs.get_parent_name_and_child(path)
620-        if not child: # implicitly 'or not parent'
621-            raise ENOENT('No such file or directory')
622-        return stat_to_dict(parent.get_stat(name))
623-
624-    @logexc
625-    def access(self, path, mode):
626-        self.log("access(%r, %r)" % (path, mode))
627-        node = self.tfs.get_path(path)
628-        if not node:
629-            return -errno.ENOENT
630-        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
631-        if (mode & 0222):
632-            if not node.writable():
633-                log('write access denied for %s (req:%o)' % (path, mode, ))
634-                return -errno.EACCES
635-        #else:
636-            #log('access granted for %s' % (path, ))
637-
638-    @logexc
639-    def mkdir(self, path, mode):
640-        self.log("mkdir(%r, %r)" % (path, mode))
641-        self.tfs.mkdir(path)
642-
643-    ##################################################################
644-    # file methods
645-
646-    def open(self, path, flags):
647-        self.log('open(%r, %r)' % (path, flags, ))
648-        if path in self.files:
649-            # XXX todo [ ] should consider concurrent open files of differing modes
650-            return
651-        else:
652-            tffobj = TahoeFuseFile(self.tfs, path, flags)
653-            self.files[path] = tffobj
654-
655-    def create(self, path, flags, mode):
656-        self.log('create(%r, %r, %r)' % (path, flags, mode))
657-        if path in self.files:
658-            # XXX todo [ ] should consider concurrent open files of differing modes
659-            return
660-        else:
661-            tffobj = TahoeFuseFile(self.tfs, path, flags, mode)
662-            self.files[path] = tffobj
663-
664-    def _get_file(self, path):
665-        if not path in self.files:
666-            raise ENOENT('No such file or directory: %s' % (path,))
667-        return self.files[path]
668-
669-    ##
670-
671-    def read(self, path, size, offset):
672-        self.log('read(%r, %r, %r)' % (path, size, offset, ))
673-        return self._get_file(path).read(size, offset)
674-
675-    @logexc
676-    def write(self, path, buf, offset):
677-        self.log("write(%r, -%s-, %r)" % (path, len(buf), offset))
678-        return self._get_file(path).write(buf, offset)
679-
680-    @logexc
681-    def release(self, path, flags):
682-        self.log("release(%r, %r)" % (path, flags,))
683-        self._get_file(path).release(flags)
684-        del self.files[path]
685-
686-    @logexc
687-    def fsync(self, path, isfsyncfile):
688-        self.log("fsync(%r, %r)" % (path, isfsyncfile,))
689-        return self._get_file(path).fsync(isfsyncfile)
690-
691-    @logexc
692-    def flush(self, path):
693-        self.log("flush(%r)" % (path,))
694-        return self._get_file(path).flush()
695-
696-    @logexc
697-    def fgetattr(self, path):
698-        self.log("fgetattr(%r)" % (path,))
699-        return self._get_file(path).fgetattr()
700-
701-    @logexc
702-    def ftruncate(self, path, len):
703-        self.log("ftruncate(%r, %r)" % (path, len,))
704-        return self._get_file(path).ftruncate(len)
705-
706-class TahoeFuseLocal(TahoeFuseBase, fuse.Fuse):
707-    def __init__(self, tfs, *args, **kw):
708-        log("TFL: __init__(%r, %r)" % (args, kw))
709-        TahoeFuseBase.__init__(self, tfs)
710-        fuse.Fuse.__init__(self, *args, **kw)
711-
712-    def log(self, msg):
713-        log("<TFL> %s" % (msg, ))
714-
715-    def main(self, *a, **kw):
716-        self.log("main(%r, %r)" % (a, kw))
717-        return fuse.Fuse.main(self, *a, **kw)
718-
719-    # overrides for those methods which return objects not marshalled
720-    def fgetattr(self, path):
721-        return TStat({}, **(TahoeFuseBase.fgetattr(self, path)))
722-
723-    def getattr(self, path):
724-        return TStat({}, **(TahoeFuseBase.getattr(self, path)))
725-
726-    def statfs(self):
727-        return fuse.StatVfs(**(TahoeFuseBase.statfs(self)))
728-        #self.log('statfs()')
729-        #ret = fuse.StatVfs(**(TahoeFuseBase.statfs(self)))
730-        #self.log('statfs(): %r' % (ret,))
731-        #return ret
732-
733-    @logexc
734-    def readdir(self, path, offset):
735-        return [ fuse.Direntry(d) for d in TahoeFuseBase.readdir(self, path, offset) ]
736-
737-class TahoeFuseShim(fuse.Fuse):
738-    def __init__(self, trpc, *args, **kw):
739-        log("TF: __init__(%r, %r)" % (args, kw))
740-        self.trpc = trpc
741-        fuse.Fuse.__init__(self, *args, **kw)
742-
743-    def log(self, msg):
744-        log("<TFs> %s" % (msg, ))
745-
746-    @logexc
747-    def readlink(self, path):
748-        self.log("readlink(%r)" % (path,))
749-        return self.trpc.call('readlink', path)
750-
751-    @logexc
752-    def unlink(self, path):
753-        self.log("unlink(%r)" % (path,))
754-        return self.trpc.call('unlink', path)
755-
756-    @logexc
757-    def rmdir(self, path):
758-        self.log("rmdir(%r)" % (path,))
759-        return self.trpc.call('unlink', path)
760-
761-    @logexc
762-    def symlink(self, path, path1):
763-        self.log("symlink(%r, %r)" % (path, path1))
764-        return self.trpc.call('link', path, path1)
765-
766-    @logexc
767-    def rename(self, path, path1):
768-        self.log("rename(%r, %r)" % (path, path1))
769-        return self.trpc.call('rename', path, path1)
770-
771-    @logexc
772-    def link(self, path, path1):
773-        self.log("link(%r, %r)" % (path, path1))
774-        return self.trpc.call('link', path, path1)
775-
776-    @logexc
777-    def chmod(self, path, mode):
778-        self.log("XX chmod(%r, %r)" % (path, mode))
779-        return self.trpc.call('chmod', path, mode)
780-
781-    @logexc
782-    def chown(self, path, user, group):
783-        self.log("XX chown(%r, %r, %r)" % (path, user, group))
784-        return self.trpc.call('chown', path, user, group)
785-
786-    @logexc
787-    def truncate(self, path, len):
788-        self.log("XX truncate(%r, %r)" % (path, len))
789-        return self.trpc.call('truncate', path, len)
790-
791-    @logexc
792-    def utime(self, path, times):
793-        self.log("XX utime(%r, %r)" % (path, times))
794-        return self.trpc.call('utime', path, times)
795-
796-    @logexc
797-    def statfs(self):
798-        self.log("statfs()")
799-        response = self.trpc.call('statfs')
800-        #self.log("statfs(): %r" % (response,))
801-        kwargs = dict([ (str(k),v) for k,v in response.items() ])
802-        return fuse.StatVfs(**kwargs)
803-
804-    def fsinit(self):
805-        self.log("fsinit()")
806-
807-    def main(self, *a, **kw):
808-        self.log("main(%r, %r)" % (a, kw))
809-
810-        return fuse.Fuse.main(self, *a, **kw)
811-
812-    ##################################################################
813-
814-    @logexc
815-    def readdir(self, path, offset):
816-        self.log('readdir(%r, %r)' % (path, offset))
817-        return [ fuse.Direntry(d) for d in self.trpc.call('readdir', path, offset) ]
818-
819-    @logexc
820-    def getattr(self, path):
821-        self.log('getattr(%r)' % (path,))
822-        response = self.trpc.call('getattr', path)
823-        kwargs = dict([ (str(k),v) for k,v in response.items() ])
824-        s = TStat({}, **kwargs)
825-        self.log('getattr(%r) -> %r' % (path, s))
826-        return s
827-
828-    @logexc
829-    def access(self, path, mode):
830-        self.log("access(%r, %r)" % (path, mode))
831-        return self.trpc.call('access', path, mode)
832-
833-    @logexc
834-    def mkdir(self, path, mode):
835-        self.log("mkdir(%r, %r)" % (path, mode))
836-        return self.trpc.call('mkdir', path, mode)
837-
838-    ##################################################################
839-    # file methods
840-
841-    def open(self, path, flags):
842-        self.log('open(%r, %r)' % (path, flags, ))
843-        return self.trpc.call('open', path, flags)
844-
845-    def create(self, path, flags, mode):
846-        self.log('create(%r, %r, %r)' % (path, flags, mode))
847-        return self.trpc.call('create', path, flags, mode)
848-
849-    ##
850-
851-    def read(self, path, size, offset):
852-        self.log('read(%r, %r, %r)' % (path, size, offset, ))
853-        return self.trpc.call('read', path, size, offset)
854-
855-    @logexc
856-    def write(self, path, buf, offset):
857-        self.log("write(%r, -%s-, %r)" % (path, len(buf), offset))
858-        return self.trpc.call('write', path, buf, offset)
859-
860-    @logexc
861-    def release(self, path, flags):
862-        self.log("release(%r, %r)" % (path, flags,))
863-        return self.trpc.call('release', path, flags)
864-
865-    @logexc
866-    def fsync(self, path, isfsyncfile):
867-        self.log("fsync(%r, %r)" % (path, isfsyncfile,))
868-        return self.trpc.call('fsync', path, isfsyncfile)
869-
870-    @logexc
871-    def flush(self, path):
872-        self.log("flush(%r)" % (path,))
873-        return self.trpc.call('flush', path)
874-
875-    @logexc
876-    def fgetattr(self, path):
877-        self.log("fgetattr(%r)" % (path,))
878-        #return self.trpc.call('fgetattr', path)
879-        response = self.trpc.call('fgetattr', path)
880-        kwargs = dict([ (str(k),v) for k,v in response.items() ])
881-        s = TStat({}, **kwargs)
882-        self.log('getattr(%r) -> %r' % (path, s))
883-        return s
884-
885-    @logexc
886-    def ftruncate(self, path, len):
887-        self.log("ftruncate(%r, %r)" % (path, len,))
888-        return self.trpc.call('ftruncate', path, len)
889-
890-
891-def launch_tahoe_fuse(tf_class, tobj, argv):
892-    sys.argv = ['tahoe fuse'] + list(argv)
893-    log('setting sys.argv=%r' % (sys.argv,))
894-    config = TahoeFuseOptions()
895-    version = "%prog " +VERSIONSTR+", fuse "+ fuse.__version__
896-    server = tf_class(tobj, version=version, usage=config.getSynopsis(), dash_s_do='setsingle')
897-    server.parse(errex=1)
898-    server.main()
899-
900-def getnodeurl(nodedir):
901-    f = file(os.path.expanduser(os.path.join(nodedir, "node.url")), 'rb')
902-    nu = f.read().strip()
903-    f.close()
904-    if nu[-1] != "/":
905-        nu += "/"
906-    return nu
907-
908-def fingerprint(uri):
909-    if uri is None:
910-        return None
911-    return base64.b32encode(sha.new(uri).digest()).lower()[:6]
912-
913-stat_fields = [ 'st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid', 'st_size',
914-                'st_atime', 'st_mtime', 'st_ctime', ]
915-def stat_to_dict(statobj, fields=None):
916-    if fields is None:
917-        fields = stat_fields
918-    d = {}
919-    for f in fields:
920-        d[f] = getattr(statobj, f, None)
921-    return d
922-
923-class TStat(fuse.Stat):
924-    # in fuse 0.2, these are set by fuse.Stat.__init__
925-    # in fuse 0.2-pre3 (hardy) they are not. badness ensues if they're missing
926-    st_mode  = None
927-    st_ino   = 0
928-    st_dev   = 0
929-    st_nlink = None
930-    st_uid   = 0
931-    st_gid   = 0
932-    st_size  = 0
933-    st_atime = 0
934-    st_mtime = 0
935-    st_ctime = 0
936-
937-    fields = [ 'st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid', 'st_gid', 'st_size',
938-               'st_atime', 'st_mtime', 'st_ctime', ]
939-    def __init__(self, metadata, **kwargs):
940-        # first load any stat fields present in 'metadata'
941-        for st in [ 'mtime', 'ctime' ]:
942-            if st in metadata:
943-                setattr(self, "st_%s" % st, metadata[st])
944-        for st in self.fields:
945-            if st in metadata:
946-                setattr(self, st, metadata[st])
947-
948-        # then set any values passed in as kwargs
949-        fuse.Stat.__init__(self, **kwargs)
950-
951-    def __repr__(self):
952-        return "<Stat%r>" % (stat_to_dict(self),)
953-
954-class Directory(object):
955-    def __init__(self, tfs, ro_uri, rw_uri):
956-        self.tfs = tfs
957-        self.ro_uri = ro_uri
958-        self.rw_uri = rw_uri
959-        assert (rw_uri or ro_uri)
960-        self.children = {}
961-        self.last_load = None
962-        self.last_data = None
963-        self.mtime = 0
964-
965-    def __repr__(self):
966-        return "<Directory %s>" % (fingerprint(self.get_uri()),)
967-
968-    def maybe_refresh(self, name=None):
969-        """
970-        if the previously cached data was retrieved within the cache
971-        validity period, does nothing. otherwise refetches the data
972-        for this directory and reloads itself
973-        """
974-        now = time.time()
975-        if self.last_load is None or (now - self.last_load) > self.tfs.cache_validity:
976-            self.load(name)
977-
978-    def load(self, name=None):
979-        now = time.time()
980-        log('%s.loading(%s)' % (self, name))
981-        url = self.tfs.compose_url("uri/%s?t=json", self.get_uri())
982-        data = urllib.urlopen(url).read()
983-        h = tagged_hash('cache_hash', data)
984-        if h == self.last_data:
985-            self.last_load = now
986-            log('%s.load() : no change h(data)=%s' % (self, base32.b2a(h), ))
987-            return
988-        try:
989-            parsed = simplejson.loads(data)
990-        except ValueError:
991-            log('%s.load(): unable to parse json data for dir:\n%r' % (self, data))
992-            return
993-        nodetype, d = parsed
994-        assert nodetype == 'dirnode'
995-        self.children.clear()
996-        for cname,details in d['children'].items():
997-            cname = unicode_to_utf8_or_str(cname)
998-            ctype, cattrs = details
999-            metadata = cattrs.get('metadata', {})
1000-            if ctype == 'dirnode':
1001-                cobj = self.tfs.dir_for(cname, cattrs.get('ro_uri'), cattrs.get('rw_uri'))
1002-            else:
1003-                assert ctype == "filenode"
1004-                cobj = File(cattrs.get('size'), cattrs.get('ro_uri'))
1005-            self.children[cname] = cobj, metadata
1006-        self.last_load = now
1007-        self.last_data = h
1008-        self.mtime = now
1009-        log('%s.load() loaded: \n%s' % (self, self.pprint(),))
1010-
1011-    def get_children(self):
1012-        return self.children.keys()
1013-
1014-    def get_child(self, name):
1015-        return self.children[name][0]
1016-
1017-    def add_child(self, name, child, metadata):
1018-        log('%s.add_child(%r, %r, %r)' % (self, name, child, metadata, ))
1019-        self.children[name] = child, metadata
1020-        url = self.tfs.compose_url("uri/%s/%s?t=uri", self.get_uri(), name)
1021-        child_cap = do_http('PUT', url, child.get_uri())
1022-        # XXX [ ] TODO: push metadata to tahoe node
1023-        assert child_cap == child.get_uri()
1024-        self.mtime = time.time()
1025-        log('added child %r with %r to %r' % (name, child_cap, self))
1026-
1027-    def remove_child(self, name):
1028-        log('%s.remove_child(%r)' % (self, name, ))
1029-        del self.children[name]
1030-        url = self.tfs.compose_url("uri/%s/%s", self.get_uri(), name)
1031-        resp = do_http('DELETE', url)
1032-        self.mtime = time.time()
1033-        log('child (%s) removal yielded %r' % (name, resp,))
1034-
1035-    def get_uri(self):
1036-        return self.rw_uri or self.ro_uri
1037-
1038-    # TODO: rename to 'is_writeable', or switch sense to 'is_readonly', for consistency with Tahoe code
1039-    def writable(self):
1040-        return self.rw_uri and self.rw_uri != self.ro_uri
1041-
1042-    def pprint(self, prefix='', printed=None, suffix=''):
1043-        ret = []
1044-        if printed is None:
1045-            printed = set()
1046-        writable = self.writable() and '+' or ' '
1047-        if self in printed:
1048-            ret.append("         %s/%s ... <%s> : %s" % (prefix, writable, fingerprint(self.get_uri()), suffix, ))
1049-        else:
1050-            ret.append("[%s] %s/%s : %s" % (fingerprint(self.get_uri()), prefix, writable, suffix, ))
1051-            printed.add(self)
1052-            for name,(child,metadata) in sorted(self.children.items()):
1053-                ret.append(child.pprint(' ' * (len(prefix)+1)+name, printed, repr(metadata)))
1054-        return '\n'.join(ret)
1055-
1056-    def get_metadata(self, name):
1057-        return self.children[name][1]
1058-
1059-    def get_stat(self, name):
1060-        child,metadata = self.children[name]
1061-        log("%s.get_stat(%s) md: %r" % (self, name, metadata))
1062-
1063-        if isinstance(child, Directory):
1064-            child.maybe_refresh(name)
1065-            mode = metadata.get('st_mode') or (stat.S_IFDIR | 0755)
1066-            s = TStat(metadata, st_mode=mode, st_nlink=1, st_mtime=child.mtime)
1067-        else:
1068-            if hasattr(child, 'tmp_fname'):
1069-                s = os.stat(child.tmp_fname)
1070-                log("%s.get_stat(%s) returning local stat of tmp file" % (self, name, ))
1071-            else:
1072-                s = TStat(metadata,
1073-                          st_nlink = 1,
1074-                          st_size = child.size,
1075-                          st_mode = metadata.get('st_mode') or (stat.S_IFREG | 0444),
1076-                          st_mtime = metadata.get('mtime') or self.mtime,
1077-                          )
1078-            return s
1079-
1080-        log("%s.get_stat(%s)->%s" % (self, name, s))
1081-        return s
1082-
1083-class File(object):
1084-    def __init__(self, size, ro_uri):
1085-        self.size = size
1086-        if ro_uri:
1087-            ro_uri = str(ro_uri)
1088-        self.ro_uri = ro_uri
1089-
1090-    def __repr__(self):
1091-        return "<File %s>" % (fingerprint(self.ro_uri) or [self.tmp_fname],)
1092-
1093-    def pprint(self, prefix='', printed=None, suffix=''):
1094-        return "         %s (%s) : %s" % (prefix, self.size, suffix, )
1095-
1096-    def get_uri(self):
1097-        return self.ro_uri
1098-
1099-    def writable(self):
1100-        return True
1101-
1102-class TFS(object):
1103-    def __init__(self, nodedir, nodeurl, root_uri,
1104-                       cache_validity_period=DEFAULT_DIRECTORY_VALIDITY, async=False):
1105-        self.cache_validity = cache_validity_period
1106-        self.nodeurl = nodeurl
1107-        self.root_uri = root_uri
1108-        self.async = async
1109-        self.dirs = {}
1110-
1111-        cachedir = os.path.expanduser(os.path.join(nodedir, '_cache'))
1112-        self.cache = FileCache(nodeurl, cachedir)
1113-        ro_uri = DirectoryURI.init_from_string(self.root_uri).get_readonly()
1114-        self.root = Directory(self, ro_uri, self.root_uri)
1115-        self.root.maybe_refresh('<root>')
1116-
1117-    def log(self, msg):
1118-        log("<TFS> %s" % (msg, ))
1119-
1120-    def pprint(self):
1121-        return self.root.pprint()
1122-
1123-    def compose_url(self, fmt, *args):
1124-        return self.nodeurl + (fmt % tuple(map(urllib.quote, args)))
1125-
1126-    def get_parent_name_and_child(self, path):
1127-        """
1128-        find the parent dir node, name of child relative to that parent, and
1129-        child node within the TFS object space.
1130-        @returns: (parent, name, child) if the child is found
1131-                  (parent, name, None) if the child is missing from the parent
1132-                  (None, name, None) if the parent is not found
1133-        """
1134-        if path == '/':
1135-            return
1136-        dirname, name = os.path.split(path)
1137-        parent = self.get_path(dirname)
1138-        if parent:
1139-            try:
1140-                child = parent.get_child(name)
1141-                return parent, name, child
1142-            except KeyError:
1143-                return parent, name, None
1144-        else:
1145-            return None, name, None
1146-
1147-    def get_path(self, path):
1148-        comps = path.strip('/').split('/')
1149-        if comps == ['']:
1150-            comps = []
1151-        cursor = self.root
1152-        c_name = '<root>'
1153-        for comp in comps:
1154-            if not isinstance(cursor, Directory):
1155-                self.log('path "%s" is not a dir' % (path,))
1156-                return None
1157-            cursor.maybe_refresh(c_name)
1158-            try:
1159-                cursor = cursor.get_child(comp)
1160-                c_name = comp
1161-            except KeyError:
1162-                self.log('path "%s" not found' % (path,))
1163-                return None
1164-        if isinstance(cursor, Directory):
1165-            cursor.maybe_refresh(c_name)
1166-        return cursor
1167-
1168-    def dir_for(self, name, ro_uri, rw_uri):
1169-        #self.log('dir_for(%s) [%s/%s]' % (name, fingerprint(ro_uri), fingerprint(rw_uri)))
1170-        if ro_uri:
1171-            ro_uri = str(ro_uri)
1172-        if rw_uri:
1173-            rw_uri = str(rw_uri)
1174-        uri = rw_uri or ro_uri
1175-        assert uri
1176-        dirobj = self.dirs.get(uri)
1177-        if not dirobj:
1178-            self.log('dir_for(%s) creating new Directory' % (name, ))
1179-            dirobj = Directory(self, ro_uri, rw_uri)
1180-            self.dirs[uri] = dirobj
1181-        return dirobj
1182-
1183-    def upload(self, fname):
1184-        self.log('upload(%r)' % (fname,))
1185-        fh = file(fname, 'rb')
1186-        url = self.compose_url("uri")
1187-        file_cap = do_http('PUT', url, fh)
1188-        self.log('uploaded to: %r' % (file_cap,))
1189-        return file_cap
1190-
1191-    def mkdir(self, path):
1192-        self.log('mkdir(%r)' % (path,))
1193-        parent, name, child = self.get_parent_name_and_child(path)
1194-
1195-        if child:
1196-            raise EEXIST('File exists: %s' % (name,))
1197-        if not parent:
1198-            raise ENOENT('No such file or directory: %s' % (path,))
1199-
1200-        url = self.compose_url("uri?t=mkdir")
1201-        new_dir_cap = do_http('PUT', url)
1202-
1203-        ro_uri = DirectoryURI.init_from_string(new_dir_cap).get_readonly()
1204-        child = Directory(self, ro_uri, new_dir_cap)
1205-        parent.add_child(name, child, {})
1206-
1207-    def rename(self, path, path1):
1208-        self.log('rename(%s, %s)' % (path, path1))
1209-        src_parent, src_name, src_child = self.get_parent_name_and_child(path)
1210-        dst_parent, dst_name, dst_child = self.get_parent_name_and_child(path1)
1211-
1212-        if not src_child or not dst_parent:
1213-            raise ENOENT('No such file or directory')
1214-
1215-        dst_parent.add_child(dst_name, src_child, {})
1216-        src_parent.remove_child(src_name)
1217-
1218-    def unlink(self, path):
1219-        parent, name, child = self.get_parent_name_and_child(path)
1220-
1221-        if child is None: # parent or child is missing
1222-            raise ENOENT('No such file or directory')
1223-        if not parent.writable():
1224-            raise EACCESS('Permission denied')
1225-
1226-        parent.remove_child(name)
1227-
1228-    def link(self, path, path1):
1229-        src = self.get_path(path)
1230-        dst_parent, dst_name, dst_child = self.get_parent_name_and_child(path1)
1231-
1232-        if not src:
1233-            raise ENOENT('No such file or directory')
1234-        if dst_parent is None:
1235-            raise ENOENT('No such file or directory')
1236-        if not dst_parent.writable():
1237-            raise EACCESS('Permission denied')
1238-
1239-        dst_parent.add_child(dst_name, src, {})
1240-
1241-class FileCache(object):
1242-    def __init__(self, nodeurl, cachedir):
1243-        self.nodeurl = nodeurl
1244-        self.cachedir = cachedir
1245-        if not os.path.exists(self.cachedir):
1246-            os.makedirs(self.cachedir)
1247-        self.tmpdir = os.path.join(self.cachedir, 'tmp')
1248-        if not os.path.exists(self.tmpdir):
1249-            os.makedirs(self.tmpdir)
1250-        self.downloaders = weakref.WeakValueDictionary()
1251-
1252-    def log(self, msg):
1253-        log("<FC> %s" % (msg, ))
1254-
1255-    def get_file(self, uri):
1256-        self.log('get_file(%s)' % (uri,))
1257-        if is_literal_file_uri(uri):
1258-            return self.get_literal(uri)
1259-        else:
1260-            return self.get_chk(uri, async=False)
1261-
1262-    def async_get_file(self, uri):
1263-        self.log('get_file(%s)' % (uri,))
1264-        return self.get_chk(uri, async=True)
1265-
1266-    def get_literal(self, uri):
1267-        h = sha.new(uri).digest()
1268-        u = LiteralFileURI.init_from_string(uri)
1269-        fname = os.path.join(self.cachedir, '__'+base64.b32encode(h).lower())
1270-        size = len(u.data)
1271-        self.log('writing literal file %s (%s)' % (fname, size, ))
1272-        fh = open(fname, 'wb')
1273-        fh.write(u.data)
1274-        fh.close()
1275-        return fname
1276-
1277-    def get_chk(self, uri, async=False):
1278-        u = CHKFileURI.init_from_string(str(uri))
1279-        storage_index = u.storage_index
1280-        size = u.size
1281-        fname = os.path.join(self.cachedir, base64.b32encode(storage_index).lower())
1282-        if os.path.exists(fname):
1283-            fsize = os.path.getsize(fname)
1284-            if fsize == size:
1285-                if async:
1286-                    return fname, None
1287-                else:
1288-                    return fname
1289-            else:
1290-                self.log('warning file "%s" is too short %s < %s' % (fname, fsize, size))
1291-        self.log('downloading file %s (%s)' % (fname, size, ))
1292-        url = "%suri/%s" % (self.nodeurl, uri)
1293-        if async:
1294-            if fname in self.downloaders and self.downloaders[fname].running:
1295-                downloader = self.downloaders[fname]
1296-            else:
1297-                downloader = DownloaderWithReadQueue()
1298-                self.downloaders[fname] = downloader
1299-                d = downloader.start(url, fname, target_size=u.size)
1300-                def clear_downloader(result, fname):
1301-                    self.log('clearing %s from downloaders: %r' % (fname, result))
1302-                    self.downloaders.pop(fname, None)
1303-                d.addBoth(clear_downloader, fname)
1304-            return fname, downloader
1305-        else:
1306-            fh = open(fname, 'wb')
1307-            download = urllib.urlopen(url)
1308-            while True:
1309-                chunk = download.read(4096)
1310-                if not chunk:
1311-                    break
1312-                fh.write(chunk)
1313-            fh.close()
1314-            return fname
1315-
1316-    def tmp_file(self, id):
1317-        fname = os.path.join(self.tmpdir, base64.b32encode(id).lower())
1318-        return fname
1319-
1320-_tfs = None # to appease pyflakes; is set in main()
1321-def print_tree():
1322-    log('tree:\n' + _tfs.pprint())
1323-
1324-
1325-def unmarshal(obj):
1326-    if obj is None or isinstance(obj, int) or isinstance(obj, long) or isinstance(obj, float):
1327-        return obj
1328-    elif isinstance(obj, unicode) or isinstance(obj, str):
1329-        #log('unmarshal(%r)' % (obj,))
1330-        return base64.b64decode(obj)
1331-    elif isinstance(obj, list):
1332-        return map(unmarshal, obj)
1333-    elif isinstance(obj, dict):
1334-        return dict([ (k,unmarshal(v)) for k,v in obj.items() ])
1335-    else:
1336-        raise ValueError('object type not int,str,list,dict,none (%s) (%r)' % (type(obj), obj))
1337-
1338-def marshal(obj):
1339-    if obj is None or isinstance(obj, int) or isinstance(obj, long) or isinstance(obj, float):
1340-        return obj
1341-    elif isinstance(obj, str):
1342-        return base64.b64encode(obj)
1343-    elif isinstance(obj, list) or isinstance(obj, tuple):
1344-        return map(marshal, obj)
1345-    elif isinstance(obj, dict):
1346-        return dict([ (k,marshal(v)) for k,v in obj.items() ])
1347-    else:
1348-        raise ValueError('object type not int,str,list,dict,none (%s)' % type(obj))
1349-
1350-
1351-class TRPCProtocol(Protocol):
1352-    compute_response_sha1 = True
1353-    log_all_requests = False
1354-
1355-    def connectionMade(self):
1356-        self.buf = []
1357-
1358-    def dataReceived(self, data):
1359-        if data == 'keepalive\n':
1360-            log('keepalive connection on %r' % (self.transport,))
1361-            self.keepalive = True
1362-            return
1363-
1364-        if not data.endswith('\n'):
1365-            self.buf.append(data)
1366-            return
1367-        if self.buf:
1368-            self.buf.append(data)
1369-            reqstr = ''.join(self.buf)
1370-            self.buf = []
1371-            self.dispatch_request(reqstr)
1372-        else:
1373-            self.dispatch_request(data)
1374-
1375-    def dispatch_request(self, reqstr):
1376-        try:
1377-            req = simplejson.loads(reqstr)
1378-        except ValueError, ve:
1379-            log(ve)
1380-            return
1381-
1382-        d = defer.maybeDeferred(self.handle_request, req)
1383-        d.addCallback(self.send_response)
1384-        d.addErrback(self.send_error)
1385-
1386-    def send_error(self, failure):
1387-        log('failure: %s' % (failure,))
1388-        if failure.check(TFSIOError):
1389-            e = failure.value
1390-            self.send_response(['error', 'errno', e.args[0], e.args[1]])
1391-        else:
1392-            self.send_response(['error', 'failure', str(failure)])
1393-
1394-    def send_response(self, result):
1395-        response = simplejson.dumps(result)
1396-        header = { 'len': len(response), }
1397-        if self.compute_response_sha1:
1398-            header['sha1'] = base64.b64encode(sha.new(response).digest())
1399-        hdr = simplejson.dumps(header)
1400-        self.transport.write(hdr)
1401-        self.transport.write('\n')
1402-        self.transport.write(response)
1403-        self.transport.loseConnection()
1404-
1405-    def connectionLost(self, reason):
1406-        if hasattr(self, 'keepalive'):
1407-            log('keepalive connection %r lost, shutting down' % (self.transport,))
1408-            reactor.callLater(0, reactor.stop)
1409-
1410-    def handle_request(self, req):
1411-        if type(req) is not list or not req or len(req) < 1:
1412-            return ['error', 'malformed request']
1413-        if req[0] == 'call':
1414-            if len(req) < 3:
1415-                return ['error', 'malformed request']
1416-            methname = req[1]
1417-            try:
1418-                args = unmarshal(req[2])
1419-            except ValueError, ve:
1420-                return ['error', 'malformed arguments', str(ve)]
1421-
1422-            try:
1423-                meth = getattr(self.factory.server, methname)
1424-            except AttributeError, ae:
1425-                return ['error', 'no such method', str(ae)]
1426-
1427-            if self.log_all_requests:
1428-                log('call %s(%s)' % (methname, ', '.join(map(repr, args))))
1429-            try:
1430-                result = meth(*args)
1431-            except TFSIOError, e:
1432-                log('errno: %s; %s' % e.args)
1433-                return ['error', 'errno', e.args[0], e.args[1]]
1434-            except Exception, e:
1435-                log('exception: ' + traceback.format_exc())
1436-                return ['error', 'exception', str(e)]
1437-            d = defer.succeed(None)
1438-            d.addCallback(lambda junk: result) # result may be Deferred
1439-            d.addCallback(lambda res: ['result', marshal(res)]) # only applies if not errback
1440-            return d
1441-
1442-class TFSServer(object):
1443-    def __init__(self, socket_path, server=None):
1444-        self.socket_path = socket_path
1445-        log('TFSServer init socket: %s' % (socket_path,))
1446-
1447-        self.factory = Factory()
1448-        self.factory.protocol = TRPCProtocol
1449-        if server:
1450-            self.factory.server = server
1451-        else:
1452-            self.factory.server = self
1453-
1454-    def get_service(self):
1455-        if not hasattr(self, 'svc'):
1456-            from twisted.application import strports
1457-            self.svc = strports.service('unix:'+self.socket_path, self.factory)
1458-        return self.svc
1459-
1460-    def run(self):
1461-        svc = self.get_service()
1462-        def ss():
1463-            try:
1464-                svc.startService()
1465-            except:
1466-                reactor.callLater(0, reactor.stop)
1467-                raise
1468-        reactor.callLater(0, ss)
1469-        reactor.run()
1470-
1471-    def hello(self):
1472-        return 'pleased to meet you'
1473-
1474-    def echo(self, arg):
1475-        return arg
1476-
1477-    def failex(self):
1478-        raise ValueError('expected')
1479-
1480-    def fail(self):
1481-        return defer.maybeDeferred(self.failex)
1482-
1483-class RPCError(RuntimeError):
1484-    pass
1485-
1486-class TRPC(object):
1487-    def __init__(self, socket_fname):
1488-        self.socket_fname = socket_fname
1489-        self.keepalive = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
1490-        self.keepalive.connect(self.socket_fname)
1491-        self.keepalive.send('keepalive\n')
1492-        log('requested keepalive on %s' % (self.keepalive,))
1493-
1494-    def req(self, req):
1495-        # open connection to trpc server
1496-        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
1497-        s.connect(self.socket_fname)
1498-        # send request
1499-        s.send(simplejson.dumps(req))
1500-        s.send('\n')
1501-        # read response header
1502-        hdr_data = s.recv(8192)
1503-        first_newline = hdr_data.index('\n')
1504-        header = hdr_data[:first_newline]
1505-        data = hdr_data[first_newline+1:]
1506-        hdr = simplejson.loads(header)
1507-        hdr_len = hdr['len']
1508-        if hdr.has_key('sha1'):
1509-            hdr_sha1 = base64.b64decode(hdr['sha1'])
1510-            spool = [data]
1511-            spool_sha = sha.new(data)
1512-            # spool response
1513-            while True:
1514-                data = s.recv(8192)
1515-                if data:
1516-                    spool.append(data)
1517-                    spool_sha.update(data)
1518-                else:
1519-                    break
1520-        else:
1521-            spool = [data]
1522-            # spool response
1523-            while True:
1524-                data = s.recv(8192)
1525-                if data:
1526-                    spool.append(data)
1527-                else:
1528-                    break
1529-        s.close()
1530-        # decode response
1531-        resp = ''.join(spool)
1532-        spool = None
1533-        assert hdr_len == len(resp), str((hdr_len, len(resp), repr(resp)))
1534-        if hdr.has_key('sha1'):
1535-            data_sha1 = spool_sha.digest()
1536-            spool = spool_sha = None
1537-            assert hdr_sha1 == data_sha1, str((base32.b2a(hdr_sha1), base32.b2a(data_sha1)))
1538-        #else:
1539-            #print 'warning, server provided no sha1 to check'
1540-        return resp
1541-
1542-    def call(self, methodname, *args):
1543-        res = self.req(['call', methodname, marshal(args)])
1544-
1545-        result = simplejson.loads(res)
1546-        if not result or len(result) < 2:
1547-            raise TypeError('malformed response %r' % (result,))
1548-        if result[0] == 'error':
1549-            if result[1] == 'errno':
1550-                raise TFSIOError(result[2], result[3])
1551-            else:
1552-                raise RPCError(*(result[1:])) # error, exception / error, failure
1553-        elif result[0] == 'result':
1554-            return unmarshal(result[1])
1555-        else:
1556-            raise TypeError('unknown response type %r' % (result[0],))
1557-
1558-    def shutdown(self):
1559-        log('shutdown() closing keepalive %s' % (self.keepalive,))
1560-        self.keepalive.close()
1561-
1562-# (cut-n-pasted here due to an ImportError / some py2app linkage issues)
1563-#from twisted.scripts._twistd_unix import daemonize
1564-def daemonize():
1565-    # See http://www.erlenstar.demon.co.uk/unix/faq_toc.html#TOC16
1566-    if os.fork():   # launch child and...
1567-        os._exit(0) # kill off parent
1568-    os.setsid()
1569-    if os.fork():   # launch child and...
1570-        os._exit(0) # kill off parent again.
1571-    os.umask(077)
1572-    null=os.open('/dev/null', os.O_RDWR)
1573-    for i in range(3):
1574-        try:
1575-            os.dup2(null, i)
1576-        except OSError, e:
1577-            if e.errno != errno.EBADF:
1578-                raise
1579-    os.close(null)
1580-
1581-def main(argv):
1582-    log("main(%s)" % (argv,))
1583-
1584-    # check for version or help options (no args == help)
1585-    if not argv:
1586-        argv = ['--help']
1587-    if len(argv) == 1 and argv[0] in ['-h', '--help']:
1588-        config = TahoeFuseOptions()
1589-        print >> sys.stderr, config
1590-        print >> sys.stderr, 'fuse usage follows:'
1591-    if len(argv) == 1 and argv[0] in ['-h', '--help', '--version']:
1592-        launch_tahoe_fuse(TahoeFuseLocal, None, argv)
1593-        return -2
1594-
1595-    # parse command line options
1596-    config = TahoeFuseOptions()
1597-    try:
1598-        #print 'parsing', argv
1599-        config.parseOptions(argv)
1600-    except usage.error, e:
1601-        print config
1602-        print e
1603-        return -1
1604-
1605-    # check for which alias or uri is specified
1606-    if config['alias']:
1607-        alias = config['alias']
1608-        #print 'looking for aliases in', config['node-directory']
1609-        aliases = get_aliases(os.path.expanduser(config['node-directory']))
1610-        if alias not in aliases:
1611-            raise usage.error('Alias %r not found' % (alias,))
1612-        root_uri = aliases[alias]
1613-        root_name = alias
1614-    elif config['root-uri']:
1615-        root_uri = config['root-uri']
1616-        root_name = 'uri_' + base32.b2a(tagged_hash('root_name', root_uri))[:12]
1617-        # test the uri for structural validity:
1618-        try:
1619-            DirectoryURI.init_from_string(root_uri)
1620-        except:
1621-            raise usage.error('root-uri must be a valid directory uri (not %r)' % (root_uri,))
1622-    else:
1623-        raise usage.error('At least one of --alias or --root-uri must be specified')
1624-
1625-    nodedir = config['node-directory']
1626-    nodeurl = config['node-url']
1627-    if not nodeurl:
1628-        nodeurl = getnodeurl(nodedir)
1629-
1630-    # allocate socket
1631-    socket_dir = os.path.join(os.path.expanduser(nodedir), "tfuse.sockets")
1632-    socket_path = os.path.join(socket_dir, root_name)
1633-    if len(socket_path) > 103:
1634-        # try googling AF_UNIX and sun_len for some taste of why this oddity exists.
1635-        raise OSError(errno.ENAMETOOLONG, 'socket path too long (%s)' % (socket_path,))
1636-
1637-    fileutil.make_dirs(socket_dir, 0700)
1638-    if os.path.exists(socket_path):
1639-        log('socket exists')
1640-        if config['server-shutdown']:
1641-            log('calling shutdown')
1642-            trpc = TRPC(socket_path)
1643-            result = trpc.shutdown()
1644-            log('result: %r' % (result,))
1645-            log('called shutdown')
1646-            return
1647-        else:
1648-            raise OSError(errno.EEXIST, 'fuse already running (%r exists)' % (socket_path,))
1649-    elif config['server-shutdown']:
1650-        raise OSError(errno.ENOTCONN, '--server-shutdown specified, but server not running')
1651-
1652-    if not os.path.exists(config.mountpoint):
1653-        raise OSError(errno.ENOENT, 'No such file or directory: "%s"' % (config.mountpoint,))
1654-
1655-    global _tfs
1656-    #
1657-    # Standalone ("no-split")
1658-    #
1659-    if config['no-split']:
1660-        reopen_logfile('tfuse.%s.unsplit.log' % (root_name,))
1661-        log('\n'+(24*'_')+'init (unsplit)'+(24*'_')+'\n')
1662-
1663-        cache_timeout = float(config['cache-timeout'])
1664-        tfs = TFS(nodedir, nodeurl, root_uri, cache_timeout, async=False)
1665-        #print tfs.pprint()
1666-
1667-        # make tfs instance accessible to print_tree() for dbg
1668-        _tfs = tfs
1669-
1670-        args = [ '-o'+opt for opt in config.fuse_options ] + [config.mountpoint]
1671-        launch_tahoe_fuse(TahoeFuseLocal, tfs, args)
1672-
1673-    #
1674-    # Server
1675-    #
1676-    elif config['server']:
1677-        reopen_logfile('tfuse.%s.server.log' % (root_name,))
1678-        log('\n'+(24*'_')+'init (server)'+(24*'_')+'\n')
1679-
1680-        log('daemonizing')
1681-        daemonize()
1682-
1683-        try:
1684-            cache_timeout = float(config['cache-timeout'])
1685-            tfs = TFS(nodedir, nodeurl, root_uri, cache_timeout, async=True)
1686-            #print tfs.pprint()
1687-
1688-            # make tfs instance accessible to print_tree() for dbg
1689-            _tfs = tfs
1690-
1691-            log('launching tfs server')
1692-            tfuse = TahoeFuseBase(tfs)
1693-            tfs_server = TFSServer(socket_path, tfuse)
1694-            tfs_server.run()
1695-            log('tfs server ran, exiting')
1696-        except:
1697-            log('exception: ' + traceback.format_exc())
1698-
1699-    #
1700-    # Client
1701-    #
1702-    else:
1703-        reopen_logfile('tfuse.%s.client.log' % (root_name,))
1704-        log('\n'+(24*'_')+'init (client)'+(24*'_')+'\n')
1705-
1706-        server_args = [sys.executable, sys.argv[0], '--server'] + argv
1707-        if 'Allmydata.app/Contents/MacOS' in sys.executable:
1708-            # in this case blackmatch is the 'fuse' subcommand of the 'tahoe' executable
1709-            # otherwise we assume blackmatch is being run from source
1710-            server_args.insert(2, 'fuse')
1711-        #print 'launching server:', server_args
1712-        server = subprocess.Popen(server_args)
1713-        waiting_since = time.time()
1714-        wait_at_most = 8
1715-        while not os.path.exists(socket_path):
1716-            log('waiting for appearance of %r' % (socket_path,))
1717-            time.sleep(1)
1718-            if time.time() - waiting_since > wait_at_most:
1719-                log('%r did not appear within %ss' % (socket_path, wait_at_most))
1720-                raise IOError(2, 'no socket %s' % (socket_path,))
1721-        #print 'launched server'
1722-        trpc = TRPC(socket_path)
1723-
1724-
1725-        args = [ '-o'+opt for opt in config.fuse_options ] + [config.mountpoint]
1726-        launch_tahoe_fuse(TahoeFuseShim, trpc, args)
1727-
1728-       
1729-if __name__ == '__main__':
1730-    sys.exit(main(sys.argv[1:]))
1731rmfile ./contrib/fuse/impl_c/blackmatch.py
1732rmdir ./contrib/fuse/impl_c
1733hunk ./contrib/fuse/impl_b/pyfuse/OrderedDict.py 1
1734-from UserDict import DictMixin
1735-
1736-
1737-DELETED = object()
1738-
1739-
1740-class OrderedDict(DictMixin):
1741-
1742-    def __init__(self, *args, **kwds):
1743-        self.clear()
1744-        self.update(*args, **kwds)
1745-
1746-    def clear(self):
1747-        self._keys = []
1748-        self._content = {}    # {key: (index, value)}
1749-        self._deleted = 0
1750-
1751-    def copy(self):
1752-        return OrderedDict(self)
1753-
1754-    def __iter__(self):
1755-        for key in self._keys:
1756-            if key is not DELETED:
1757-                yield key
1758-
1759-    def keys(self):
1760-        return [key for key in self._keys if key is not DELETED]
1761-
1762-    def popitem(self):
1763-        while 1:
1764-            try:
1765-                k = self._keys.pop()
1766-            except IndexError:
1767-                raise KeyError, 'OrderedDict is empty'
1768-            if k is not DELETED:
1769-                return k, self._content.pop(k)[1]
1770-
1771-    def __getitem__(self, key):
1772-        index, value = self._content[key]
1773-        return value
1774-
1775-    def __setitem__(self, key, value):
1776-        try:
1777-            index, oldvalue = self._content[key]
1778-        except KeyError:
1779-            index = len(self._keys)
1780-            self._keys.append(key)
1781-        self._content[key] = index, value
1782-
1783-    def __delitem__(self, key):
1784-        index, oldvalue = self._content.pop(key)
1785-        self._keys[index] = DELETED
1786-        if self._deleted <= len(self._content):
1787-            self._deleted += 1
1788-        else:
1789-            # compress
1790-            newkeys = []
1791-            for k in self._keys:
1792-                if k is not DELETED:
1793-                    i, value = self._content[k]
1794-                    self._content[k] = len(newkeys), value
1795-                    newkeys.append(k)
1796-            self._keys = newkeys
1797-            self._deleted = 0
1798-
1799-    def __len__(self):
1800-        return len(self._content)
1801-
1802-    def __repr__(self):
1803-        res = ['%r: %r' % (key, self._content[key][1]) for key in self]
1804-        return 'OrderedDict(%s)' % (', '.join(res),)
1805-
1806-    def __cmp__(self, other):
1807-        if not isinstance(other, OrderedDict):
1808-            return NotImplemented
1809-        keys = self.keys()
1810-        r = cmp(keys, other.keys())
1811-        if r:
1812-            return r
1813-        for k in keys:
1814-            r = cmp(self[k], other[k])
1815-            if r:
1816-                return r
1817-        return 0
1818rmfile ./contrib/fuse/impl_b/pyfuse/OrderedDict.py
1819rmfile ./contrib/fuse/impl_b/pyfuse/__init__.py
1820hunk ./contrib/fuse/impl_b/pyfuse/cachefs.py 1
1821-import os, stat, py, select
1822-import inspect
1823-from objectfs import ObjectFs
1824-
1825-
1826-BLOCKSIZE = 8192
1827-
1828-
1829-def remote_runner(BLOCKSIZE):
1830-    import sys, select, os, struct
1831-    stream = None
1832-    while True:
1833-        while stream is not None:
1834-            iwtd, owtd, ewtd = select.select([0], [1], [])
1835-            if iwtd:
1836-                break
1837-            pos = stream.tell()
1838-            data = stream.read(BLOCKSIZE)
1839-            res = ('R', path, pos, len(data))
1840-            sys.stdout.write('%r\n%s' % (res, data))
1841-            if len(data) < BLOCKSIZE:
1842-                stream = None
1843-
1844-        stream = None
1845-        msg = eval(sys.stdin.readline())
1846-        if msg[0] == 'L':
1847-            path = msg[1]
1848-            names = os.listdir(path)
1849-            res = []
1850-            for name in names:
1851-                try:
1852-                    st = os.stat(os.path.join(path, name))
1853-                except OSError:
1854-                    continue
1855-                res.append((name, st.st_mode, st.st_size))
1856-            res = msg + (res,)
1857-            sys.stdout.write('%s\n' % (res,))
1858-        elif msg[0] == 'R':
1859-            path, pos = msg[1:]
1860-            f = open(path, 'rb')
1861-            f.seek(pos)
1862-            data = f.read(BLOCKSIZE)
1863-            res = msg + (len(data),)
1864-            sys.stdout.write('%r\n%s' % (res, data))
1865-        elif msg[0] == 'S':
1866-            path, pos = msg[1:]
1867-            stream = open(path, 'rb')
1868-            stream.seek(pos)
1869-        #elif msg[0] == 'C':
1870-        #    stream = None
1871-
1872-
1873-class CacheFs(ObjectFs):
1874-    MOUNT_OPTIONS = {'max_read': BLOCKSIZE}
1875-
1876-    def __init__(self, localdir, remotehost, remotedir):
1877-        src = inspect.getsource(remote_runner)
1878-        src += '\n\nremote_runner(%d)\n' % BLOCKSIZE
1879-
1880-        remotecmd = 'python -u -c "exec input()"'
1881-        cmdline = [remotehost, remotecmd]
1882-        # XXX Unix style quoting
1883-        for i in range(len(cmdline)):
1884-            cmdline[i] = "'" + cmdline[i].replace("'", "'\\''") + "'"
1885-        cmd = 'ssh -C'
1886-        cmdline.insert(0, cmd)
1887-
1888-        child_in, child_out = os.popen2(' '.join(cmdline), bufsize=0)
1889-        child_in.write('%r\n' % (src,))
1890-
1891-        control = Controller(child_in, child_out)
1892-        ObjectFs.__init__(self, CacheDir(localdir, remotedir, control))
1893-
1894-
1895-class Controller:
1896-    def __init__(self, child_in, child_out):
1897-        self.child_in = child_in
1898-        self.child_out = child_out
1899-        self.cache = {}
1900-        self.streaming = None
1901-
1902-    def next_answer(self):
1903-        answer = eval(self.child_out.readline())
1904-        #print 'A', answer
1905-        if answer[0] == 'R':
1906-            remotefn, pos, length = answer[1:]
1907-            data = self.child_out.read(length)
1908-            self.cache[remotefn, pos] = data
1909-        return answer
1910-
1911-    def wait_answer(self, query):
1912-        self.streaming = None
1913-        #print 'Q', query
1914-        self.child_in.write('%r\n' % (query,))
1915-        while True:
1916-            answer = self.next_answer()
1917-            if answer[:len(query)] == query:
1918-                return answer[len(query):]
1919-
1920-    def listdir(self, remotedir):
1921-        query = ('L', remotedir)
1922-        res, = self.wait_answer(query)
1923-        return res
1924-
1925-    def wait_for_block(self, remotefn, pos):
1926-        key = remotefn, pos
1927-        while key not in self.cache:
1928-            self.next_answer()
1929-        return self.cache[key]
1930-
1931-    def peek_for_block(self, remotefn, pos):
1932-        key = remotefn, pos
1933-        while key not in self.cache:
1934-            iwtd, owtd, ewtd = select.select([self.child_out], [], [], 0)
1935-            if not iwtd:
1936-                return None
1937-            self.next_answer()
1938-        return self.cache[key]
1939-
1940-    def cached_block(self, remotefn, pos):
1941-        key = remotefn, pos
1942-        return self.cache.get(key)
1943-
1944-    def start_streaming(self, remotefn, pos):
1945-        if remotefn != self.streaming:
1946-            while (remotefn, pos) in self.cache:
1947-                pos += BLOCKSIZE
1948-            query = ('S', remotefn, pos)
1949-            #print 'Q', query
1950-            self.child_in.write('%r\n' % (query,))
1951-            self.streaming = remotefn
1952-
1953-    def read_blocks(self, remotefn, poslist):
1954-        lst = ['%r\n' % (('R', remotefn, pos),)
1955-               for pos in poslist if (remotefn, pos) not in self.cache]
1956-        if lst:
1957-            self.streaming = None
1958-            #print 'Q', '+ '.join(lst)
1959-            self.child_in.write(''.join(lst))
1960-
1961-    def clear_cache(self, remotefn):
1962-        for key in self.cache.keys():
1963-            if key[0] == remotefn:
1964-                del self.cache[key]
1965-
1966-
1967-class CacheDir:
1968-    def __init__(self, localdir, remotedir, control, size=0):
1969-        self.localdir  = localdir
1970-        self.remotedir = remotedir
1971-        self.control   = control
1972-        self.entries   = None
1973-    def listdir(self):
1974-        if self.entries is None:
1975-            self.entries = []
1976-            for name, st_mode, st_size in self.control.listdir(self.remotedir):
1977-                if stat.S_ISDIR(st_mode):
1978-                    cls = CacheDir
1979-                else:
1980-                    cls = CacheFile
1981-                obj = cls(os.path.join(self.localdir, name),
1982-                          os.path.join(self.remotedir, name),
1983-                          self.control,
1984-                          st_size)
1985-                self.entries.append((name, obj))
1986-        return self.entries
1987-
1988-class CacheFile:
1989-    def __init__(self, localfn, remotefn, control, size):
1990-        self.localfn  = localfn
1991-        self.remotefn = remotefn
1992-        self.control  = control
1993-        self.st_size  = size
1994-
1995-    def size(self):
1996-        return self.st_size
1997-
1998-    def read(self):
1999-        try:
2000-            st = os.stat(self.localfn)
2001-        except OSError:
2002-            pass
2003-        else:
2004-            if st.st_size == self.st_size:     # fully cached
2005-                return open(self.localfn, 'rb')
2006-            os.unlink(self.localfn)
2007-        lpath = py.path.local(self.partial())
2008-        lpath.ensure(file=1)
2009-        f = open(self.partial(), 'r+b')
2010-        return DumpFile(self, f)
2011-
2012-    def partial(self):
2013-        return self.localfn + '.partial~'
2014-
2015-    def complete(self):
2016-        try:
2017-            os.rename(self.partial(), self.localfn)
2018-        except OSError:
2019-            pass
2020-
2021-
2022-class DumpFile:
2023-
2024-    def __init__(self, cf, f):
2025-        self.cf = cf
2026-        self.f = f
2027-        self.pos = 0
2028-
2029-    def seek(self, npos):
2030-        self.pos = npos
2031-
2032-    def read(self, count):
2033-        control = self.cf.control
2034-        self.f.seek(self.pos)
2035-        buffer = self.f.read(count)
2036-        self.pos += len(buffer)
2037-        count -= len(buffer)
2038-
2039-        self.f.seek(0, 2)
2040-        curend = self.f.tell()
2041-
2042-        if count > 0:
2043-
2044-            while self.pos > curend:
2045-                curend &= -BLOCKSIZE
2046-                data = control.peek_for_block(self.cf.remotefn, curend)
2047-                if data is None:
2048-                    break
2049-                self.f.seek(curend)
2050-                self.f.write(data)
2051-                curend += len(data)
2052-                if len(data) < BLOCKSIZE:
2053-                    break
2054-
2055-            start = max(self.pos, curend) & (-BLOCKSIZE)
2056-            end = (self.pos + count + BLOCKSIZE-1) & (-BLOCKSIZE)
2057-            poslist = range(start, end, BLOCKSIZE)
2058-
2059-            if self.pos <= curend:
2060-                control.start_streaming(self.cf.remotefn, start)
2061-                self.f.seek(start)
2062-                for p in poslist:
2063-                    data = control.wait_for_block(self.cf.remotefn, p)
2064-                    assert self.f.tell() == p
2065-                    self.f.write(data)
2066-                    if len(data) < BLOCKSIZE:
2067-                        break
2068-
2069-                curend = self.f.tell()
2070-                while curend < self.cf.st_size:
2071-                    curend &= -BLOCKSIZE
2072-                    data = control.cached_block(self.cf.remotefn, curend)
2073-                    if data is None:
2074-                        break
2075-                    assert self.f.tell() == curend
2076-                    self.f.write(data)
2077-                    curend += len(data)
2078-                else:
2079-                    self.cf.complete()
2080-                    control.clear_cache(self.cf.remotefn)
2081-
2082-                self.f.seek(self.pos)
2083-                buffer += self.f.read(count)
2084-
2085-            else:
2086-                control.read_blocks(self.cf.remotefn, poslist)
2087-                result = []
2088-                for p in poslist:
2089-                    data = control.wait_for_block(self.cf.remotefn, p)
2090-                    result.append(data)
2091-                    if len(data) < BLOCKSIZE:
2092-                        break
2093-                data = ''.join(result)
2094-                buffer += data[self.pos-start:self.pos-start+count]
2095-
2096-        else:
2097-            if self.pos + 60000 > curend:
2098-                curend &= -BLOCKSIZE
2099-                control.start_streaming(self.cf.remotefn, curend)
2100-
2101-        return buffer
2102rmfile ./contrib/fuse/impl_b/pyfuse/cachefs.py
2103hunk ./contrib/fuse/impl_b/pyfuse/greenhandler.py 1
2104-import sys, os, Queue, atexit
2105-
2106-dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
2107-dir = os.path.join(dir, 'pypeers')
2108-if dir not in sys.path:
2109-    sys.path.append(dir)
2110-del dir
2111-
2112-from greensock import *
2113-import threadchannel
2114-
2115-
2116-def _read_from_kernel(handler):
2117-    while True:
2118-        msg = read(handler.fd, handler.MAX_READ)
2119-        if not msg:
2120-            print >> sys.stderr, "out-kernel connexion closed"
2121-            break
2122-        autogreenlet(handler.handle_message, msg)
2123-
2124-def add_handler(handler):
2125-    autogreenlet(_read_from_kernel, handler)
2126-    atexit.register(handler.close)
2127-
2128-# ____________________________________________________________
2129-
2130-THREAD_QUEUE = None
2131-
2132-def thread_runner(n):
2133-    while True:
2134-        #print 'thread runner %d waiting' % n
2135-        operation, answer = THREAD_QUEUE.get()
2136-        #print 'thread_runner %d: %r' % (n, operation)
2137-        try:
2138-            res = True, operation()
2139-        except Exception:
2140-            res = False, sys.exc_info()
2141-        #print 'thread_runner %d: got %d bytes' % (n, len(res or ''))
2142-        answer.send(res)
2143-
2144-
2145-def start_bkgnd_thread():
2146-    global THREAD_QUEUE, THREAD_LOCK
2147-    import thread
2148-    threadchannel.startup()
2149-    THREAD_LOCK = thread.allocate_lock()
2150-    THREAD_QUEUE = Queue.Queue()
2151-    for i in range(4):
2152-        thread.start_new_thread(thread_runner, (i,))
2153-
2154-def wget(*args, **kwds):
2155-    from wget import wget
2156-
2157-    def operation():
2158-        kwds['unlock'] = THREAD_LOCK
2159-        THREAD_LOCK.acquire()
2160-        try:
2161-            return wget(*args, **kwds)
2162-        finally:
2163-            THREAD_LOCK.release()
2164-
2165-    if THREAD_QUEUE is None:
2166-        start_bkgnd_thread()
2167-    answer = threadchannel.ThreadChannel()
2168-    THREAD_QUEUE.put((operation, answer))
2169-    ok, res = answer.receive()
2170-    if not ok:
2171-        typ, value, tb = res
2172-        raise typ, value, tb
2173-    #print 'wget returns %d bytes' % (len(res or ''),)
2174-    return res
2175rmfile ./contrib/fuse/impl_b/pyfuse/greenhandler.py
2176hunk ./contrib/fuse/impl_b/pyfuse/handler.py 1
2177-from kernel import *
2178-import os, errno, sys
2179-
2180-def fuse_mount(mountpoint, opts=None):
2181-    if not isinstance(mountpoint, str):
2182-        raise TypeError
2183-    if opts is not None and not isinstance(opts, str):
2184-        raise TypeError
2185-    import dl
2186-    try:
2187-        fuse = dl.open('libfuse.so')
2188-    except dl.error:
2189-        fuse = dl.open('libfuse.so.2')
2190-    if fuse.sym('fuse_mount_compat22'):
2191-        fnname = 'fuse_mount_compat22'
2192-    else:
2193-        fnname = 'fuse_mount'     # older versions of libfuse.so
2194-    return fuse.call(fnname, mountpoint, opts)
2195-
2196-class Handler(object):
2197-    __system = os.system
2198-    mountpoint = fd = None
2199-    __in_header_size  = fuse_in_header.calcsize()
2200-    __out_header_size = fuse_out_header.calcsize()
2201-    MAX_READ = FUSE_MAX_IN
2202-
2203-    def __init__(self, mountpoint, filesystem, logfile='STDERR', **opts1):
2204-        opts = getattr(filesystem, 'MOUNT_OPTIONS', {}).copy()
2205-        opts.update(opts1)
2206-        if opts:
2207-            opts = opts.items()
2208-            opts.sort()
2209-            opts = ' '.join(['%s=%s' % item for item in opts])
2210-        else:
2211-            opts = None
2212-        fd = fuse_mount(mountpoint, opts)
2213-        if fd < 0:
2214-            raise IOError("mount failed")
2215-        self.fd = fd
2216-        if logfile == 'STDERR':
2217-            logfile = sys.stderr
2218-        self.logfile = logfile
2219-        if self.logfile:
2220-            print >> self.logfile, '* mounted at', mountpoint
2221-        self.mountpoint = mountpoint
2222-        self.filesystem = filesystem
2223-        self.handles = {}
2224-        self.nexth = 1
2225-
2226-    def __del__(self):
2227-        if self.fd is not None:
2228-            os.close(self.fd)
2229-            self.fd = None
2230-        if self.mountpoint:
2231-            cmd = "fusermount -u '%s'" % self.mountpoint.replace("'", r"'\''")
2232-            self.mountpoint = None
2233-            if self.logfile:
2234-                print >> self.logfile, '*', cmd
2235-            self.__system(cmd)
2236-
2237-    close = __del__
2238-
2239-    def loop_forever(self):
2240-        while True:
2241-            try:
2242-                msg = os.read(self.fd, FUSE_MAX_IN)
2243-            except OSError, ose:
2244-                if ose.errno == errno.ENODEV:
2245-                    # on hardy, at least, this is what happens upon fusermount -u
2246-                    #raise EOFError("out-kernel connection closed")
2247-                    return
2248-            if not msg:
2249-                #raise EOFError("out-kernel connection closed")
2250-                return
2251-            self.handle_message(msg)
2252-
2253-    def handle_message(self, msg):
2254-        headersize = self.__in_header_size
2255-        req = fuse_in_header(msg[:headersize])
2256-        assert req.len == len(msg)
2257-        name = req.opcode
2258-        try:
2259-            try:
2260-                name = fuse_opcode2name[req.opcode]
2261-                meth = getattr(self, name)
2262-            except (IndexError, AttributeError):
2263-                raise NotImplementedError
2264-            #if self.logfile:
2265-            #    print >> self.logfile, '%s(%d)' % (name, req.nodeid)
2266-            reply = meth(req, msg[headersize:])
2267-            #if self.logfile:
2268-            #    print >> self.logfile, '   >>', repr(reply)
2269-        except NotImplementedError:
2270-            if self.logfile:
2271-                print >> self.logfile, '%s: not implemented' % (name,)
2272-            self.send_reply(req, err=errno.ENOSYS)
2273-        except EnvironmentError, e:
2274-            if self.logfile:
2275-                print >> self.logfile, '%s: %s' % (name, e)
2276-            self.send_reply(req, err = e.errno or errno.ESTALE)
2277-        except NoReply:
2278-            pass
2279-        else:
2280-            self.send_reply(req, reply)
2281-
2282-    def send_reply(self, req, reply=None, err=0):
2283-        assert 0 <= err < 1000
2284-        if reply is None:
2285-            reply = ''
2286-        elif not isinstance(reply, str):
2287-            reply = reply.pack()
2288-        f = fuse_out_header(unique = req.unique,
2289-                            error  = -err,
2290-                            len    = self.__out_header_size + len(reply))
2291-        data = f.pack() + reply
2292-        while data:
2293-            count = os.write(self.fd, data)
2294-            if not count:
2295-                raise EOFError("in-kernel connection closed")
2296-            data = data[count:]
2297-
2298-    def notsupp_or_ro(self):
2299-        if hasattr(self.filesystem, "modified"):
2300-            raise IOError(errno.ENOSYS, "not supported")
2301-        else:
2302-            raise IOError(errno.EROFS, "read-only file system")
2303-
2304-    # ____________________________________________________________
2305-
2306-    def FUSE_INIT(self, req, msg):
2307-        msg = fuse_init_in_out(msg[:8])
2308-        if self.logfile:
2309-            print >> self.logfile, 'INIT: %d.%d' % (msg.major, msg.minor)
2310-        return fuse_init_in_out(major = FUSE_KERNEL_VERSION,
2311-                                minor = FUSE_KERNEL_MINOR_VERSION)
2312-
2313-    def FUSE_GETATTR(self, req, msg):
2314-        node = self.filesystem.getnode(req.nodeid)
2315-        attr, valid = self.filesystem.getattr(node)
2316-        return fuse_attr_out(attr_valid = valid,
2317-                             attr = attr)
2318-
2319-    def FUSE_SETATTR(self, req, msg):
2320-        if not hasattr(self.filesystem, 'setattr'):
2321-            self.notsupp_or_ro()
2322-        msg = fuse_setattr_in(msg)
2323-        if msg.valid & FATTR_MODE:  mode = msg.attr.mode & 0777
2324-        else:                       mode = None
2325-        if msg.valid & FATTR_UID:   uid = msg.attr.uid
2326-        else:                       uid = None
2327-        if msg.valid & FATTR_GID:   gid = msg.attr.gid
2328-        else:                       gid = None
2329-        if msg.valid & FATTR_SIZE:  size = msg.attr.size
2330-        else:                       size = None
2331-        if msg.valid & FATTR_ATIME: atime = msg.attr.atime
2332-        else:                       atime = None
2333-        if msg.valid & FATTR_MTIME: mtime = msg.attr.mtime
2334-        else:                       mtime = None
2335-        node = self.filesystem.getnode(req.nodeid)
2336-        self.filesystem.setattr(node, mode, uid, gid,
2337-                                size, atime, mtime)
2338-        attr, valid = self.filesystem.getattr(node)
2339-        return fuse_attr_out(attr_valid = valid,
2340-                             attr = attr)
2341-
2342-    def FUSE_RELEASE(self, req, msg):
2343-        msg = fuse_release_in(msg, truncate=True)
2344-        try:
2345-            del self.handles[msg.fh]
2346-        except KeyError:
2347-            raise IOError(errno.EBADF, msg.fh)
2348-    FUSE_RELEASEDIR = FUSE_RELEASE
2349-
2350-    def FUSE_OPENDIR(self, req, msg):
2351-        #msg = fuse_open_in(msg)
2352-        node = self.filesystem.getnode(req.nodeid)
2353-        attr, valid = self.filesystem.getattr(node)
2354-        if mode2type(attr.mode) != TYPE_DIR:
2355-            raise IOError(errno.ENOTDIR, node)
2356-        fh = self.nexth
2357-        self.nexth += 1
2358-        self.handles[fh] = True, '', node
2359-        return fuse_open_out(fh = fh)
2360-
2361-    def FUSE_READDIR(self, req, msg):
2362-        msg = fuse_read_in(msg)
2363-        try:
2364-            isdir, data, node = self.handles[msg.fh]
2365-            if not isdir:
2366-                raise KeyError    # not a dir handle
2367-        except KeyError:
2368-            raise IOError(errno.EBADF, msg.fh)
2369-        if msg.offset == 0:
2370-            # start or rewind
2371-            d_entries = []
2372-            off = 0
2373-            for name, type in self.filesystem.listdir(node):
2374-                off += fuse_dirent.calcsize(len(name))
2375-                d_entry = fuse_dirent(ino  = INVALID_INO,
2376-                                      off  = off,
2377-                                      type = type,
2378-                                      name = name)
2379-                d_entries.append(d_entry)
2380-            data = ''.join([d.pack() for d in d_entries])
2381-            self.handles[msg.fh] = True, data, node
2382-        return data[msg.offset:msg.offset+msg.size]
2383-
2384-    def replyentry(self, (subnodeid, valid1)):
2385-        subnode = self.filesystem.getnode(subnodeid)
2386-        attr, valid2 = self.filesystem.getattr(subnode)
2387-        return fuse_entry_out(nodeid = subnodeid,
2388-                              entry_valid = valid1,
2389-                              attr_valid = valid2,
2390-                              attr = attr)
2391-
2392-    def FUSE_LOOKUP(self, req, msg):
2393-        filename = c2pystr(msg)
2394-        dirnode = self.filesystem.getnode(req.nodeid)
2395-        return self.replyentry(self.filesystem.lookup(dirnode, filename))
2396-
2397-    def FUSE_OPEN(self, req, msg, mask=os.O_RDONLY|os.O_WRONLY|os.O_RDWR):
2398-        msg = fuse_open_in(msg)
2399-        node = self.filesystem.getnode(req.nodeid)
2400-        attr, valid = self.filesystem.getattr(node)
2401-        if mode2type(attr.mode) != TYPE_REG:
2402-            raise IOError(errno.EPERM, node)
2403-        f = self.filesystem.open(node, msg.flags & mask)
2404-        if isinstance(f, tuple):
2405-            f, open_flags = f
2406-        else:
2407-            open_flags = 0
2408-        fh = self.nexth
2409-        self.nexth += 1
2410-        self.handles[fh] = False, f, node
2411-        return fuse_open_out(fh = fh, open_flags = open_flags)
2412-
2413-    def FUSE_READ(self, req, msg):
2414-        msg = fuse_read_in(msg)
2415-        try:
2416-            isdir, f, node = self.handles[msg.fh]
2417-            if isdir:
2418-                raise KeyError
2419-        except KeyError:
2420-            raise IOError(errno.EBADF, msg.fh)
2421-        f.seek(msg.offset)
2422-        return f.read(msg.size)
2423-
2424-    def FUSE_WRITE(self, req, msg):
2425-        if not hasattr(self.filesystem, 'modified'):
2426-            raise IOError(errno.EROFS, "read-only file system")
2427-        msg, data = fuse_write_in.from_head(msg)
2428-        try:
2429-            isdir, f, node = self.handles[msg.fh]
2430-            if isdir:
2431-                raise KeyError
2432-        except KeyError:
2433-            raise IOError(errno.EBADF, msg.fh)
2434-        f.seek(msg.offset)
2435-        f.write(data)
2436-        self.filesystem.modified(node)
2437-        return fuse_write_out(size = len(data))
2438-
2439-    def FUSE_MKNOD(self, req, msg):
2440-        if not hasattr(self.filesystem, 'mknod'):
2441-            self.notsupp_or_ro()
2442-        msg, filename = fuse_mknod_in.from_param(msg)
2443-        node = self.filesystem.getnode(req.nodeid)
2444-        return self.replyentry(self.filesystem.mknod(node, filename, msg.mode))
2445-
2446-    def FUSE_MKDIR(self, req, msg):
2447-        if not hasattr(self.filesystem, 'mkdir'):
2448-            self.notsupp_or_ro()
2449-        msg, filename = fuse_mkdir_in.from_param(msg)
2450-        node = self.filesystem.getnode(req.nodeid)
2451-        return self.replyentry(self.filesystem.mkdir(node, filename, msg.mode))
2452-
2453-    def FUSE_SYMLINK(self, req, msg):
2454-        if not hasattr(self.filesystem, 'symlink'):
2455-            self.notsupp_or_ro()
2456-        linkname, target = c2pystr2(msg)
2457-        node = self.filesystem.getnode(req.nodeid)
2458-        return self.replyentry(self.filesystem.symlink(node, linkname, target))
2459-
2460-    #def FUSE_LINK(self, req, msg):
2461-    #    ...
2462-
2463-    def FUSE_UNLINK(self, req, msg):
2464-        if not hasattr(self.filesystem, 'unlink'):
2465-            self.notsupp_or_ro()
2466-        filename = c2pystr(msg)
2467-        node = self.filesystem.getnode(req.nodeid)
2468-        self.filesystem.unlink(node, filename)
2469-
2470-    def FUSE_RMDIR(self, req, msg):
2471-        if not hasattr(self.filesystem, 'rmdir'):
2472-            self.notsupp_or_ro()
2473-        dirname = c2pystr(msg)
2474-        node = self.filesystem.getnode(req.nodeid)
2475-        self.filesystem.rmdir(node, dirname)
2476-
2477-    def FUSE_FORGET(self, req, msg):
2478-        if hasattr(self.filesystem, 'forget'):
2479-            self.filesystem.forget(req.nodeid)
2480-        raise NoReply
2481-
2482-    def FUSE_READLINK(self, req, msg):
2483-        if not hasattr(self.filesystem, 'readlink'):
2484-            raise IOError(errno.ENOSYS, "readlink not supported")
2485-        node = self.filesystem.getnode(req.nodeid)
2486-        target = self.filesystem.readlink(node)
2487-        return target
2488-
2489-    def FUSE_RENAME(self, req, msg):
2490-        if not hasattr(self.filesystem, 'rename'):
2491-            self.notsupp_or_ro()
2492-        msg, oldname, newname = fuse_rename_in.from_param2(msg)
2493-        oldnode = self.filesystem.getnode(req.nodeid)
2494-        newnode = self.filesystem.getnode(msg.newdir)
2495-        self.filesystem.rename(oldnode, oldname, newnode, newname)
2496-
2497-    def getxattrs(self, nodeid):
2498-        if not hasattr(self.filesystem, 'getxattrs'):
2499-            raise IOError(errno.ENOSYS, "xattrs not supported")
2500-        node = self.filesystem.getnode(nodeid)
2501-        return self.filesystem.getxattrs(node)
2502-
2503-    def FUSE_LISTXATTR(self, req, msg):
2504-        names = self.getxattrs(req.nodeid).keys()
2505-        names = ['user.' + name for name in names]
2506-        totalsize = 0
2507-        for name in names:
2508-            totalsize += len(name)+1
2509-        msg = fuse_getxattr_in(msg)
2510-        if msg.size > 0:
2511-            if msg.size < totalsize:
2512-                raise IOError(errno.ERANGE, "buffer too small")
2513-            names.append('')
2514-            return '\x00'.join(names)
2515-        else:
2516-            return fuse_getxattr_out(size=totalsize)
2517-
2518-    def FUSE_GETXATTR(self, req, msg):
2519-        xattrs = self.getxattrs(req.nodeid)
2520-        msg, name = fuse_getxattr_in.from_param(msg)
2521-        if not name.startswith('user.'):    # ENODATA == ENOATTR
2522-            raise IOError(errno.ENODATA, "only supports 'user.' xattrs, "
2523-                                         "got %r" % (name,))
2524-        name = name[5:]
2525-        try:
2526-            value = xattrs[name]
2527-        except KeyError:
2528-            raise IOError(errno.ENODATA, "no such xattr")    # == ENOATTR
2529-        value = str(value)
2530-        if msg.size > 0:
2531-            if msg.size < len(value):
2532-                raise IOError(errno.ERANGE, "buffer too small")
2533-            return value
2534-        else:
2535-            return fuse_getxattr_out(size=len(value))
2536-
2537-    def FUSE_SETXATTR(self, req, msg):
2538-        xattrs = self.getxattrs(req.nodeid)
2539-        msg, name, value = fuse_setxattr_in.from_param_head(msg)
2540-        assert len(value) == msg.size
2541-        # XXX msg.flags ignored
2542-        if not name.startswith('user.'):    # ENODATA == ENOATTR
2543-            raise IOError(errno.ENODATA, "only supports 'user.' xattrs")
2544-        name = name[5:]
2545-        try:
2546-            xattrs[name] = value
2547-        except KeyError:
2548-            raise IOError(errno.ENODATA, "cannot set xattr")    # == ENOATTR
2549-
2550-    def FUSE_REMOVEXATTR(self, req, msg):
2551-        xattrs = self.getxattrs(req.nodeid)
2552-        name = c2pystr(msg)
2553-        if not name.startswith('user.'):    # ENODATA == ENOATTR
2554-            raise IOError(errno.ENODATA, "only supports 'user.' xattrs")
2555-        name = name[5:]
2556-        try:
2557-            del xattrs[name]
2558-        except KeyError:
2559-            raise IOError(errno.ENODATA, "cannot delete xattr")   # == ENOATTR
2560-
2561-
2562-class NoReply(Exception):
2563-    pass
2564rmfile ./contrib/fuse/impl_b/pyfuse/handler.py
2565hunk ./contrib/fuse/impl_b/pyfuse/httpfs.py 1
2566-import os, re, urlparse
2567-from handler import Handler
2568-from objectfs import ObjectFs
2569-
2570-
2571-class Root:
2572-    def __init__(self):
2573-        self.entries = {'gg': GoogleRoot()}
2574-    def listdir(self):
2575-        return self.entries.keys()
2576-    def join(self, hostname):
2577-        if hostname in self.entries:
2578-            return self.entries[hostname]
2579-        if '.' not in hostname:
2580-            raise KeyError
2581-        result = HtmlNode('http://%s/' % (hostname,))
2582-        self.entries[hostname] = result
2583-        return result
2584-
2585-
2586-class UrlNode:
2587-    data = None
2588-
2589-    def __init__(self, url):
2590-        self.url = url
2591-
2592-    def getdata(self):
2593-        if self.data is None:
2594-            print self.url
2595-            g = os.popen("lynx -source %r" % (self.url,), 'r')
2596-            self.data = g.read()
2597-            g.close()
2598-        return self.data
2599-
2600-
2601-class HtmlNode(UrlNode):
2602-    r_links  = re.compile(r'<a\s[^>]*href="([^"]+)"[^>]*>(.*?)</a>',
2603-                          re.IGNORECASE | re.DOTALL)
2604-    r_images = re.compile(r'<img\s[^>]*src="([^"]+[.]jpg)"', re.IGNORECASE)
2605-
2606-    def format(self, text, index,
2607-               TRANSTBL = ''.join([(32<=c<127 and c!=ord('/'))
2608-                                   and chr(c) or '_'
2609-                                   for c in range(256)])):
2610-        return text.translate(TRANSTBL)
2611-
2612-    def listdir(self):
2613-        data = self.getdata()
2614-
2615-        seen = {}
2616-        def uniquename(name):
2617-            name = self.format(name, len(seen))
2618-            if name == '' or name.startswith('.'):
2619-                name = '_' + name
2620-            basename = name
2621-            i = 1
2622-            while name in seen:
2623-                i += 1
2624-                name = '%s_%d' % (basename, i)
2625-            seen[name] = True
2626-            return name
2627-
2628-        for link, text in self.r_links.findall(data):
2629-            url = urlparse.urljoin(self.url, link)
2630-            yield uniquename(text), HtmlNode(url)
2631-
2632-        for link in self.r_images.findall(data):
2633-            text = os.path.basename(link)
2634-            url = urlparse.urljoin(self.url, link)
2635-            yield uniquename(text), RawNode(url)
2636-
2637-        yield '.source', RawNode(self.url)
2638-
2639-
2640-class RawNode(UrlNode):
2641-
2642-    def read(self):
2643-        return self.getdata()
2644-
2645-    def size(self):
2646-        if self.data:
2647-            return len(self.data)
2648-        else:
2649-            return None
2650-
2651-
2652-class GoogleRoot:
2653-    def join(self, query):
2654-        return GoogleSearch(query)
2655-
2656-class GoogleSearch(HtmlNode):
2657-    r_links  = re.compile(r'<a\sclass=l\s[^>]*href="([^"]+)"[^>]*>(.*?)</a>',
2658-                          re.IGNORECASE | re.DOTALL)
2659-
2660-    def __init__(self, query):
2661-        self.url = 'http://www.google.com/search?q=' + query
2662-
2663-    def format(self, text, index):
2664-        text = text.replace('<b>', '').replace('</b>', '')
2665-        text = HtmlNode.format(self, text, index)
2666-        return '%d. %s' % (index, text)
2667-
2668-
2669-if __name__ == '__main__':
2670-    root = Root()
2671-    handler = Handler('/home/arigo/mnt', ObjectFs(root))
2672-    handler.loop_forever()
2673rmfile ./contrib/fuse/impl_b/pyfuse/httpfs.py
2674hunk ./contrib/fuse/impl_b/pyfuse/kernel.py 1
2675-from struct import pack, unpack, calcsize
2676-import stat
2677-
2678-class Struct(object):
2679-    __slots__ = []
2680-
2681-    def __init__(self, data=None, truncate=False, **fields):
2682-        if data is not None:
2683-            if truncate:
2684-                data = data[:self.calcsize()]
2685-            self.unpack(data)
2686-        for key, value in fields.items():
2687-            setattr(self, key, value)
2688-
2689-    def unpack(self, data):
2690-        data = unpack(self.__types__, data)
2691-        for key, value in zip(self.__slots__, data):
2692-            setattr(self, key, value)
2693-
2694-    def pack(self):
2695-        return pack(self.__types__, *[getattr(self, k, 0)
2696-                                      for k in self.__slots__])
2697-
2698-    def calcsize(cls):
2699-        return calcsize(cls.__types__)
2700-    calcsize = classmethod(calcsize)
2701-
2702-    def __repr__(self):
2703-        result = ['%s=%r' % (name, getattr(self, name, None))
2704-                  for name in self.__slots__]
2705-        return '<%s %s>' % (self.__class__.__name__, ', '.join(result))
2706-
2707-    def from_param(cls, msg):
2708-        limit = cls.calcsize()
2709-        zero = msg.find('\x00', limit)
2710-        assert zero >= 0
2711-        return cls(msg[:limit]), msg[limit:zero]
2712-    from_param = classmethod(from_param)
2713-
2714-    def from_param2(cls, msg):
2715-        limit = cls.calcsize()
2716-        zero1 = msg.find('\x00', limit)
2717-        assert zero1 >= 0
2718-        zero2 = msg.find('\x00', zero1+1)
2719-        assert zero2 >= 0
2720-        return cls(msg[:limit]), msg[limit:zero1], msg[zero1+1:zero2]
2721-    from_param2 = classmethod(from_param2)
2722-
2723-    def from_head(cls, msg):
2724-        limit = cls.calcsize()
2725-        return cls(msg[:limit]), msg[limit:]
2726-    from_head = classmethod(from_head)
2727-
2728-    def from_param_head(cls, msg):
2729-        limit = cls.calcsize()
2730-        zero = msg.find('\x00', limit)
2731-        assert zero >= 0
2732-        return cls(msg[:limit]), msg[limit:zero], msg[zero+1:]
2733-    from_param_head = classmethod(from_param_head)
2734-
2735-class StructWithAttr(Struct):
2736-
2737-    def unpack(self, data):
2738-        limit = -fuse_attr.calcsize()
2739-        super(StructWithAttr, self).unpack(data[:limit])
2740-        self.attr = fuse_attr(data[limit:])
2741-
2742-    def pack(self):
2743-        return super(StructWithAttr, self).pack() + self.attr.pack()
2744-
2745-    def calcsize(cls):
2746-        return super(StructWithAttr, cls).calcsize() + fuse_attr.calcsize()
2747-    calcsize = classmethod(calcsize)
2748-
2749-
2750-def _mkstruct(name, c, base=Struct):
2751-    typ2code = {
2752-        '__u32': 'I',
2753-        '__s32': 'i',
2754-        '__u64': 'Q',
2755-        '__s64': 'q'}
2756-    slots = []
2757-    types = ['=']
2758-    for line in c.split('\n'):
2759-        line = line.strip()
2760-        if line:
2761-            line, tail = line.split(';', 1)
2762-            typ, nam = line.split()
2763-            slots.append(nam)
2764-            types.append(typ2code[typ])
2765-    cls = type(name, (base,), {'__slots__': slots,
2766-                                 '__types__': ''.join(types)})
2767-    globals()[name] = cls
2768-
2769-class timeval(object):
2770-
2771-    def __init__(self, attr1, attr2):
2772-        self.sec = attr1
2773-        self.nsec = attr2
2774-
2775-    def __get__(self, obj, typ=None):
2776-        if obj is None:
2777-            return self
2778-        else:
2779-            return (getattr(obj, self.sec) +
2780-                    getattr(obj, self.nsec) * 0.000000001)
2781-
2782-    def __set__(self, obj, val):
2783-        val = int(val * 1000000000)
2784-        sec, nsec = divmod(val, 1000000000)
2785-        setattr(obj, self.sec, sec)
2786-        setattr(obj, self.nsec, nsec)
2787-
2788-    def __delete__(self, obj):
2789-        delattr(obj, self.sec)
2790-        delattr(obj, self.nsec)
2791-
2792-def _mktimeval(cls, attr1, attr2):
2793-    assert attr1.startswith('_')
2794-    assert attr2.startswith('_')
2795-    tv = timeval(attr1, attr2)
2796-    setattr(cls, attr1[1:], tv)
2797-
2798-INVALID_INO = 0xFFFFFFFFFFFFFFFF
2799-
2800-def mode2type(mode):
2801-    return (mode & 0170000) >> 12
2802-
2803-TYPE_REG = mode2type(stat.S_IFREG)
2804-TYPE_DIR = mode2type(stat.S_IFDIR)
2805-TYPE_LNK = mode2type(stat.S_IFLNK)
2806-
2807-def c2pystr(s):
2808-    n = s.find('\x00')
2809-    assert n >= 0
2810-    return s[:n]
2811-
2812-def c2pystr2(s):
2813-    first = c2pystr(s)
2814-    second = c2pystr(s[len(first)+1:])
2815-    return first, second
2816-
2817-# ____________________________________________________________
2818-
2819-# Version number of this interface
2820-FUSE_KERNEL_VERSION = 7
2821-
2822-# Minor version number of this interface
2823-FUSE_KERNEL_MINOR_VERSION = 2
2824-
2825-# The node ID of the root inode
2826-FUSE_ROOT_ID = 1
2827-
2828-# The major number of the fuse character device
2829-FUSE_MAJOR = 10
2830-
2831-# The minor number of the fuse character device
2832-FUSE_MINOR = 229
2833-
2834-# Make sure all structures are padded to 64bit boundary, so 32bit
2835-# userspace works under 64bit kernels
2836-
2837-_mkstruct('fuse_attr', '''
2838-       __u64   ino;
2839-       __u64   size;
2840-       __u64   blocks;
2841-       __u64   _atime;
2842-       __u64   _mtime;
2843-       __u64   _ctime;
2844-       __u32   _atimensec;
2845-       __u32   _mtimensec;
2846-       __u32   _ctimensec;
2847-       __u32   mode;
2848-       __u32   nlink;
2849-       __u32   uid;
2850-       __u32   gid;
2851-       __u32   rdev;
2852-''')
2853-_mktimeval(fuse_attr, '_atime', '_atimensec')
2854-_mktimeval(fuse_attr, '_mtime', '_mtimensec')
2855-_mktimeval(fuse_attr, '_ctime', '_ctimensec')
2856-
2857-_mkstruct('fuse_kstatfs', '''
2858-       __u64   blocks;
2859-       __u64   bfree;
2860-       __u64   bavail;
2861-       __u64   files;
2862-       __u64   ffree;
2863-       __u32   bsize;
2864-       __u32   namelen;
2865-''')
2866-
2867-FATTR_MODE     = 1 << 0
2868-FATTR_UID      = 1 << 1
2869-FATTR_GID      = 1 << 2
2870-FATTR_SIZE     = 1 << 3
2871-FATTR_ATIME    = 1 << 4
2872-FATTR_MTIME    = 1 << 5
2873-
2874-#
2875-# Flags returned by the OPEN request
2876-#
2877-# FOPEN_DIRECT_IO: bypass page cache for this open file
2878-# FOPEN_KEEP_CACHE: don't invalidate the data cache on open
2879-#
2880-FOPEN_DIRECT_IO                = 1 << 0
2881-FOPEN_KEEP_CACHE       = 1 << 1
2882-
2883-fuse_opcode = {
2884-    'FUSE_LOOKUP'        : 1,
2885-    'FUSE_FORGET'        : 2,  # no reply
2886-    'FUSE_GETATTR'       : 3,
2887-    'FUSE_SETATTR'       : 4,
2888-    'FUSE_READLINK'      : 5,
2889-    'FUSE_SYMLINK'       : 6,
2890-    'FUSE_MKNOD'         : 8,
2891-    'FUSE_MKDIR'         : 9,
2892-    'FUSE_UNLINK'        : 10,
2893-    'FUSE_RMDIR'         : 11,
2894-    'FUSE_RENAME'        : 12,
2895-    'FUSE_LINK'          : 13,
2896-    'FUSE_OPEN'          : 14,
2897-    'FUSE_READ'          : 15,
2898-    'FUSE_WRITE'         : 16,
2899-    'FUSE_STATFS'        : 17,
2900-    'FUSE_RELEASE'       : 18,
2901-    'FUSE_FSYNC'         : 20,
2902-    'FUSE_SETXATTR'      : 21,
2903-    'FUSE_GETXATTR'      : 22,
2904-    'FUSE_LISTXATTR'     : 23,
2905-    'FUSE_REMOVEXATTR'   : 24,
2906-    'FUSE_FLUSH'         : 25,
2907-    'FUSE_INIT'          : 26,
2908-    'FUSE_OPENDIR'       : 27,
2909-    'FUSE_READDIR'       : 28,
2910-    'FUSE_RELEASEDIR'    : 29,
2911-    'FUSE_FSYNCDIR'      : 30,
2912-}
2913-
2914-fuse_opcode2name = []
2915-def setup():
2916-    for key, value in fuse_opcode.items():
2917-        fuse_opcode2name.extend([None] * (value+1 - len(fuse_opcode2name)))
2918-        fuse_opcode2name[value] = key
2919-setup()
2920-del setup
2921-
2922-# Conservative buffer size for the client
2923-FUSE_MAX_IN = 8192
2924-
2925-FUSE_NAME_MAX = 1024
2926-FUSE_SYMLINK_MAX = 4096
2927-FUSE_XATTR_SIZE_MAX = 4096
2928-
2929-_mkstruct('fuse_entry_out', """
2930-       __u64   nodeid;         /* Inode ID */
2931-       __u64   generation;     /* Inode generation: nodeid:gen must \
2932-                                  be unique for the fs's lifetime */
2933-       __u64   _entry_valid;   /* Cache timeout for the name */
2934-       __u64   _attr_valid;    /* Cache timeout for the attributes */
2935-       __u32   _entry_valid_nsec;
2936-       __u32   _attr_valid_nsec;
2937-""", base=StructWithAttr)
2938-_mktimeval(fuse_entry_out, '_entry_valid', '_entry_valid_nsec')
2939-_mktimeval(fuse_entry_out, '_attr_valid', '_attr_valid_nsec')
2940-
2941-_mkstruct('fuse_forget_in', '''
2942-       __u64   nlookup;
2943-''')
2944-
2945-_mkstruct('fuse_attr_out', '''
2946-       __u64   _attr_valid;    /* Cache timeout for the attributes */
2947-       __u32   _attr_valid_nsec;
2948-       __u32   dummy;
2949-''', base=StructWithAttr)
2950-_mktimeval(fuse_attr_out, '_attr_valid', '_attr_valid_nsec')
2951-
2952-_mkstruct('fuse_mknod_in', '''
2953-       __u32   mode;
2954-       __u32   rdev;
2955-''')
2956-
2957-_mkstruct('fuse_mkdir_in', '''
2958-       __u32   mode;
2959-       __u32   padding;
2960-''')
2961-
2962-_mkstruct('fuse_rename_in', '''
2963-       __u64   newdir;
2964-''')
2965-
2966-_mkstruct('fuse_link_in', '''
2967-       __u64   oldnodeid;
2968-''')
2969-
2970-_mkstruct('fuse_setattr_in', '''
2971-       __u32   valid;
2972-       __u32   padding;
2973-''', base=StructWithAttr)
2974-
2975-_mkstruct('fuse_open_in', '''
2976-       __u32   flags;
2977-       __u32   padding;
2978-''')
2979-
2980-_mkstruct('fuse_open_out', '''
2981-       __u64   fh;
2982-       __u32   open_flags;
2983-       __u32   padding;
2984-''')
2985-
2986-_mkstruct('fuse_release_in', '''
2987-       __u64   fh;
2988-       __u32   flags;
2989-       __u32   padding;
2990-''')
2991-
2992-_mkstruct('fuse_flush_in', '''
2993-       __u64   fh;
2994-       __u32   flush_flags;
2995-       __u32   padding;
2996-''')
2997-
2998-_mkstruct('fuse_read_in', '''
2999-       __u64   fh;
3000-       __u64   offset;
3001-       __u32   size;
3002-       __u32   padding;
3003-''')
3004-
3005-_mkstruct('fuse_write_in', '''
3006-       __u64   fh;
3007-       __u64   offset;
3008-       __u32   size;
3009-       __u32   write_flags;
3010-''')
3011-
3012-_mkstruct('fuse_write_out', '''
3013-       __u32   size;
3014-       __u32   padding;
3015-''')
3016-
3017-fuse_statfs_out = fuse_kstatfs
3018-
3019-_mkstruct('fuse_fsync_in', '''
3020-       __u64   fh;
3021-       __u32   fsync_flags;
3022-       __u32   padding;
3023-''')
3024-
3025-_mkstruct('fuse_setxattr_in', '''
3026-       __u32   size;
3027-       __u32   flags;
3028-''')
3029-
3030-_mkstruct('fuse_getxattr_in', '''
3031-       __u32   size;
3032-       __u32   padding;
3033-''')
3034-
3035-_mkstruct('fuse_getxattr_out', '''
3036-       __u32   size;
3037-       __u32   padding;
3038-''')
3039-
3040-_mkstruct('fuse_init_in_out', '''
3041-       __u32   major;
3042-       __u32   minor;
3043-''')
3044-
3045-_mkstruct('fuse_in_header', '''
3046-       __u32   len;
3047-       __u32   opcode;
3048-       __u64   unique;
3049-       __u64   nodeid;
3050-       __u32   uid;
3051-       __u32   gid;
3052-       __u32   pid;
3053-       __u32   padding;
3054-''')
3055-
3056-_mkstruct('fuse_out_header', '''
3057-       __u32   len;
3058-       __s32   error;
3059-       __u64   unique;
3060-''')
3061-
3062-class fuse_dirent(Struct):
3063-    __slots__ = ['ino', 'off', 'type', 'name']
3064-
3065-    def unpack(self, data):
3066-        self.ino, self.off, namelen, self.type = struct.unpack('QQII',
3067-                                                               data[:24])
3068-        self.name = data[24:24+namelen]
3069-        assert len(self.name) == namelen
3070-
3071-    def pack(self):
3072-        namelen = len(self.name)
3073-        return pack('QQII%ds' % ((namelen+7)&~7,),
3074-                    self.ino, getattr(self, 'off', 0), namelen,
3075-                    self.type, self.name)
3076-
3077-    def calcsize(cls, namelen):
3078-        return 24 + ((namelen+7)&~7)
3079-    calcsize = classmethod(calcsize)
3080rmfile ./contrib/fuse/impl_b/pyfuse/kernel.py
3081hunk ./contrib/fuse/impl_b/pyfuse/memoryfs.py 1
3082-from kernel import *
3083-from handler import Handler
3084-import stat, time, os, weakref, errno
3085-from cStringIO import StringIO
3086-
3087-
3088-class MemoryFS(object):
3089-    INFINITE = 86400.0
3090-
3091-
3092-    class Dir(object):
3093-        type = TYPE_DIR
3094-        def __init__(self, attr):
3095-            self.attr = attr
3096-            self.contents = {}    # { 'filename': Dir()/File()/SymLink() }
3097-
3098-    class File(object):
3099-        type = TYPE_REG
3100-        def __init__(self, attr):
3101-            self.attr = attr
3102-            self.data = StringIO()
3103-
3104-    class SymLink(object):
3105-        type = TYPE_LNK
3106-        def __init__(self, attr, target):
3107-            self.attr = attr
3108-            self.target = target
3109-
3110-
3111-    def __init__(self, root=None):
3112-        self.uid = os.getuid()
3113-        self.gid = os.getgid()
3114-        self.umask = os.umask(0); os.umask(self.umask)
3115-        self.root = root or self.Dir(self.newattr(stat.S_IFDIR))
3116-        self.root.id = FUSE_ROOT_ID
3117-        self.nodes = weakref.WeakValueDictionary()
3118-        self.nodes[FUSE_ROOT_ID] = self.root
3119-        self.nextid = FUSE_ROOT_ID + 1
3120-
3121-    def newattr(self, s, ino=None, mode=0666):
3122-        now = time.time()
3123-        attr = fuse_attr(size  = 0,
3124-                         mode  = s | (mode & ~self.umask),
3125-                         nlink = 1,  # even on dirs! this confuses 'find' in
3126-                                     # a good way :-)
3127-                         atime = now,
3128-                         mtime = now,
3129-                         ctime = now,
3130-                         uid   = self.uid,
3131-                         gid   = self.gid)
3132-        if ino is None:
3133-            ino = id(attr)
3134-        if ino < 0:
3135-            ino = ~ino
3136-        attr.ino = ino
3137-        return attr
3138-
3139-    def getnode(self, id):
3140-        return self.nodes[id]
3141-
3142-    def modified(self, node):
3143-        node.attr.mtime = node.attr.atime = time.time()
3144-        if isinstance(node, self.File):
3145-            node.data.seek(0, 2)
3146-            node.attr.size = node.data.tell()
3147-
3148-    def getattr(self, node):
3149-        return node.attr, self.INFINITE
3150-
3151-    def setattr(self, node, mode, uid, gid, size, atime, mtime):
3152-        if mode is not None:
3153-            node.attr.mode = (node.attr.mode & ~0777) | (mode & 0777)
3154-        if uid is not None:
3155-            node.attr.uid = uid
3156-        if gid is not None:
3157-            node.attr.gid = gid
3158-        if size is not None:
3159-            assert isinstance(node, self.File)
3160-            node.data.seek(0, 2)
3161-            oldsize = node.data.tell()
3162-            if size < oldsize:
3163-                node.data.seek(size)
3164-                node.data.truncate()
3165-                self.modified(node)
3166-            elif size > oldsize:
3167-                node.data.write('\x00' * (size - oldsize))
3168-                self.modified(node)
3169-        if atime is not None:
3170-            node.attr.atime = atime
3171-        if mtime is not None:
3172-            node.attr.mtime = mtime
3173-
3174-    def listdir(self, node):
3175-        assert isinstance(node, self.Dir)
3176-        for name, subobj in node.contents.items():
3177-            yield name, subobj.type
3178-
3179-    def lookup(self, dirnode, filename):
3180-        try:
3181-            return dirnode.contents[filename].id, self.INFINITE
3182-        except KeyError:
3183-            raise IOError(errno.ENOENT, filename)
3184-
3185-    def open(self, filenode, flags):
3186-        return filenode.data
3187-
3188-    def newnodeid(self, newnode):
3189-        id = self.nextid
3190-        self.nextid += 1
3191-        newnode.id = id
3192-        self.nodes[id] = newnode
3193-        return id
3194-
3195-    def mknod(self, dirnode, filename, mode):
3196-        node = self.File(self.newattr(stat.S_IFREG, mode=mode))
3197-        dirnode.contents[filename] = node
3198-        return self.newnodeid(node), self.INFINITE
3199-
3200-    def mkdir(self, dirnode, subdirname, mode):
3201-        node = self.Dir(self.newattr(stat.S_IFDIR, mode=mode))
3202-        dirnode.contents[subdirname] = node
3203-        return self.newnodeid(node), self.INFINITE
3204-
3205-    def symlink(self, dirnode, linkname, target):
3206-        node = self.SymLink(self.newattr(stat.S_IFLNK), target)
3207-        dirnode.contents[linkname] = node
3208-        return self.newnodeid(node), self.INFINITE
3209-
3210-    def unlink(self, dirnode, filename):
3211-        del dirnode.contents[filename]
3212-
3213-    rmdir = unlink
3214-
3215-    def readlink(self, symlinknode):
3216-        return symlinknode.target
3217-
3218-    def rename(self, olddirnode, oldname, newdirnode, newname):
3219-        node = olddirnode.contents[oldname]
3220-        newdirnode.contents[newname] = node
3221-        del olddirnode.contents[oldname]
3222-
3223-    def getxattrs(self, node):
3224-        try:
3225-            return node.xattrs
3226-        except AttributeError:
3227-            node.xattrs = {}
3228-            return node.xattrs
3229-
3230-
3231-if __name__ == '__main__':
3232-    import sys
3233-    mountpoint = sys.argv[1]
3234-    memoryfs = MemoryFS()
3235-    handler = Handler(mountpoint, memoryfs)
3236-    handler.loop_forever()
3237rmfile ./contrib/fuse/impl_b/pyfuse/memoryfs.py
3238hunk ./contrib/fuse/impl_b/pyfuse/mirrorfs.py 1
3239-"""
3240-For reading and caching from slow file system (e.g. DVDs or network).
3241-
3242-    python mirrorfs.py <sourcedir> <cachedir> <mountpoint>
3243-
3244-Makes <mountpoint> show a read-only copy of the files in <sourcedir>,
3245-caching all data ever read in the <cachedir> to avoid reading it
3246-twice.  This script also features optimistic read-ahead: once a
3247-file is accessed, and as long as no other file is accessed, the
3248-whole file is read and cached as fast as the <sourcedir> will
3249-provide it.
3250-
3251-You have to clean up <cachedir> manually before mounting a modified
3252-or different <sourcedir>.
3253-"""
3254-import sys, os, posixpath, stat
3255-
3256-try:
3257-    __file__
3258-except NameError:
3259-    __file__ = sys.argv[0]
3260-this_dir = os.path.dirname(os.path.abspath(__file__))
3261-
3262-# ____________________________________________________________
3263-
3264-sys.path.append(os.path.dirname(this_dir))
3265-from blockfs import valuetree
3266-from handler import Handler
3267-import greenhandler, greensock
3268-from objectfs import ObjectFs
3269-
3270-BLOCKSIZE = 65536
3271-
3272-class MirrorFS(ObjectFs):
3273-    rawfd = None
3274-
3275-    def __init__(self, srcdir, cachedir):
3276-        self.srcdir = srcdir
3277-        self.cachedir = cachedir
3278-        self.table = valuetree.ValueTree(os.path.join(cachedir, 'table'), 'q')
3279-        if '' not in self.table:
3280-            self.initial_read_dir('')
3281-            self.table[''] = -1,
3282-        try:
3283-            self.rawfile = open(os.path.join(cachedir, 'raw'), 'r+b')
3284-        except IOError:
3285-            self.rawfile = open(os.path.join(cachedir, 'raw'), 'w+b')
3286-        ObjectFs.__init__(self, DirNode(self, ''))
3287-        self.readahead_at = None
3288-        greenhandler.autogreenlet(self.readahead)
3289-
3290-    def close(self):
3291-        self.table.close()
3292-
3293-    def readahead(self):
3294-        while True:
3295-            greensock.sleep(0.001)
3296-            while not self.readahead_at:
3297-                greensock.sleep(1)
3298-            path, blocknum = self.readahead_at
3299-            self.readahead_at = None
3300-            try:
3301-                self.readblock(path, blocknum, really=False)
3302-            except EOFError:
3303-                pass
3304-
3305-    def initial_read_dir(self, path):
3306-        print 'Reading initial directory structure...', path
3307-        dirname = os.path.join(self.srcdir, path)
3308-        for name in os.listdir(dirname):
3309-            filename = os.path.join(dirname, name)
3310-            st = os.stat(filename)
3311-            if stat.S_ISDIR(st.st_mode):
3312-                self.initial_read_dir(posixpath.join(path, name))
3313-                q = -1
3314-            else:
3315-                q = st.st_size
3316-            self.table[posixpath.join(path, name)] = q,
3317-
3318-    def __getitem__(self, key):
3319-        self.tablelock.acquire()
3320-        try:
3321-            return self.table[key]
3322-        finally:
3323-            self.tablelock.release()
3324-
3325-    def readblock(self, path, blocknum, really=True):
3326-        s = '%s/%d' % (path, blocknum)
3327-        try:
3328-            q, = self.table[s]
3329-        except KeyError:
3330-            print s
3331-            self.readahead_at = None
3332-            f = open(os.path.join(self.srcdir, path), 'rb')
3333-            f.seek(blocknum * BLOCKSIZE)
3334-            data = f.read(BLOCKSIZE)
3335-            f.close()
3336-            if not data:
3337-                q = -2
3338-            else:
3339-                data += '\x00' * (BLOCKSIZE - len(data))
3340-                self.rawfile.seek(0, 2)
3341-                q = self.rawfile.tell()
3342-                self.rawfile.write(data)
3343-            self.table[s] = q,
3344-            if q == -2:
3345-                raise EOFError
3346-        else:
3347-            if q == -2:
3348-                raise EOFError
3349-            if really:
3350-                self.rawfile.seek(q, 0)
3351-                data = self.rawfile.read(BLOCKSIZE)
3352-            else:
3353-                data = None
3354-        if self.readahead_at is None:
3355-            self.readahead_at = path, blocknum + 1
3356-        return data
3357-
3358-
3359-class Node(object):
3360-
3361-    def __init__(self, mfs, path):
3362-        self.mfs = mfs
3363-        self.path = path
3364-
3365-class DirNode(Node):
3366-
3367-    def join(self, name):
3368-        path = posixpath.join(self.path, name)
3369-        q, = self.mfs.table[path]
3370-        if q == -1:
3371-            return DirNode(self.mfs, path)
3372-        else:
3373-            return FileNode(self.mfs, path)
3374-
3375-    def listdir(self):
3376-        result = []
3377-        for key, value in self.mfs.table.iteritemsfrom(self.path):
3378-            if not key.startswith(self.path):
3379-                break
3380-            tail = key[len(self.path):].lstrip('/')
3381-            if tail and '/' not in tail:
3382-                result.append(tail)
3383-        return result
3384-
3385-class FileNode(Node):
3386-
3387-    def size(self):
3388-        q, = self.mfs.table[self.path]
3389-        return q
3390-
3391-    def read(self):
3392-        return FileStream(self.mfs, self.path)
3393-
3394-class FileStream(object):
3395-
3396-    def __init__(self, mfs, path):
3397-        self.mfs = mfs
3398-        self.path = path
3399-        self.pos = 0
3400-        self.size, = self.mfs.table[path]
3401-
3402-    def seek(self, p):
3403-        self.pos = p
3404-
3405-    def read(self, count):
3406-        result = []
3407-        end = min(self.pos + count, self.size)
3408-        while self.pos < end:
3409-            blocknum, offset = divmod(self.pos, BLOCKSIZE)
3410-            data = self.mfs.readblock(self.path, blocknum)
3411-            data = data[offset:]
3412-            data = data[:end - self.pos]
3413-            assert len(data) > 0
3414-            result.append(data)
3415-            self.pos += len(data)
3416-        return ''.join(result)
3417-
3418-# ____________________________________________________________
3419-
3420-if __name__ == '__main__':
3421-    import sys
3422-    srcdir, cachedir, mountpoint = sys.argv[1:]
3423-    mirrorfs = MirrorFS(srcdir, cachedir)
3424-    try:
3425-        handler = Handler(mountpoint, mirrorfs)
3426-        greenhandler.add_handler(handler)
3427-        greenhandler.mainloop()
3428-    finally:
3429-        mirrorfs.close()
3430rmfile ./contrib/fuse/impl_b/pyfuse/mirrorfs.py
3431hunk ./contrib/fuse/impl_b/pyfuse/objectfs.py 1
3432-from kernel import *
3433-import stat, errno, os, time
3434-from cStringIO import StringIO
3435-from OrderedDict import OrderedDict
3436-
3437-
3438-class ObjectFs:
3439-    """A simple read-only file system based on Python objects.
3440-
3441-    Interface of Directory objects:
3442-      * join(name)   returns a file or subdirectory object
3443-      * listdir()    returns a list of names, or a list of (name, object)
3444-
3445-    join() is optional if listdir() returns a list of (name, object).
3446-    Alternatively, Directory objects can be plain dictionaries {name: object}.
3447-
3448-    Interface of File objects:
3449-      * size()       returns the size
3450-      * read()       returns the data
3451-
3452-    Alternatively, File objects can be plain strings.
3453-
3454-    Interface of SymLink objects:
3455-      * readlink()   returns the symlink's target, as a string
3456-    """
3457-
3458-    INFINITE = 86400.0
3459-    USE_DIR_CACHE = True
3460-
3461-    def __init__(self, rootnode):
3462-        self.nodes = {FUSE_ROOT_ID: rootnode}
3463-        if self.USE_DIR_CACHE:
3464-            self.dircache = {}
3465-        self.starttime = time.time()
3466-        self.uid = os.getuid()
3467-        self.gid = os.getgid()
3468-        self.umask = os.umask(0); os.umask(self.umask)
3469-
3470-    def newattr(self, s, ino, mode=0666):
3471-        if ino < 0:
3472-            ino = ~ino
3473-        return fuse_attr(ino   = ino,
3474-                         size  = 0,
3475-                         mode  = s | (mode & ~self.umask),
3476-                         nlink = 1,  # even on dirs! this confuses 'find' in
3477-                                     # a good way :-)
3478-                         atime = self.starttime,
3479-                         mtime = self.starttime,
3480-                         ctime = self.starttime,
3481-                         uid   = self.uid,
3482-                         gid   = self.gid)
3483-
3484-    def getnode(self, nodeid):
3485-        try:
3486-            return self.nodes[nodeid]
3487-        except KeyError:
3488-            raise IOError(errno.ESTALE, nodeid)
3489-
3490-    def getattr(self, node):
3491-        timeout = self.INFINITE
3492-        if isinstance(node, str):
3493-            attr = self.newattr(stat.S_IFREG, id(node))
3494-            attr.size = len(node)
3495-        elif hasattr(node, 'readlink'):
3496-            target = node.readlink()
3497-            attr = self.newattr(stat.S_IFLNK, id(node))
3498-            attr.size = len(target)
3499-            attr.mode |= 0777
3500-        elif hasattr(node, 'size'):
3501-            sz = node.size()
3502-            attr = self.newattr(stat.S_IFREG, id(node))
3503-            if sz is None:
3504-                timeout = 0
3505-            else:
3506-                attr.size = sz
3507-        else:
3508-            attr = self.newattr(stat.S_IFDIR, id(node), mode=0777)
3509-        #print 'getattr(%s) -> %s, %s' % (node, attr, timeout)
3510-        return attr, timeout
3511-
3512-    def getentries(self, node):
3513-        if isinstance(node, dict):
3514-            return node
3515-        try:
3516-            if not self.USE_DIR_CACHE:
3517-                raise KeyError
3518-            return self.dircache[node]
3519-        except KeyError:
3520-            entries = OrderedDict()
3521-            if hasattr(node, 'listdir'):
3522-                for name in node.listdir():
3523-                    if isinstance(name, tuple):
3524-                        name, subnode = name
3525-                    else:
3526-                        subnode = None
3527-                    entries[name] = subnode
3528-            if self.USE_DIR_CACHE:
3529-                self.dircache[node] = entries
3530-            return entries
3531-
3532-    def listdir(self, node):
3533-        entries = self.getentries(node)
3534-        for name, subnode in entries.items():
3535-            if subnode is None:
3536-                subnode = node.join(name)
3537-                self.nodes[uid(subnode)] = subnode
3538-                entries[name] = subnode
3539-            if isinstance(subnode, str):
3540-                yield name, TYPE_REG
3541-            elif hasattr(subnode, 'readlink'):
3542-                yield name, TYPE_LNK
3543-            elif hasattr(subnode, 'size'):
3544-                yield name, TYPE_REG
3545-            else:
3546-                yield name, TYPE_DIR
3547-
3548-    def lookup(self, node, name):
3549-        entries = self.getentries(node)
3550-        try:
3551-            subnode = entries.get(name)
3552-            if subnode is None:
3553-                if hasattr(node, 'join'):
3554-                    subnode = node.join(name)
3555-                    entries[name] = subnode
3556-                else:
3557-                    raise KeyError
3558-        except KeyError:
3559-            raise IOError(errno.ENOENT, name)
3560-        else:
3561-            return self.reply(subnode)
3562-
3563-    def reply(self, node):
3564-        res = uid(node)
3565-        self.nodes[res] = node
3566-        return res, self.INFINITE
3567-
3568-    def open(self, node, mode):
3569-        if not isinstance(node, str):
3570-            node = node.read()
3571-        if not hasattr(node, 'read'):
3572-            node = StringIO(node)
3573-        return node
3574-
3575-    def readlink(self, node):
3576-        return node.readlink()
3577-
3578-    def getxattrs(self, node):
3579-        return getattr(node, '__dict__', {})
3580-
3581-# ____________________________________________________________
3582-
3583-import struct
3584-try:
3585-    HUGEVAL = 256 ** struct.calcsize('P')
3586-except struct.error:
3587-    HUGEVAL = 0
3588-
3589-def fixid(result):
3590-    if result < 0:
3591-        result += HUGEVAL
3592-    return result
3593-
3594-def uid(obj):
3595-    """
3596-    Return the id of an object as an unsigned number so that its hex
3597-    representation makes sense
3598-    """
3599-    return fixid(id(obj))
3600-
3601-class SymLink(object):
3602-    def __init__(self, target):
3603-        self.target = target
3604-    def readlink(self):
3605-        return self.target
3606rmfile ./contrib/fuse/impl_b/pyfuse/objectfs.py
3607hunk ./contrib/fuse/impl_b/pyfuse/pairtype.py 1
3608-"""
3609-Two magic tricks for classes:
3610-
3611-    class X:
3612-        __metaclass__ = extendabletype
3613-        ...
3614-
3615-    # in some other file...
3616-    class __extend__(X):
3617-        ...      # and here you can add new methods and class attributes to X
3618-
3619-Mostly useful together with the second trick, which lets you build
3620-methods whose 'self' is a pair of objects instead of just one:
3621-
3622-    class __extend__(pairtype(X, Y)):
3623-        attribute = 42
3624-        def method((x, y), other, arguments):
3625-            ...
3626-
3627-    pair(x, y).attribute
3628-    pair(x, y).method(other, arguments)
3629-
3630-This finds methods and class attributes based on the actual
3631-class of both objects that go into the pair(), with the usual
3632-rules of method/attribute overriding in (pairs of) subclasses.
3633-
3634-For more information, see test_pairtype.
3635-"""
3636-
3637-class extendabletype(type):
3638-    """A type with a syntax trick: 'class __extend__(t)' actually extends
3639-    the definition of 't' instead of creating a new subclass."""
3640-    def __new__(cls, name, bases, dict):
3641-        if name == '__extend__':
3642-            for cls in bases:
3643-                for key, value in dict.items():
3644-                    if key == '__module__':
3645-                        continue
3646-                    # XXX do we need to provide something more for pickling?
3647-                    setattr(cls, key, value)
3648-            return None
3649-        else:
3650-            return super(extendabletype, cls).__new__(cls, name, bases, dict)
3651-
3652-
3653-def pair(a, b):
3654-    """Return a pair object."""
3655-    tp = pairtype(a.__class__, b.__class__)
3656-    return tp((a, b))   # tp is a subclass of tuple
3657-
3658-pairtypecache = {}
3659-
3660-def pairtype(cls1, cls2):
3661-    """type(pair(a,b)) is pairtype(a.__class__, b.__class__)."""
3662-    try:
3663-        pair = pairtypecache[cls1, cls2]
3664-    except KeyError:
3665-        name = 'pairtype(%s, %s)' % (cls1.__name__, cls2.__name__)
3666-        bases1 = [pairtype(base1, cls2) for base1 in cls1.__bases__]
3667-        bases2 = [pairtype(cls1, base2) for base2 in cls2.__bases__]
3668-        bases = tuple(bases1 + bases2) or (tuple,)  # 'tuple': ultimate base
3669-        pair = pairtypecache[cls1, cls2] = extendabletype(name, bases, {})
3670-    return pair
3671rmfile ./contrib/fuse/impl_b/pyfuse/pairtype.py
3672hunk ./contrib/fuse/impl_b/pyfuse/pathfs.py 1
3673-from kernel import *
3674-import errno, posixpath, os
3675-
3676-
3677-class PathFs(object):
3678-    """Base class for a read-write FUSE file system interface
3679-    whose underlying content is best accessed with '/'-separated
3680-    string paths.
3681-    """
3682-    uid = os.getuid()
3683-    gid = os.getgid()
3684-    umask = os.umask(0); os.umask(umask)
3685-    timeout = 86400.0
3686-
3687-    def __init__(self, root=''):
3688-        self._paths = {FUSE_ROOT_ID: root}
3689-        self._path2id = {root: FUSE_ROOT_ID}
3690-        self._nextid = FUSE_ROOT_ID + 1
3691-
3692-    def getnode(self, nodeid):
3693-        try:
3694-            return self._paths[nodeid]
3695-        except KeyError:
3696-            raise IOError(errno.ESTALE, nodeid)
3697-
3698-    def forget(self, nodeid):
3699-        try:
3700-            p = self._paths.pop(nodeid)
3701-            del self._path2id[p]
3702-        except KeyError:
3703-            pass
3704-
3705-    def cachepath(self, path):
3706-        if path in self._path2id:
3707-            return self._path2id[path]
3708-        id = self._nextid
3709-        self._nextid += 1
3710-        self._paths[id] = path
3711-        self._path2id[path] = id
3712-        return id
3713-
3714-    def mkattr(self, path, size, st_kind, mode, time):
3715-        attr = fuse_attr(ino   = self._path2id[path],
3716-                         size  = size,
3717-                         mode  = st_kind | (mode & ~self.umask),
3718-                         nlink = 1,  # even on dirs! this confuses 'find' in
3719-                                     # a good way :-)
3720-                         atime = time,
3721-                         mtime = time,
3722-                         ctime = time,
3723-                         uid   = self.uid,
3724-                         gid   = self.gid)
3725-        return attr, self.timeout
3726-
3727-    def lookup(self, path, name):
3728-        npath = posixpath.join(path, name)
3729-        if not self.check_path(npath):
3730-            raise IOError(errno.ENOENT, name)
3731-        return self.cachepath(npath), self.timeout
3732-
3733-    def mknod(self, path, name, mode):
3734-        npath = posixpath.join(path, name)
3735-        self.mknod_path(npath, mode)
3736-        return self.cachepath(npath), self.timeout
3737-
3738-    def mkdir(self, path, name, mode):
3739-        npath = posixpath.join(path, name)
3740-        self.mkdir_path(npath, mode)
3741-        return self.cachepath(npath), self.timeout
3742-
3743-    def unlink(self, path, name):
3744-        npath = posixpath.join(path, name)
3745-        self.unlink_path(npath)
3746-
3747-    def rmdir(self, path, name):
3748-        npath = posixpath.join(path, name)
3749-        self.rmdir_path(npath)
3750-
3751-    def rename(self, oldpath, oldname, newpath, newname):
3752-        noldpath = posixpath.join(oldpath, oldname)
3753-        nnewpath = posixpath.join(newpath, newname)
3754-        if not self.rename_path(noldpath, nnewpath):
3755-            raise IOError(errno.ENOENT, oldname)
3756-        # fix all paths in the cache
3757-        N = len(noldpath)
3758-        for id, path in self._paths.items():
3759-            if path.startswith(noldpath):
3760-                if len(path) == N or path[N] == '/':
3761-                    del self._path2id[path]
3762-                    path = nnewpath + path[N:]
3763-                    self._paths[id] = path
3764-                    self._path2id[path] = id
3765rmfile ./contrib/fuse/impl_b/pyfuse/pathfs.py
3766hunk ./contrib/fuse/impl_b/pyfuse/pysvnfs.py 1
3767-from kernel import *
3768-import errno, posixpath, weakref
3769-from time import time as now
3770-from stat import S_IFDIR, S_IFREG, S_IFMT
3771-from cStringIO import StringIO
3772-from handler import Handler
3773-from pathfs import PathFs
3774-from pysvn.ra_filesystem import SvnRepositoryFilesystem
3775-import pysvn.date
3776-
3777-
3778-class SvnFS(PathFs):
3779-
3780-    def __init__(self, svnurl, root=''):
3781-        super(SvnFS, self).__init__(root)
3782-        self.svnurl = svnurl
3783-        self.openfiles = weakref.WeakValueDictionary()
3784-        self.creationtimes = {}
3785-        self.do_open()
3786-
3787-    def do_open(self, rev='HEAD'):
3788-        self.fs = SvnRepositoryFilesystem(self.svnurl, rev)
3789-
3790-    def do_commit(self, msg):
3791-        rev = self.fs.commit(msg)
3792-        if rev is None:
3793-            print '* no changes.'
3794-        else:
3795-            print '* checked in revision %d.' % (rev,)
3796-        self.do_open()
3797-
3798-    def do_status(self, path=''):
3799-        print '* status'
3800-        result = []
3801-        if path and not path.endswith('/'):
3802-            path += '/'
3803-        for delta in self.fs._compute_deltas():
3804-            if delta.path.startswith(path):
3805-                if delta.oldrev is None:
3806-                    c = 'A'
3807-                elif delta.newrev is None:
3808-                    c = 'D'
3809-                else:
3810-                    c = 'M'
3811-                result.append('    %s  %s\n' % (c, delta.path[len(path):]))
3812-        return ''.join(result)
3813-
3814-    def getattr(self, path):
3815-        stat = self.fs.stat(path)
3816-        if stat['svn:entry:kind'] == 'dir':
3817-            s = S_IFDIR
3818-            mode = 0777
3819-        else:
3820-            s = S_IFREG
3821-            mode = 0666
3822-        try:
3823-            time = pysvn.date.decode(stat['svn:entry:committed-date'])
3824-        except KeyError:
3825-            try:
3826-                time = self.creationtimes[path]
3827-            except KeyError:
3828-                time = self.creationtimes[path] = now()
3829-        return self.mkattr(path,
3830-                           size    = stat.get('svn:entry:size', 0),
3831-                           st_kind = s,
3832-                           mode    = mode,
3833-                           time    = time)
3834-
3835-    def setattr(self, path, mode, uid, gid, size, atime, mtime):
3836-        if size is not None:
3837-            data = self.fs.read(path)
3838-            if size < len(data):
3839-                self.fs.write(path, data[:size])
3840-            elif size > len(data):
3841-                self.fs.write(path, data + '\x00' * (size - len(data)))
3842-
3843-    def listdir(self, path):
3844-        for name in self.fs.listdir(path):
3845-            kind = self.fs.check_path(posixpath.join(path, name))
3846-            if kind == 'dir':
3847-                yield name, TYPE_DIR
3848-            else:
3849-                yield name, TYPE_REG
3850-
3851-    def check_path(self, path):
3852-        kind = self.fs.check_path(path)
3853-        return kind is not None
3854-
3855-    def open(self, path, mode):
3856-        try:
3857-            of = self.openfiles[path]
3858-        except KeyError:
3859-            of = self.openfiles[path] = OpenFile(self.fs.read(path))
3860-        return of, FOPEN_KEEP_CACHE
3861-
3862-    def modified(self, path):
3863-        try:
3864-            of = self.openfiles[path]
3865-        except KeyError:
3866-            pass
3867-        else:
3868-            self.fs.write(path, of.f.getvalue())
3869-
3870-    def mknod_path(self, path, mode):
3871-        self.fs.add(path)
3872-
3873-    def mkdir_path(self, path, mode):
3874-        self.fs.mkdir(path)
3875-
3876-    def unlink_path(self, path):
3877-        self.fs.unlink(path)
3878-
3879-    def rmdir_path(self, path):
3880-        self.fs.rmdir(path)
3881-
3882-    def rename_path(self, oldpath, newpath):
3883-        kind = self.fs.check_path(oldpath)
3884-        if kind is None:
3885-            return False
3886-        self.fs.move(oldpath, newpath, kind)
3887-        return True
3888-
3889-    def getxattrs(self, path):
3890-        return XAttrs(self, path)
3891-
3892-
3893-class OpenFile:
3894-    def __init__(self, data=''):
3895-        self.f = StringIO()
3896-        self.f.write(data)
3897-        self.f.seek(0)
3898-
3899-    def seek(self, pos):
3900-        self.f.seek(pos)
3901-
3902-    def read(self, sz):
3903-        return self.f.read(sz)
3904-
3905-    def write(self, buf):
3906-        self.f.write(buf)
3907-
3908-
3909-class XAttrs:
3910-    def __init__(self, svnfs, path):
3911-        self.svnfs = svnfs
3912-        self.path = path
3913-
3914-    def keys(self):
3915-        return []
3916-
3917-    def __getitem__(self, key):
3918-        if key == 'status':
3919-            return self.svnfs.do_status(self.path)
3920-        raise KeyError(key)
3921-
3922-    def __setitem__(self, key, value):
3923-        if key == 'commit' and self.path == '':
3924-            self.svnfs.do_commit(value)
3925-        elif key == 'update' and self.path == '':
3926-            if self.svnfs.fs.modified():
3927-                raise IOError(errno.EPERM, "there are local changes")
3928-            if value == '':
3929-                rev = 'HEAD'
3930-            else:
3931-                try:
3932-                    rev = int(value)
3933-                except ValueError:
3934-                    raise IOError(errno.EPERM, "invalid revision number")
3935-            self.svnfs.do_open(rev)
3936-        else:
3937-            raise KeyError(key)
3938-
3939-    def __delitem__(self, key):
3940-        raise KeyError(key)
3941-
3942-
3943-if __name__ == '__main__':
3944-    import sys
3945-    svnurl, mountpoint = sys.argv[1:]
3946-    handler = Handler(mountpoint, SvnFS(svnurl))
3947-    handler.loop_forever()
3948rmfile ./contrib/fuse/impl_b/pyfuse/pysvnfs.py
3949hunk ./contrib/fuse/impl_b/pyfuse/r_svnfs.py 1
3950-"""
3951-A read-only svn fs showing all the revisions in subdirectories.
3952-"""
3953-from objectfs import ObjectFs, SymLink
3954-from handler import Handler
3955-from pysvn.ra import connect
3956-from pysvn.date import decode
3957-import errno, posixpath, time
3958-
3959-
3960-#USE_SYMLINKS = 0      # they are wrong if the original file had another path
3961-
3962-# use  getfattr -d filename  to see the node's attributes, which include
3963-# information like the revision at which the file was last modified
3964-
3965-
3966-class Root:
3967-    def __init__(self, svnurl):
3968-        self.svnurl = svnurl
3969-        self.ra = connect(svnurl)
3970-        self.head = self.ra.get_latest_rev()
3971-
3972-    def listdir(self):
3973-        for rev in range(1, self.head+1):
3974-            yield str(rev)
3975-        yield 'HEAD'
3976-
3977-    def join(self, name):
3978-        try:
3979-            rev = int(name)
3980-        except ValueError:
3981-            if name == 'HEAD':
3982-                return SymLink(str(self.head))
3983-            else:
3984-                raise KeyError(name)
3985-        return TopLevelDir(self.ra, rev, rev, '')
3986-
3987-
3988-class Node:
3989-    def __init__(self, ra, rev, last_changed_rev, path):
3990-        self.ra = ra
3991-        self.rev = rev
3992-        self.last_changed_rev = last_changed_rev
3993-        self.path = path
3994-
3995-    def __repr__(self):
3996-        return '<%s %d/%s>' % (self.__class__.__name__, self.rev, self.path)
3997-
3998-class Dir(Node):
3999-    def listdir(self):
4000-        rev, props, entries = self.ra.get_dir(self.path, self.rev,
4001-                                              want_props = False)
4002-        for key, stats in entries.items():
4003-            yield key, getnode(self.ra, self.rev,
4004-                               posixpath.join(self.path, key), stats)
4005-
4006-class File(Node):
4007-    def __init__(self, ra, rev, last_changed_rev, path, size):
4008-        Node.__init__(self, ra, rev, last_changed_rev, path)
4009-        self.filesize = size
4010-
4011-    def size(self):
4012-        return self.filesize
4013-
4014-    def read(self):
4015-        checksum, rev, props, data = self.ra.get_file(self.path, self.rev,
4016-                                                      want_props = False)
4017-        return data
4018-
4019-
4020-class TopLevelDir(Dir):
4021-    def listdir(self):
4022-        for item in Dir.listdir(self):
4023-            yield item
4024-        yield 'svn:log', Log(self.ra, self.rev)
4025-
4026-class Log:
4027-
4028-    def __init__(self, ra, rev):
4029-        self.ra = ra
4030-        self.rev = rev
4031-
4032-    def getlogentry(self):
4033-        try:
4034-            return self.logentry
4035-        except AttributeError:
4036-            logentries = self.ra.log('', startrev=self.rev, endrev=self.rev)
4037-            try:
4038-                [self.logentry] = logentries
4039-            except ValueError:
4040-                self.logentry = None
4041-            return self.logentry
4042-
4043-    def size(self):
4044-        return len(self.read())
4045-
4046-    def read(self):
4047-        logentry = self.getlogentry()
4048-        if logentry is None:
4049-            return 'r%d | (no change here)\n' % (self.rev,)
4050-        datetuple = time.gmtime(decode(logentry.date))
4051-        date = time.strftime("%c", datetuple)
4052-        return 'r%d | %s | %s\n\n%s' % (self.rev,
4053-                                        logentry.author,
4054-                                        date,
4055-                                        logentry.message)
4056-
4057-
4058-if 0:
4059-    pass
4060-##if USE_SYMLINKS:
4061-##    def getnode(ra, rev, path, stats):
4062-##        committed_rev = stats['svn:entry:committed-rev']
4063-##        if committed_rev == rev:
4064-##            kind = stats['svn:entry:kind']
4065-##            if kind == 'file':
4066-##                return File(ra, rev, path, stats['svn:entry:size'])
4067-##            elif kind == 'dir':
4068-##                return Dir(ra, rev, path)
4069-##            else:
4070-##                raise IOError(errno.EINVAL, "kind %r" % (kind,))
4071-##        else:
4072-##            depth = path.count('/')
4073-##            return SymLink('../' * depth + '../%d/%s' % (committed_rev, path))
4074-else:
4075-    def getnode(ra, rev, path, stats):
4076-        last_changed_rev = stats['svn:entry:committed-rev']
4077-        kind = stats['svn:entry:kind']
4078-        if kind == 'file':
4079-            return File(ra, rev, last_changed_rev, path,
4080-                        stats['svn:entry:size'])
4081-        elif kind == 'dir':
4082-            return Dir(ra, rev, last_changed_rev, path)
4083-        else:
4084-            raise IOError(errno.EINVAL, "kind %r" % (kind,))
4085-
4086-
4087-if __name__ == '__main__':
4088-    import sys
4089-    svnurl, mountpoint = sys.argv[1:]
4090-    handler = Handler(mountpoint, ObjectFs(Root(svnurl)))
4091-    handler.loop_forever()
4092rmfile ./contrib/fuse/impl_b/pyfuse/r_svnfs.py
4093hunk ./contrib/fuse/impl_b/pyfuse/rwobjectfs.py 1
4094-from kernel import *
4095-import stat, errno, os, time
4096-from cStringIO import StringIO
4097-from OrderedDict import OrderedDict
4098-
4099-INFINITE = 86400.0
4100-
4101-
4102-class Wrapper(object):
4103-    def __init__(self, obj):
4104-        self.obj = obj
4105-
4106-    def getuid(self):
4107-        return uid(self.obj)
4108-
4109-    def __hash__(self):
4110-        return hash(self.obj)
4111-
4112-    def __eq__(self, other):
4113-        return self.obj == other
4114-
4115-    def __ne__(self, other):
4116-        return self.obj != other
4117-
4118-
4119-class BaseDir(object):
4120-
4121-    def join(self, name):
4122-        "Return a file or subdirectory object"
4123-        for item in self.listdir():
4124-            if isinstance(item, tuple):
4125-                subname, subnode = item
4126-                if subname == name:
4127-                    return subnode
4128-        raise KeyError(name)
4129-
4130-    def listdir(self):
4131-        "Return a list of names, or a list of (name, object)"
4132-        raise NotImplementedError
4133-
4134-    def create(self, name):
4135-        "Create a file"
4136-        raise NotImplementedError
4137-
4138-    def mkdir(self, name):
4139-        "Create a subdirectory"
4140-        raise NotImplementedError
4141-
4142-    def symlink(self, name, target):
4143-        "Create a symbolic link"
4144-        raise NotImplementedError
4145-
4146-    def unlink(self, name):
4147-        "Remove a file or subdirectory."
4148-        raise NotImplementedError
4149-
4150-    def rename(self, newname, olddirnode, oldname):
4151-        "Move another node into this directory."
4152-        raise NotImplementedError
4153-
4154-    def getuid(self):
4155-        return uid(self)
4156-
4157-    def getattr(self, fs):
4158-        return fs.newattr(stat.S_IFDIR, self.getuid(), mode=0777), INFINITE
4159-
4160-    def setattr(self, **kwds):
4161-        pass
4162-
4163-    def getentries(self):
4164-        entries = OrderedDict()
4165-        for name in self.listdir():
4166-            if isinstance(name, tuple):
4167-                name, subnode = name
4168-            else:
4169-                subnode = None
4170-            entries[name] = subnode
4171-        return entries
4172-
4173-
4174-class BaseFile(object):
4175-
4176-    def size(self):
4177-        "Return the size of the file, or None if not known yet"
4178-        f = self.open()
4179-        if isinstance(f, str):
4180-            return len(f)
4181-        f.seek(0, 2)
4182-        return f.tell()
4183-
4184-    def open(self):
4185-        "Return the content as a string or a file-like object"
4186-        raise NotImplementedError
4187-
4188-    def getuid(self):
4189-        return uid(self)
4190-
4191-    def getattr(self, fs):
4192-        sz = self.size()
4193-        attr = fs.newattr(stat.S_IFREG, self.getuid())
4194-        if sz is None:
4195-            timeout = 0
4196-        else:
4197-            attr.size = sz
4198-            timeout = INFINITE
4199-        return attr, timeout
4200-
4201-    def setattr(self, size, **kwds):
4202-        f = self.open()
4203-        if self.size() == size:
4204-            return
4205-        if isinstance(f, str):
4206-            raise IOError(errno.EPERM)
4207-        f.seek(size)
4208-        f.truncate()
4209-
4210-
4211-class BaseSymLink(object):
4212-
4213-    def readlink(self):
4214-        "Return the symlink's target, as a string"
4215-        raise NotImplementedError
4216-
4217-    def getuid(self):
4218-        return uid(self)
4219-
4220-    def getattr(self, fs):
4221-        target = self.readlink()
4222-        attr = fs.newattr(stat.S_IFLNK, self.getuid())
4223-        attr.size = len(target)
4224-        attr.mode |= 0777
4225-        return attr, INFINITE
4226-
4227-    def setattr(self, **kwds):
4228-        pass
4229-
4230-# ____________________________________________________________
4231-
4232-class Dir(BaseDir):
4233-    def __init__(self, **contents):
4234-        self.contents = contents
4235-    def listdir(self):
4236-        return self.contents.items()
4237-    def join(self, name):
4238-        return self.contents[name]
4239-    def create(self, fs, name):
4240-        node = fs.File()
4241-        self.contents[name] = node
4242-        return node
4243-    def mkdir(self, fs, name):
4244-        node = fs.Dir()
4245-        self.contents[name] = node
4246-        return node
4247-    def symlink(self, fs, name, target):
4248-        node = fs.SymLink(target)
4249-        self.contents[name] = node
4250-        return node
4251-    def unlink(self, name):
4252-        del self.contents[name]
4253-    def rename(self, newname, olddirnode, oldname):
4254-        oldnode = olddirnode.join(oldname)
4255-        olddirnode.unlink(oldname)
4256-        self.contents[newname] = oldnode
4257-
4258-class File(BaseFile):
4259-    def __init__(self):
4260-        self.data = StringIO()
4261-    def size(self):
4262-        self.data.seek(0, 2)
4263-        return self.data.tell()
4264-    def open(self):
4265-        return self.data
4266-
4267-class SymLink(BaseSymLink):
4268-    def __init__(self, target):
4269-        self.target = target
4270-    def readlink(self):
4271-        return self.target
4272-
4273-# ____________________________________________________________
4274-
4275-
4276-class RWObjectFs(object):
4277-    """A simple read-write file system based on Python objects."""
4278-
4279-    UID = os.getuid()
4280-    GID = os.getgid()
4281-    UMASK = os.umask(0); os.umask(UMASK)
4282-
4283-    Dir = Dir
4284-    File = File
4285-    SymLink = SymLink
4286-
4287-    def __init__(self, rootnode):
4288-        self.nodes = {FUSE_ROOT_ID: rootnode}
4289-        self.starttime = time.time()
4290-
4291-    def newattr(self, s, ino, mode=0666):
4292-        return fuse_attr(ino   = ino,
4293-                         size  = 0,
4294-                         mode  = s | (mode & ~self.UMASK),
4295-                         nlink = 1,  # even on dirs! this confuses 'find' in
4296-                                     # a good way :-)
4297-                         atime = self.starttime,
4298-                         mtime = self.starttime,
4299-                         ctime = self.starttime,
4300-                         uid   = self.UID,
4301-                         gid   = self.GID)
4302-
4303-    def getnode(self, nodeid):
4304-        try:
4305-            return self.nodes[nodeid]
4306-        except KeyError:
4307-            raise IOError(errno.ESTALE, nodeid)
4308-
4309-    def getattr(self, node):
4310-        return node.getattr(self)
4311-
4312-    def setattr(self, node, mode, uid, gid, size, atime, mtime):
4313-        node.setattr(mode=mode, uid=uid, gid=gid, size=size,
4314-                     atime=atime, mtime=mtime)
4315-
4316-    def listdir(self, node):
4317-        entries = node.getentries()
4318-        for name, subnode in entries.items():
4319-            if subnode is None:
4320-                subnode = node.join(name)
4321-                self.nodes[uid(subnode)] = subnode
4322-                entries[name] = subnode
4323-            if isinstance(subnode, str):
4324-                yield name, TYPE_REG
4325-            elif hasattr(subnode, 'readlink'):
4326-                yield name, TYPE_LNK
4327-            elif hasattr(subnode, 'size'):
4328-                yield name, TYPE_REG
4329-            else:
4330-                yield name, TYPE_DIR
4331-
4332-    def lookup(self, node, name):
4333-        try:
4334-            subnode = node.join(name)
4335-        except KeyError:
4336-            raise IOError(errno.ENOENT, name)
4337-        else:
4338-            res = uid(subnode)
4339-            self.nodes[res] = subnode
4340-            return res, INFINITE
4341-
4342-    def mknod(self, dirnode, filename, mode):
4343-        dirnode.create(self, filename)
4344-        return self.lookup(dirnode, filename)
4345-
4346-    def mkdir(self, dirnode, subdirname, mode):
4347-        dirnode.mkdir(self, subdirname)
4348-        return self.lookup(dirnode, subdirname)
4349-
4350-    def symlink(self, dirnode, linkname, target):
4351-        dirnode.symlink(self, linkname, target)
4352-        return self.lookup(dirnode, linkname)
4353-
4354-    def unlink(self, dirnode, filename):
4355-        try:
4356-            dirnode.unlink(filename)
4357-        except KeyError:
4358-            raise IOError(errno.ENOENT, filename)
4359-
4360-    rmdir = unlink
4361-
4362-    def open(self, node, mode):
4363-        f = node.open()
4364-        if isinstance(f, str):
4365-            f = StringIO(f)
4366-        return f
4367-
4368-    def readlink(self, node):
4369-        return node.readlink()
4370-
4371-    def rename(self, olddirnode, oldname, newdirnode, newname):
4372-        try:
4373-            newdirnode.rename(newname, olddirnode, oldname)
4374-        except KeyError:
4375-            raise IOError(errno.ENOENT, oldname)
4376-
4377-    def getxattrs(self, node):
4378-        return getattr(node, '__dict__', {})
4379-
4380-# ____________________________________________________________
4381-
4382-import struct
4383-try:
4384-    HUGEVAL = 256 ** struct.calcsize('P')
4385-except struct.error:
4386-    HUGEVAL = 0
4387-
4388-def fixid(result):
4389-    if result < 0:
4390-        result += HUGEVAL
4391-    return result
4392-
4393-def uid(obj):
4394-    """
4395-    Return the id of an object as an unsigned number so that its hex
4396-    representation makes sense
4397-    """
4398-    return fixid(id(obj))
4399rmfile ./contrib/fuse/impl_b/pyfuse/rwobjectfs.py
4400hunk ./contrib/fuse/impl_b/pyfuse/svnfs.py 1
4401-import py
4402-from handler import Handler
4403-from objectfs import ObjectFs
4404-
4405-
4406-class SvnDir:
4407-    def __init__(self, path):
4408-        self.path = path
4409-
4410-    def listdir(self):
4411-        for p in self.path.listdir():
4412-            if p.check(dir=1):
4413-                cls = SvnDir
4414-            else:
4415-                cls = SvnFile
4416-            yield p.basename, cls(p)
4417-
4418-
4419-class SvnFile:
4420-    data = None
4421-
4422-    def __init__(self, path):
4423-        self.path = path
4424-
4425-    def size(self):
4426-        if self.data is None:
4427-            return None
4428-        else:
4429-            return len(self.data)
4430-
4431-    def read(self):
4432-        if self.data is None:
4433-            self.data = self.path.read()
4434-        return self.data
4435-
4436-
4437-if __name__ == '__main__':
4438-    import sys
4439-    svnurl, mountpoint = sys.argv[1:]
4440-    root = SvnDir(py.path.svnurl(svnurl))
4441-    handler = Handler(mountpoint, ObjectFs(root))
4442-    handler.loop_forever()
4443rmfile ./contrib/fuse/impl_b/pyfuse/svnfs.py
4444hunk ./contrib/fuse/impl_b/pyfuse/tahoe.py 1
4445-"""
4446-PyFuse client for the Tahoe distributed file system.
4447-See http://allmydata.org/
4448-"""
4449-
4450-# Read-only for now.
4451-
4452-# Portions copied from the file contrib/fuse/tahoe_fuse.py distributed
4453-# with Tahoe 1.0.0.
4454-
4455-import os, sys
4456-from objectfs import ObjectFs
4457-from handler import Handler
4458-import simplejson
4459-import urllib
4460-
4461-
4462-### Config:
4463-TahoeConfigDir = '~/.tahoe'
4464-
4465-
4466-### Utilities for debug:
4467-def log(msg, *args):
4468-    print msg % args
4469-
4470-
4471-class TahoeConnection:
4472-    def __init__(self, confdir):
4473-        self.confdir = confdir
4474-        self._init_url()
4475-
4476-    def _init_url(self):
4477-        if os.path.exists(os.path.join(self.confdir, 'node.url')):
4478-            self.url = file(os.path.join(self.confdir, 'node.url'), 'rb').read().strip()
4479-            if not self.url.endswith('/'):
4480-                self.url += '/'
4481-        else:
4482-            f = open(os.path.join(self.confdir, 'webport'), 'r')
4483-            contents = f.read()
4484-            f.close()
4485-            fields = contents.split(':')
4486-            proto, port = fields[:2]
4487-            assert proto == 'tcp'
4488-            port = int(port)
4489-            self.url = 'http://localhost:%d/' % (port,)
4490-
4491-    def get_root(self):
4492-        # For now we just use the same default as the CLI:
4493-        rootdirfn = os.path.join(self.confdir, 'private', 'root_dir.cap')
4494-        f = open(rootdirfn, 'r')
4495-        cap = f.read().strip()
4496-        f.close()
4497-        return TahoeDir(self, canonicalize_cap(cap))
4498-
4499-
4500-class TahoeNode:
4501-    def __init__(self, conn, uri):
4502-        self.conn = conn
4503-        self.uri = uri
4504-
4505-    def get_metadata(self):
4506-        f = self._open('?t=json')
4507-        json = f.read()
4508-        f.close()
4509-        return simplejson.loads(json)
4510-
4511-    def _open(self, postfix=''):
4512-        url = '%suri/%s%s' % (self.conn.url, self.uri, postfix)
4513-        log('*** Fetching: %r', url)
4514-        return urllib.urlopen(url)
4515-
4516-
4517-class TahoeDir(TahoeNode):
4518-    def listdir(self):
4519-        flag, md = self.get_metadata()
4520-        assert flag == 'dirnode'
4521-        result = []
4522-        for name, (childflag, childmd) in md['children'].items():
4523-            if childflag == 'dirnode':
4524-                cls = TahoeDir
4525-            else:
4526-                cls = TahoeFile
4527-            result.append((str(name), cls(self.conn, childmd['ro_uri'])))
4528-        return result
4529-
4530-class TahoeFile(TahoeNode):
4531-    def size(self):
4532-        rawsize = self.get_metadata()[1]['size']
4533-        return rawsize
4534-
4535-    def read(self):
4536-        return self._open().read()
4537-
4538-
4539-def canonicalize_cap(cap):
4540-    cap = urllib.unquote(cap)
4541-    i = cap.find('URI:')
4542-    assert i != -1, 'A cap must contain "URI:...", but this does not: ' + cap
4543-    return cap[i:]
4544-
4545-def main(mountpoint, basedir):
4546-    conn = TahoeConnection(basedir)
4547-    root = conn.get_root()
4548-    handler = Handler(mountpoint, ObjectFs(root))
4549-    handler.loop_forever()
4550-
4551-if __name__ == '__main__':
4552-    basedir = os.path.expanduser(TahoeConfigDir)
4553-    for i, arg in enumerate(sys.argv):
4554-        if arg == '--basedir':
4555-            basedir = sys.argv[i+1]
4556-            sys.argv[i:i+2] = []
4557-
4558-    [mountpoint] = sys.argv[1:]
4559-    main(mountpoint, basedir)
4560rmfile ./contrib/fuse/impl_b/pyfuse/tahoe.py
4561hunk ./contrib/fuse/impl_b/pyfuse/test.py 1
4562-from handler import Handler
4563-import stat, errno, os, time
4564-from cStringIO import StringIO
4565-from kernel import *
4566-
4567-
4568-UID = os.getuid()
4569-GID = os.getgid()
4570-UMASK = os.umask(0); os.umask(UMASK)
4571-INFINITE = 86400.0
4572-
4573-
4574-class Node(object):
4575-    __slots__ = ['attr', 'data']
4576-
4577-    def __init__(self, attr, data=None):
4578-        self.attr = attr
4579-        self.data = data
4580-
4581-    def type(self):
4582-        return mode2type(self.attr.mode)
4583-
4584-    def modified(self):
4585-        self.attr.mtime = self.attr.atime = time.time()
4586-        t = self.type()
4587-        if t == TYPE_REG:
4588-            f = self.data
4589-            pos = f.tell()
4590-            f.seek(0, 2)
4591-            self.attr.size = f.tell()
4592-            f.seek(pos)
4593-        elif t == TYPE_DIR:
4594-            nsubdirs = 0
4595-            for nodeid in self.data.values():
4596-                nsubdirs += nodeid & 1
4597-            self.attr.nlink = 2 + nsubdirs
4598-
4599-
4600-def newattr(s, mode=0666):
4601-    now = time.time()
4602-    return fuse_attr(ino   = INVALID_INO,
4603-                     size  = 0,
4604-                     mode  = s | (mode & ~UMASK),
4605-                     nlink = 1 + (s == stat.S_IFDIR),
4606-                     atime = now,
4607-                     mtime = now,
4608-                     ctime = now,
4609-                     uid   = UID,
4610-                     gid   = GID)
4611-
4612-# ____________________________________________________________
4613-
4614-class Filesystem:
4615-
4616-    def __init__(self, rootnode):
4617-        self.nodes = {FUSE_ROOT_ID: rootnode}
4618-        self.nextid = 2
4619-        assert self.nextid > FUSE_ROOT_ID
4620-
4621-    def getnode(self, nodeid):
4622-        try:
4623-            return self.nodes[nodeid]
4624-        except KeyError:
4625-            raise IOError(errno.ESTALE, nodeid)
4626-
4627-    def forget(self, nodeid):
4628-        pass
4629-
4630-    def cachenode(self, node):
4631-        id = self.nextid
4632-        self.nextid += 2
4633-        if node.type() == TYPE_DIR:
4634-            id += 1
4635-        self.nodes[id] = node
4636-        return id
4637-
4638-    def getattr(self, node):
4639-        return node.attr, INFINITE
4640-
4641-    def setattr(self, node, mode=None, uid=None, gid=None,
4642-                size=None, atime=None, mtime=None):
4643-        if mode  is not None:  node.attr.mode  = (node.attr.mode&~0777) | mode
4644-        if uid   is not None:  node.attr.uid   = uid
4645-        if gid   is not None:  node.attr.gid   = gid
4646-        if atime is not None:  node.attr.atime = atime
4647-        if mtime is not None:  node.attr.mtime = mtime
4648-        if size is not None and node.type() == TYPE_REG:
4649-            node.data.seek(size)
4650-            node.data.truncate()
4651-
4652-    def listdir(self, node):
4653-        for name, subnodeid in node.data.items():
4654-            subnode = self.nodes[subnodeid]
4655-            yield name, subnode.type()
4656-
4657-    def lookup(self, node, name):
4658-        try:
4659-            return node.data[name], INFINITE
4660-        except KeyError:
4661-            pass
4662-        if hasattr(node, 'findnode'):
4663-            try:
4664-                subnode = node.findnode(name)
4665-            except KeyError:
4666-                pass
4667-            else:
4668-                id = self.cachenode(subnode)
4669-                node.data[name] = id
4670-                return  id, INFINITE
4671-        raise IOError(errno.ENOENT, name)
4672-
4673-    def open(self, node, mode):
4674-        return node.data
4675-
4676-    def mknod(self, node, name, mode):
4677-        subnode = Node(newattr(mode & 0170000, mode & 0777))
4678-        if subnode.type() == TYPE_REG:
4679-            subnode.data = StringIO()
4680-        else:
4681-            raise NotImplementedError
4682-        id = self.cachenode(subnode)
4683-        node.data[name] = id
4684-        node.modified()
4685-        return id, INFINITE
4686-
4687-    def mkdir(self, node, name, mode):
4688-        subnode = Node(newattr(stat.S_IFDIR, mode & 0777), {})
4689-        id = self.cachenode(subnode)
4690-        node.data[name] = id
4691-        node.modified()
4692-        return id, INFINITE
4693-
4694-    def symlink(self, node, linkname, target):
4695-        subnode = Node(newattr(stat.S_IFLNK, 0777), target)
4696-        id = self.cachenode(subnode)
4697-        node.data[linkname] = id
4698-        node.modified()
4699-        return id, INFINITE
4700-
4701-    def readlink(self, node):
4702-        assert node.type() == TYPE_LNK
4703-        return node.data
4704-
4705-    def unlink(self, node, name):
4706-        try:
4707-            del node.data[name]
4708-        except KeyError:
4709-            raise IOError(errno.ENOENT, name)
4710-        node.modified()
4711-
4712-    rmdir = unlink
4713-
4714-    def rename(self, oldnode, oldname, newnode, newname):
4715-        if newnode.type() != TYPE_DIR:
4716-            raise IOError(errno.ENOTDIR, newnode)
4717-        try:
4718-            nodeid = oldnode.data.pop(oldname)
4719-        except KeyError:
4720-            raise IOError(errno.ENOENT, oldname)
4721-        oldnode.modified()
4722-        newnode.data[newname] = nodeid
4723-        newnode.modified()
4724-
4725-    def modified(self, node):
4726-        node.modified()
4727-
4728-# ____________________________________________________________
4729-
4730-if __name__ == '__main__':
4731-    root = Node(newattr(stat.S_IFDIR), {})
4732-    handler = Handler('/home/arigo/mnt', Filesystem(root))
4733-    handler.loop_forever()
4734rmfile ./contrib/fuse/impl_b/pyfuse/test.py
4735rmdir ./contrib/fuse/impl_b/pyfuse
4736hunk ./contrib/fuse/impl_b/announce.txt 1
4737-This announcement is archived in the tahoe-dev mailing list archive:
4738-
4739-http://allmydata.org/pipermail/tahoe-dev/2008-March/000465.html
4740-
4741-[tahoe-dev] Another FUSE interface
4742-Armin Rigo arigo at tunes.org
4743-Sat Mar 29 04:35:36 PDT 2008
4744-
4745-    * Previous message: [tahoe-dev] announcing allmydata.org "Tahoe", v1.0
4746-    * Next message: [tahoe-dev] convergent encryption reconsidered -- salting and key-strengthening
4747-    * Messages sorted by: [ date ] [ thread ] [ subject ] [ author ]
4748-
4749-Hi all,
4750-
4751-I implemented for fun another Tahoe-to-FUSE interface using my own set
4752-of FUSE bindings.  If you are interested, you can check out the
4753-following subversion directory:
4754-
4755-    http://codespeak.net/svn/user/arigo/hack/pyfuse
4756-
4757-tahoe.py is a 100-lines, half-an-hour-job interface to Tahoe, limited to
4758-read-only at the moment.  The rest of the directory contains PyFuse, and
4759-many other small usage examples.  PyFuse is a pure Python FUSE daemon
4760-(no messy linking issues, no dependencies).
4761-
4762-
4763-A bientot,
4764-
4765-Armin Rigo
4766-
4772-
4773rmfile ./contrib/fuse/impl_b/announce.txt
4774rmdir ./contrib/fuse/impl_b
4775hunk ./contrib/fuse/impl_a/README 1
4776-
4777-Welcome to the tahoe fuse interface prototype!
4778-
4779-
4780-Dependencies:
4781-
4782-In addition to a working tahoe installation, this interface depends
4783-on the python-fuse interface.  This package is available on Ubuntu
4784-systems as "python-fuse".  It is only known to work with ubuntu
4785-package version "2.5-5build1".  The latest ubuntu package (version
4786-"1:0.2-pre3-3") appears to not work currently.
4787-
4788-Unfortunately this package appears to be poorly maintained (notice the
4789-wildly different version strings and changing API semantics), so if you
4790-know of a good replacement pythonic fuse interface, please let tahoe-dev know
4791-about it!
4792-
4793-
4794-Configuration:
4795-
4796-Currently tahoe-fuse.py uses the same ~/.tahoe/private/root_dir.cap
4797-file (which is also the CLI default).  This is not configurable yet.
4798-Place a directory cap in this file.  (Hint: If you can run "tahoe ls"
4799-and see a directory listing, this file is properly configured.)
4800-
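For illustration, a minimal check of this setup in the same Python style as
the module below; the path is the CLI default named above, and the "URI:DIR2"
prefix test mirrors TahoeNode.make further down in this patch:

    import os

    capfile = os.path.join(os.path.expanduser('~/.tahoe'),
                           'private', 'root_dir.cap')
    cap = open(capfile, 'r').read().strip()
    # Directory caps contain "URI:DIR2" (cf. TahoeNode.make below); anything
    # else will not mount as a directory.
    if cap.find('URI:DIR2') == -1:
        raise SystemExit('%s does not hold a directory cap: %r' % (capfile, cap))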
4801-
4802-Commandline:
4803-
4804-The usage is "tahoe-fuse.py <mountpoint>".  The mount point needs to
4805-be an existing directory which should be empty.  (If it's not empty
4806-the contents will be safe, but unavailable while the tahoe-fuse.py
4807-process is mounted there.)
4808-
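For illustration, one way to drive this from a script on Linux; the
['python', 'tahoe-fuse.py', mountpoint] invocation and the "fusermount -u"
unmount command are taken from runtests.py later in this patch, and the
mountpoint path is only an example:

    import os, subprocess

    mountpoint = os.path.expanduser('~/tahoe-mnt')  # example path
    if not os.path.isdir(mountpoint):
        os.makedirs(mountpoint)                     # must exist; should be empty
    mounter = subprocess.Popen(['python', 'tahoe-fuse.py', mountpoint])
    # ... poke around the mountpoint with ordinary tools, then unmount:
    subprocess.check_call(['fusermount', '-u', mountpoint])
    mounter.wait()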
4809-
4810-Usage:
4811-
4812-To use the interface, use other programs to poke around the
4813-mountpoint.  You should be able to see the same contents as you would
4814-by using the CLI or WUI for the same directory cap.
4815-
4816-
4817-Runtime Behavior Notes:
4818-
4819-Read-only:
4820-Only reading a tahoe grid is supported, which is reflected in
4821-the permission modes.  With Tahoe 0.7.0, write access should be easier
4822-to implement, but is not yet present.
4823-
4824-In-Memory File Caching:
4825-Currently requesting a particular file for read causes the entire file to
4826-be retrieved into tahoe-fuse.py memory before the read operation returns!
4827-This caching is reused for subsequent reads.  Beware large files.
4828-When transitioning to a finer-grained fuse api, this caching should be
4829-replaced with straightforward calls to the wapi.  In my opinion, the
4830-Tahoe node should do all the caching tricks, so that extensions such as
4831-tahoe-fuse.py can be simple and thin.
4832-
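For illustration, what such a call to the wapi might look like; this assumes
the node's webapi honors standard HTTP Range headers (an assumption, not
something this README states), and the <node.url>/uri/<cap> URL shape mirrors
TahoeNode.open below:

    import urllib2

    def read_span(node_url, cap, offset, length):
        # node_url is assumed to end with '/', as in TahoeNode.__init__ below.
        # Fetch only the requested byte range instead of the whole file.
        req = urllib2.Request('%suri/%s' % (node_url, cap))
        req.add_header('Range', 'bytes=%d-%d' % (offset, offset + length - 1))
        return urllib2.urlopen(req).read()
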
4833-Backgrounding Behavior:
4834-When using the 2.5-5build1 ubuntu package and passing no arguments
4835-other than a mountpoint to tahoe-fuse.py, the process should remain in
4836-the foreground and print debug information.  Other python-fuse
4837-versions appear to alter this behavior and may fork the process to
4838-the background and obscure the log output.  Bonus points to whoever
4839-discovers the fate of these poor log messages in that case.
4840-
4841-"Investigative Logging":
4842-This prototype is designed to aid further fuse development, so
4843-currently *every* fuse interface call figures out the process from
4844-which the file system request originates, then figures out that
4845-process's command line (this uses the /proc file system).  This is handy
4846-for interactively inspecting which kinds of behavior invoke which
4847-file system operations, but may not work for you.  To disable this
4848-inspection, edit the source and comment out all of the "@trace_calls"
4849-method decorators by inserting a '#' so each looks like
4850-"#@trace_calls" (without quotes).
4851-
4852-Not-to-spec:
4853-The current version was not implemented according to any spec and
4854-makes quite a few dubious "guesses" about what data to pass to the fuse
4855-interface.  You may see bizarre values, which may potentially confuse
4856-any processes visiting the files under the mount point.
4857-
4858-Serial, blocking operations:
4859-Most fuse operations result in one or more http calls to the WAPI.
4860-These are serial and blocking (at least for the tested python-fuse
4861-version 2.5-5build1), so access to this file system is quite
4862-inefficient.
4863-
4864-
4865-Good luck!
4866rmfile ./contrib/fuse/impl_a/README
4867hunk ./contrib/fuse/impl_a/tahoe_fuse.py 1
4868-#! /usr/bin/env python
4869-'''
4870-Tahoe thin-client fuse module.
4871-
4872-See the accompanying README for configuration/usage details.
4873-
4874-Goals:
4875-
4876-- Delegate to Tahoe webapi as much as possible.
4877-- Thin rather than clever.  (Even when that means clunky.)
4878-
4879-
4880-Warts:
4881-
4882-- Reads cache entire file contents, violating the thinness goal.  Can we GET spans of files?
4883-- Single threaded.
4884-
4885-
4886-Road-map:
4887-1. Add unit tests where possible with little code modification.
4888-2. Make unit tests pass for a variety of python-fuse module versions.
4889-3. Modify the design to make possible unit test coverage of larger portions of code.
4890-
4891-Wishlist:
4892-- Perhaps integrate cli aliases or root_dir.cap.
4893-- Research pkg_resources; see if it can replace the try-import-except-import-error pattern.
4894-- Switch to logging instead of homebrew logging.
4895-'''
4896-
4897-
4898-#import bindann
4899-#bindann.install_exception_handler()
4900-
4901-import sys, stat, os, errno, urllib, time
4902-
4903-try:
4904-    import simplejson
4905-except ImportError, e:
4906-    raise SystemExit('''\
4907-Could not import simplejson, which is bundled with Tahoe.  Please
4908-update your PYTHONPATH environment variable to include the tahoe
4909-"support/lib/python<VERSION>/site-packages" directory.
4910-
4911-If you run this from the Tahoe source directory, use this command:
4912-PYTHONPATH="$PYTHONPATH:./support/lib/python%d.%d/site-packages/" python %s
4913-''' % (sys.version_info[:2] + (' '.join(sys.argv),)))
4914-   
4915-
4916-try:
4917-    import fuse
4918-except ImportError, e:
4919-    raise SystemExit('''\
4920-Could not import fuse, the pythonic fuse bindings.  This dependency
4921-of tahoe-fuse.py is *not* bundled with tahoe.  Please install it.
4922-On debian/ubuntu systems run: sudo apt-get install python-fuse
4923-''')
4924-
4925-# FIXME: Check for non-working fuse versions here.
4926-# FIXME: Make this work for all common python-fuse versions.
4927-
4928-# FIXME: Currently uses the old, silly path-based (non-stateful) interface:
4929-fuse.fuse_python_api = (0, 1) # Use the silly path-based api for now.
4930-
4931-
4932-### Config:
4933-TahoeConfigDir = '~/.tahoe'
4934-MagicDevNumber = 42
4935-UnknownSize = -1
4936-
4937-
4938-def main():
4939-    basedir = os.path.expanduser(TahoeConfigDir)
4940-
4941-    for i, arg in enumerate(sys.argv):
4942-        if arg == '--basedir':
4943-            try:
4944-                basedir = sys.argv[i+1]
4945-                sys.argv[i:i+2] = []
4946-            except IndexError:
4947-                sys.argv = [sys.argv[0], '--help']
4948-               
4949-
4950-    log_init(basedir)
4951-    log('Commandline: %r', sys.argv)
4952-
4953-    fs = TahoeFS(basedir)
4954-    fs.main()
4955-
4956-
4957-### Utilities for debug:
4958-_logfile = None # Private to log* functions.
4959-
4960-def log_init(confdir):
4961-    global _logfile
4962-   
4963-    logpath = os.path.join(confdir, 'logs', 'tahoe_fuse.log')
4964-    _logfile = open(logpath, 'a')
4965-    log('Log opened at: %s\n', time.strftime('%Y-%m-%d %H:%M:%S'))
4966-
4967-
4968-def log(msg, *args):
4969-    _logfile.write((msg % args) + '\n')
4970-    _logfile.flush()
4971-   
4972-   
4973-def trace_calls(m):
4974-    def dbmeth(self, *a, **kw):
4975-        pid = self.GetContext()['pid']
4976-        log('[%d %r]\n%s%r%r', pid, get_cmdline(pid), m.__name__, a, kw)
4977-        try:
4978-            r = m(self, *a, **kw)
4979-            if (type(r) is int) and (r < 0):
4980-                log('-> -%s\n', errno.errorcode[-r],)
4981-            else:
4982-                repstr = repr(r)[:256]
4983-                log('-> %s\n', repstr)
4984-            return r
4985-        except:
4986-            sys.excepthook(*sys.exc_info())
4987-           
4988-    return dbmeth
4989-
4990-
4991-def get_cmdline(pid):
4992-    f = open('/proc/%d/cmdline' % pid, 'r')
4993-    args = f.read().split('\0')
4994-    f.close()
4995-    assert args[-1] == ''
4996-    return args[:-1]
4997-
4998-
4999-class SystemError (Exception):
5000-    def __init__(self, eno):
5001-        self.eno = eno
5002-        Exception.__init__(self, errno.errorcode[eno])
5003-
5004-    @staticmethod
5005-    def wrap_returns(meth):
5006-        def wrapper(*args, **kw):
5007-            try:
5008-                return meth(*args, **kw)
5009-            except SystemError, e:
5010-                return -e.eno
5011-        wrapper.__name__ = meth.__name__
5012-        return wrapper
5013-
5014-
5015-### Heart of the Matter:
5016-class TahoeFS (fuse.Fuse):
5017-    def __init__(self, confdir):
5018-        log('Initializing with confdir = %r', confdir)
5019-        fuse.Fuse.__init__(self)
5020-        self.confdir = confdir
5021-       
5022-        self.flags = 0 # FIXME: What goes here?
5023-        self.multithreaded = 0
5024-
5025-        # silly path-based file handles.
5026-        self.filecontents = {} # {path -> contents}
5027-
5028-        self._init_url()
5029-        self._init_rootdir()
5030-
5031-    def _init_url(self):
5032-        if os.path.exists(os.path.join(self.confdir, 'node.url')):
5033-            self.url = file(os.path.join(self.confdir, 'node.url'), 'rb').read().strip()
5034-            if not self.url.endswith('/'):
5035-                self.url += '/'
5036-        else:
5037-            f = open(os.path.join(self.confdir, 'webport'), 'r')
5038-            contents = f.read()
5039-            f.close()
5040-            fields = contents.split(':')
5041-            proto, port = fields[:2]
5042-            assert proto == 'tcp'
5043-            port = int(port)
5044-            self.url = 'http://localhost:%d' % (port,)
5045-
5046-    def _init_rootdir(self):
5047-        # For now we just use the same default as the CLI:
5048-        rootdirfn = os.path.join(self.confdir, 'private', 'root_dir.cap')
5049-        try:
5050-            f = open(rootdirfn, 'r')
5051-            cap = f.read().strip()
5052-            f.close()
5053-        except EnvironmentError, le:
5054-            # FIXME: This user-friendly help message may be platform-dependent because it checks the exception description.
5055-            if le.args[1].find('No such file or directory') != -1:
5056-                raise SystemExit('%s requires a directory capability in %s, but it was not found.\n' % (sys.argv[0], rootdirfn))
5057-            else:
5058-                raise le
5059-
5060-        self.rootdir = TahoeDir(self.url, canonicalize_cap(cap))
5061-
5062-    def _get_node(self, path):
5063-        assert path.startswith('/')
5064-        if path == '/':
5065-            return self.rootdir.resolve_path([])
5066-        else:
5067-            parts = path.split('/')[1:]
5068-            return self.rootdir.resolve_path(parts)
5069-   
5070-    def _get_contents(self, path):
5071-        contents = self.filecontents.get(path)
5072-        if contents is None:
5073-            node = self._get_node(path)
5074-            contents = node.open().read()
5075-            self.filecontents[path] = contents
5076-        return contents
5077-   
5078-    @trace_calls
5079-    @SystemError.wrap_returns
5080-    def getattr(self, path):
5081-        node = self._get_node(path)
5082-        return node.getattr()
5083-               
5084-    @trace_calls
5085-    @SystemError.wrap_returns
5086-    def getdir(self, path):
5087-        """
5088-        return: [(name, typeflag), ... ]
5089-        """
5090-        node = self._get_node(path)
5091-        return node.getdir()
5092-
5093-    @trace_calls
5094-    @SystemError.wrap_returns
5095-    def mythread(self):
5096-        return -errno.ENOSYS
5097-
5098-    @trace_calls
5099-    @SystemError.wrap_returns
5100-    def chmod(self, path, mode):
5101-        return -errno.ENOSYS
5102-
5103-    @trace_calls
5104-    @SystemError.wrap_returns
5105-    def chown(self, path, uid, gid):
5106-        return -errno.ENOSYS
5107-
5108-    @trace_calls
5109-    @SystemError.wrap_returns
5110-    def fsync(self, path, isFsyncFile):
5111-        return -errno.ENOSYS
5112-
5113-    @trace_calls
5114-    @SystemError.wrap_returns
5115-    def link(self, target, link):
5116-        return -errno.ENOSYS
5117-
5118-    @trace_calls
5119-    @SystemError.wrap_returns
5120-    def mkdir(self, path, mode):
5121-        return -errno.ENOSYS
5122-
5123-    @trace_calls
5124-    @SystemError.wrap_returns
5125-    def mknod(self, path, mode, dev_ignored):
5126-        return -errno.ENOSYS
5127-
5128-    @trace_calls
5129-    @SystemError.wrap_returns
5130-    def open(self, path, mode):
5131-        IgnoredFlags = os.O_RDONLY | os.O_NONBLOCK | os.O_SYNC | os.O_LARGEFILE
5132-        # Note: IgnoredFlags are all ignored!
5133-        for fname in dir(os):
5134-            if fname.startswith('O_'):
5135-                flag = getattr(os, fname)
5136-                if flag & IgnoredFlags:
5137-                    continue
5138-                elif mode & flag:
5139-                    log('Flag not supported: %s', fname)
5140-                    raise SystemError(errno.ENOSYS)
5141-
5142-        self._get_contents(path)
5143-        return 0
5144-
5145-    @trace_calls
5146-    @SystemError.wrap_returns
5147-    def read(self, path, length, offset):
5148-        return self._get_contents(path)[offset:offset+length]
5149-
5150-    @trace_calls
5151-    @SystemError.wrap_returns
5152-    def release(self, path):
5153-        del self.filecontents[path]
5154-        return 0
5155-
5156-    @trace_calls
5157-    @SystemError.wrap_returns
5158-    def readlink(self, path):
5159-        return -errno.ENOSYS
5160-
5161-    @trace_calls
5162-    @SystemError.wrap_returns
5163-    def rename(self, oldpath, newpath):
5164-        return -errno.ENOSYS
5165-
5166-    @trace_calls
5167-    @SystemError.wrap_returns
5168-    def rmdir(self, path):
5169-        return -errno.ENOSYS
5170-
5171-    #@trace_calls
5172-    @SystemError.wrap_returns
5173-    def statfs(self):
5174-        return -errno.ENOSYS
5175-
5176-    @trace_calls
5177-    @SystemError.wrap_returns
5178-    def symlink ( self, targetPath, linkPath ):
5179-        return -errno.ENOSYS
5180-
5181-    @trace_calls
5182-    @SystemError.wrap_returns
5183-    def truncate(self, path, size):
5184-        return -errno.ENOSYS
5185-
5186-    @trace_calls
5187-    @SystemError.wrap_returns
5188-    def unlink(self, path):
5189-        return -errno.ENOSYS
5190-
5191-    @trace_calls
5192-    @SystemError.wrap_returns
5193-    def utime(self, path, times):
5194-        return -errno.ENOSYS
5195-
5196-
5197-class TahoeNode (object):
5198-    NextInode = 0
5199-   
5200-    @staticmethod
5201-    def make(baseurl, uri):
5202-        typefield = uri.split(':', 2)[1]
5203-        # FIXME: is this check correct?
5204-        if uri.find('URI:DIR2') != -1:
5205-            return TahoeDir(baseurl, uri)
5206-        else:
5207-            return TahoeFile(baseurl, uri)
5208-       
5209-    def __init__(self, baseurl, uri):
5210-        if not baseurl.endswith('/'):
5211-            baseurl += '/'
5212-        self.burl = baseurl
5213-        self.uri = uri
5214-        self.fullurl = '%suri/%s' % (self.burl, self.uri)
5215-        self.inode = TahoeNode.NextInode
5216-        TahoeNode.NextInode += 1
5217-
5218-    def getattr(self):
5219-        """
5220-        - st_mode (protection bits)
5221-        - st_ino (inode number)
5222-        - st_dev (device)
5223-        - st_nlink (number of hard links)
5224-        - st_uid (user ID of owner)
5225-        - st_gid (group ID of owner)
5226-        - st_size (size of file, in bytes)
5227-        - st_atime (time of most recent access)
5228-        - st_mtime (time of most recent content modification)
5229-        - st_ctime (platform dependent; time of most recent metadata change on Unix,
5230-                    or the time of creation on Windows).
5231-        """
5232-        # FIXME: Return metadata that isn't completely fabricated.
5233-        return (self.get_mode(),
5234-                self.inode,
5235-                MagicDevNumber,
5236-                self.get_linkcount(),
5237-                os.getuid(),
5238-                os.getgid(),
5239-                self.get_size(),
5240-                0,
5241-                0,
5242-                0)
5243-
5244-    def get_metadata(self):
5245-        f = self.open('?t=json')
5246-        json = f.read()
5247-        f.close()
5248-        return simplejson.loads(json)
5249-       
5250-    def open(self, postfix=''):
5251-        url = self.fullurl + postfix
5252-        log('*** Fetching: %r', url)
5253-        return urllib.urlopen(url)
5254-
5255-
5256-class TahoeFile (TahoeNode):
5257-    def __init__(self, baseurl, uri):
5258-        #assert uri.split(':', 2)[1] in ('CHK', 'LIT'), `uri` # fails as of 0.7.0
5259-        TahoeNode.__init__(self, baseurl, uri)
5260-
5261-    # nonfuse:
5262-    def get_mode(self):
5263-        return stat.S_IFREG | 0400 # Read only regular file.
5264-
5265-    def get_linkcount(self):
5266-        return 1
5267-   
5268-    def get_size(self):
5269-        rawsize = self.get_metadata()[1]['size']
5270-        if type(rawsize) is not int: # FIXME: What about sizes which do not fit in python int?
5271-            assert rawsize == u'?', `rawsize`
5272-            return UnknownSize
5273-        else:
5274-            return rawsize
5275-   
5276-    def resolve_path(self, path):
5277-        assert path == []
5278-        return self
5279-   
5280-
5281-class TahoeDir (TahoeNode):
5282-    def __init__(self, baseurl, uri):
5283-        TahoeNode.__init__(self, baseurl, uri)
5284-
5285-        self.mode = stat.S_IFDIR | 0500 # Read only directory.
5286-
5287-    # FUSE:
5288-    def getdir(self):
5289-        d = [('.', self.get_mode()), ('..', self.get_mode())]
5290-        for name, child in self.get_children().items():
5291-            if name: # Just ignore this crazy case!
5292-                d.append((name, child.get_mode()))
5293-        return d
5294-
5295-    # nonfuse:
5296-    def get_mode(self):
5297-        return stat.S_IFDIR | 0500 # Read only directory.
5298-
5299-    def get_linkcount(self):
5300-        return len(self.getdir())
5301-   
5302-    def get_size(self):
5303-        return 2 ** 12 # FIXME: What do we return here?  len(self.get_metadata())
5304-   
5305-    def resolve_path(self, path):
5306-        assert type(path) is list
5307-
5308-        if path:
5309-            head = path[0]
5310-            child = self.get_child(head)
5311-            return child.resolve_path(path[1:])
5312-        else:
5313-            return self
5314-       
5315-    def get_child(self, name):
5316-        c = self.get_children()
5317-        return c[name]
5318-
5319-    def get_children(self):
5320-        flag, md = self.get_metadata()
5321-        assert flag == 'dirnode'
5322-
5323-        c = {}
5324-        for name, (childflag, childmd) in md['children'].items():
5325-            if childflag == 'dirnode':
5326-                cls = TahoeDir
5327-            else:
5328-                cls = TahoeFile
5329-
5330-            c[str(name)] = cls(self.burl, childmd['ro_uri'])
5331-        return c
5332-       
5333-       
5334-def canonicalize_cap(cap):
5335-    cap = urllib.unquote(cap)
5336-    i = cap.find('URI:')
5337-    assert i != -1, 'A cap must contain "URI:...", but this does not: ' + cap
5338-    return cap[i:]
5339-   
5340-
5341-if __name__ == '__main__':
5342-    main()
5343-
5344rmfile ./contrib/fuse/impl_a/tahoe_fuse.py
5345rmdir ./contrib/fuse/impl_a
5346hunk ./contrib/fuse/runtests.py 1
5347-#! /usr/bin/env python
5348-'''
5349-Unit and system tests for tahoe-fuse.
5350-'''
5351-
5352-# Note: It's always a SetupFailure, not a TestFailure if a webapi
5353-# operation fails, because this does not indicate a fuse interface
5354-# failure.
5355-
5356-# TODO: Unmount after tests regardless of failure or success!
5357-
5358-# TODO: Test mismatches between tahoe and fuse/posix.  What about nodes
5359-# with crazy names ('\0', unicode, '/', '..')?  Huuuuge files?
5360-# Huuuuge directories...  As tahoe approaches production quality, it'd
5361-# be nice if the fuse interface did so also by hardening against such cases.
5362-
5363-# FIXME: Only create / launch necessary nodes.  Do we still need an introducer and three nodes?
5364-
5365-# FIXME: This framework might be replaceable with twisted.trial,
5366-# especially the "layer" design, which is a bit cumbersome when
5367-# using recursion to manage multiple clients.
5368-
5369-# FIXME: Identify all race conditions (hint: starting clients, versus
5370-# using the grid fs).
5371-
5372-import sys, os, shutil, unittest, subprocess
5373-import tempfile, re, time, random, httplib, urllib
5374-#import traceback
5375-
5376-from twisted.python import usage
5377-
5378-if sys.platform.startswith('darwin'):
5379-    UNMOUNT_CMD = ['umount']
5380-else:
5381-    # linux, and until we hear otherwise, all other platforms with fuse, by assumption
5382-    UNMOUNT_CMD = ['fusermount', '-u']
5383-
5384-# Import fuse implementations:
5385-#FuseDir = os.path.join('.', 'contrib', 'fuse')
5386-#if not os.path.isdir(FuseDir):
5387-#    raise SystemExit('''
5388-#Could not find directory "%s".  Please run this script from the tahoe
5389-#source base directory.
5390-#''' % (FuseDir,))
5391-FuseDir = '.'
5392-
5393-
5394-### Load each implementation
5395-sys.path.append(os.path.join(FuseDir, 'impl_a'))
5396-import tahoe_fuse as impl_a
5397-sys.path.append(os.path.join(FuseDir, 'impl_b'))
5398-import pyfuse.tahoe as impl_b
5399-sys.path.append(os.path.join(FuseDir, 'impl_c'))
5400-import blackmatch as impl_c
5401-
5402-### config info about each impl, including which make sense to run
5403-implementations = {
5404-    'impl_a': dict(module=impl_a,
5405-                   mount_args=['--basedir', '%(nodedir)s', '%(mountpath)s', ],
5406-                   mount_wait=True,
5407-                   suites=['read', ]),
5408-    'impl_b': dict(module=impl_b,
5409-                   todo=True,
5410-                   mount_args=['--basedir', '%(nodedir)s', '%(mountpath)s', ],
5411-                   mount_wait=False,
5412-                   suites=['read', ]),
5413-    'impl_c': dict(module=impl_c,
5414-                   mount_args=['--cache-timeout', '0', '--root-uri', '%(root-uri)s',
5415-                               '--node-directory', '%(nodedir)s', '%(mountpath)s', ],
5416-                   mount_wait=True,
5417-                   suites=['read', 'write', ]),
5418-    'impl_c_no_split': dict(module=impl_c,
5419-                   mount_args=['--cache-timeout', '0', '--root-uri', '%(root-uri)s',
5420-                               '--no-split',
5421-                               '--node-directory', '%(nodedir)s', '%(mountpath)s', ],
5422-                   mount_wait=True,
5423-                   suites=['read', 'write', ]),
5424-    }
5425-
5426-if sys.platform == 'darwin':
5427-    del implementations['impl_a']
5428-    del implementations['impl_b']
5429-
5430-default_catch_up_pause = 0
5431-if sys.platform == 'linux2':
5432-    default_catch_up_pause = 2
5433-
5434-class FuseTestsOptions(usage.Options):
5435-    optParameters = [
5436-        ["test-type", None, "both",
5437-         "Type of test to run; unit, system or both"
5438-         ],
5439-        ["implementations", None, "all",
5440-         "Comma separated list of implementations to test, or 'all'"
5441-         ],
5442-        ["suites", None, "all",
5443-         "Comma separated list of test suites to run, or 'all'"
5444-         ],
5445-        ["tests", None, None,
5446-         "Comma separated list of specific tests to run"
5447-         ],
5448-        ["path-to-tahoe", None, "../../bin/tahoe",
5449-         "Which 'tahoe' script to use to create test nodes"],
5450-        ["tmp-dir", None, "/tmp",
5451-         "Where the test should create temporary files"],
5452-         # Note: this is '/tmp' because on leopard, tempfile.mkdtemp creates
5453-         # directories in a location whose paths exceed what macfuse
5454-         # can handle without leaking un-umount-able fuse processes.
5455-        ["catch-up-pause", None, str(default_catch_up_pause),
5456-         "Pause between tahoe operations and fuse tests thereon"],
5457-        ]
5458-    optFlags = [
5459-        ["debug-wait", None,
5460-         "Causes the test system to pause at various points, to facilitate debugging"],
5461-        ["web-open", None,
5462-         "Opens a web browser to the web ui at the start of each impl's tests"],
5463-        ["no-cleanup", False,
5464-         "Prevents the cleanup of the working directories, to allow analysis thereof"],
5465-         ]
5466-
5467-    def postOptions(self):
5468-        if self['suites'] == 'all':
5469-            self.suites = ['read', 'write']
5470-            # [ ] todo: deduce this from looking for test_ in dir(self)
5471-        else:
5472-            self.suites = map(str.strip, self['suites'].split(','))
5473-        if self['implementations'] == 'all':
5474-            self.implementations = implementations.keys()
5475-        else:
5476-            self.implementations = map(str.strip, self['implementations'].split(','))
5477-        if self['tests']:
5478-            self.tests = map(str.strip, self['tests'].split(','))
5479-        else:
5480-            self.tests = None
5481-        self.catch_up_pause = float(self['catch-up-pause'])
5482-
5483-### Main flow control:
5484-def main(args):
5485-    config = FuseTestsOptions()
5486-    config.parseOptions(args[1:])
5487-
5488-    target = 'all'
5489-    if len(args) > 1:
5490-        target = args.pop(1)
5491-
5492-    test_type = config['test-type']
5493-    if test_type not in ('both', 'unit', 'system'):
5494-        raise usage.error('test-type %r not supported' % (test_type,))
5495-
5496-    if test_type in ('both', 'unit'):
5497-        run_unit_tests([args[0]])
5498-
5499-    if test_type in ('both', 'system'):
5500-        return run_system_test(config)
5501-
5502-
5503-def run_unit_tests(argv):
5504-    print 'Running Unit Tests.'
5505-    try:
5506-        unittest.main(argv=argv)
5507-    except SystemExit, se:
5508-        pass
5509-    print 'Unit Tests complete.\n'
5510-
5511-
5512-def run_system_test(config):
5513-    return SystemTest(config).run()
5514-
5515-def drepr(obj):
5516-    r = repr(obj)
5517-    if len(r) > 200:
5518-        return '%s ... %s [%d]' % (r[:100], r[-100:], len(r))
5519-    else:
5520-        return r
5521-
5522-### System Testing:
5523-class SystemTest (object):
5524-    def __init__(self, config):
5525-        self.config = config
5526-
5527-        # These members represent test state:
5528-        self.cliexec = None
5529-        self.testroot = None
5530-
5531-        # This test state is specific to the first client:
5532-        self.port = None
5533-        self.clientbase = None
5534-
5535-    ## Top-level flow control:
5536-    # These "*_layer" methods call each other in a linear fashion, using
5537-    # exception unwinding to do cleanup properly.  Each "layer" invokes
5538-    # a deeper layer, and each layer does its own cleanup upon exit.
5539-
5540-    def run(self):
5541-        print '\n*** Setting up system tests.'
5542-        try:
5543-            results = self.init_cli_layer()
5544-            print '\n*** System Tests complete:'
5545-            total_failures = todo_failures = 0
5546-            for result in results:
5547-                impl_name, failures, total = result
5548-                if implementations[impl_name].get('todo'):
5549-                    todo_failures += failures
5550-                else:
5551-                    total_failures += failures
5552-                print 'Implementation %s: %d failed out of %d.' % result           
5553-            if total_failures:
5554-                print '%s total failures, %s todo' % (total_failures, todo_failures)
5555-                return 1
5556-            else:
5557-                return 0
5558-        except SetupFailure, sfail:
5559-            print
5560-            print sfail
5561-            print '\n*** System Tests were not successfully completed.'
5562-            return 1
5563-
5564-    def maybe_wait(self, msg='waiting', or_if_webopen=False):
5565-        if self.config['debug-wait'] or or_if_webopen and self.config['web-open']:
5566-            print msg
5567-            raw_input()
5568-
5569-    def maybe_webopen(self, where=None):
5570-        if self.config['web-open']:
5571-            import webbrowser
5572-            url = self.weburl
5573-            if where is not None:
5574-                url += urllib.quote(where)
5575-            webbrowser.open(url)
5576-
5577-    def maybe_pause(self):
5578-        time.sleep(self.config.catch_up_pause)
5579-
5580-    def init_cli_layer(self):
5581-        '''This layer finds the appropriate tahoe executable.'''
5582-        #self.cliexec = os.path.join('.', 'bin', 'tahoe')
5583-        self.cliexec = self.config['path-to-tahoe']
5584-        version = self.run_tahoe('--version')
5585-        print 'Using %r with version:\n%s' % (self.cliexec, version.rstrip())
5586-
5587-        return self.create_testroot_layer()
5588-
5589-    def create_testroot_layer(self):
5590-        print 'Creating test base directory.'
5591-        #self.testroot = tempfile.mkdtemp(prefix='tahoe_fuse_test_')
5592-        #self.testroot = tempfile.mkdtemp(prefix='tahoe_fuse_test_', dir='/tmp/')
5593-        tmpdir = self.config['tmp-dir']
5594-        if tmpdir:
5595-            self.testroot = tempfile.mkdtemp(prefix='tahoe_fuse_test_', dir=tmpdir)
5596-        else:
5597-            self.testroot = tempfile.mkdtemp(prefix='tahoe_fuse_test_')
5598-        try:
5599-            return self.launch_introducer_layer()
5600-        finally:
5601-            if not self.config['no-cleanup']:
5602-                print 'Cleaning up test root directory.'
5603-                try:
5604-                    shutil.rmtree(self.testroot)
5605-                except Exception, e:
5606-                    print 'Exception removing test root directory: %r' % (self.testroot, )
5607-                    print 'Ignoring cleanup exception: %r' % (e,)
5608-            else:
5609-                print 'Leaving test root directory: %r' % (self.testroot, )
5610-
5611-
5612-    def launch_introducer_layer(self):
5613-        print 'Launching introducer.'
5614-        introbase = os.path.join(self.testroot, 'introducer')
5615-
5616-        # NOTE: We assume if tahoe exits with non-zero status, no separate
5617-        # tahoe child process is still running.
5618-        createoutput = self.run_tahoe('create-introducer', '--basedir', introbase)
5619-
5620-        self.check_tahoe_output(createoutput, ExpectedCreationOutput, introbase)
5621-
5622-        startoutput = self.run_tahoe('start', '--basedir', introbase)
5623-        try:
5624-            self.check_tahoe_output(startoutput, ExpectedStartOutput, introbase)
5625-
5626-            return self.launch_clients_layer(introbase)
5627-
5628-        finally:
5629-            print 'Stopping introducer node.'
5630-            self.stop_node(introbase)
5631-
5632-    def set_tahoe_option(self, base, key, value):
5633-        import re
5634-
5635-        filename = os.path.join(base, 'tahoe.cfg')
5636-        content = open(filename).read()
5637-        content = re.sub('%s = (.+)' % key, '%s = %s' % (key, value), content)
5638-        open(filename, 'w').write(content)
5639-
5640-    TotalClientsNeeded = 3
5641-    def launch_clients_layer(self, introbase, clientnum = 0):
5642-        if clientnum >= self.TotalClientsNeeded:
5643-            self.maybe_wait('waiting (launched clients)')
5644-            ret = self.create_test_dirnode_layer()
5645-            self.maybe_wait('waiting (ran tests)', or_if_webopen=True)
5646-            return ret
5647-
5648-        tmpl = 'Launching client %d of %d.'
5649-        print tmpl % (clientnum,
5650-                      self.TotalClientsNeeded)
5651-
5652-        base = os.path.join(self.testroot, 'client_%d' % (clientnum,))
5653-
5654-        output = self.run_tahoe('create-node', '--basedir', base)
5655-        self.check_tahoe_output(output, ExpectedCreationOutput, base)
5656-
5657-        if clientnum == 0:
5658-            # The first client is special:
5659-            self.clientbase = base
5660-            self.port = random.randrange(1024, 2**15)
5661-
5662-            self.set_tahoe_option(base, 'web.port', 'tcp:%d:interface=127.0.0.1' % self.port)
5663-
5664-            self.weburl = "http://127.0.0.1:%d/" % (self.port,)
5665-            print self.weburl
5666-        else:
5667-            self.set_tahoe_option(base, 'web.port', '')
5668-
5669-        introfurl = os.path.join(introbase, 'introducer.furl')
5670-
5671-        furl = open(introfurl).read().strip()
5672-        self.set_tahoe_option(base, 'introducer.furl', furl)
5673-
5674-        # NOTE: We assume if tahoe exits with non-zero status, no separate
5675-        # tahoe child process is still running.
5676-        startoutput = self.run_tahoe('start', '--basedir', base)
5677-        try:
5678-            self.check_tahoe_output(startoutput, ExpectedStartOutput, base)
5679-
5680-            return self.launch_clients_layer(introbase, clientnum+1)
5681-
5682-        finally:
5683-            print 'Stopping client node %d.' % (clientnum,)
5684-            self.stop_node(base)
5685-
5686-    def create_test_dirnode_layer(self):
5687-        print 'Creating test dirnode.'
5688-
5689-        cap = self.create_dirnode()
5690-
5691-        f = open(os.path.join(self.clientbase, 'private', 'root_dir.cap'), 'w')
5692-        f.write(cap)
5693-        f.close()
5694-
5695-        return self.mount_fuse_layer(cap)
5696-
5697-    def mount_fuse_layer(self, root_uri):
5698-        mpbase = os.path.join(self.testroot, 'mountpoint')
5699-        os.mkdir(mpbase)
5700-        results = []
5701-
5702-        if self.config['debug-wait']:
5703-            ImplProcessManager.debug_wait = True
5704-
5705-        #for name, kwargs in implementations.items():
5706-        for name in self.config.implementations:
5707-            kwargs = implementations[name]
5708-            #print 'instantiating %s: %r' % (name, kwargs)
5709-            implprocmgr = ImplProcessManager(name, **kwargs)
5710-            print '\n*** Testing impl: %r' % (implprocmgr.name)
5711-            implprocmgr.configure(self.clientbase, mpbase)
5712-            implprocmgr.mount()
5713-            try:
5714-                failures, total = self.run_test_layer(root_uri, implprocmgr)
5715-                result = (implprocmgr.name, failures, total)
5716-                tmpl = '\n*** Test Results implementation %s: %d failed out of %d.'
5717-                print tmpl % result
5718-                results.append(result)
5719-            finally:
5720-                implprocmgr.umount()
5721-        return results
5722-
5723-    def run_test_layer(self, root_uri, iman):
5724-        self.maybe_webopen('uri/'+root_uri)
5725-        failures = 0
5726-        testnum = 0
5727-        numtests = 0
5728-        if self.config.tests:
5729-            tests = self.config.tests
5730-        else:
5731-            tests = list(set(self.config.suites).intersection(set(iman.suites)))
5732-        self.maybe_wait('waiting (about to run tests)')
5733-        for test in tests:
5734-            testnames = [n for n in sorted(dir(self)) if n.startswith('test_'+test)]
5735-            numtests += len(testnames)
5736-            print 'running %s %r tests' % (len(testnames), test,)
5737-            for testname in testnames:
5738-                testnum += 1
5739-                print '\n*** Running test #%d: %s' % (testnum, testname)
5740-                try:
5741-                    testcap = self.create_dirnode()
5742-                    dirname = '%s_%s' % (iman.name, testname)
5743-                    self.attach_node(root_uri, testcap, dirname)
5744-                    method = getattr(self, testname)
5745-                    method(testcap, testdir = os.path.join(iman.mountpath, dirname))
5746-                    print 'Test succeeded.'
5747-                except TestFailure, f:
5748-                    print f
5749-                    #print traceback.format_exc()
5750-                    failures += 1
5751-                except:
5752-                    print 'Error in test code...  Cleaning up.'
5753-                    raise
5754-        return (failures, numtests)
5755-
5756-    # Tests:
5757-    def test_read_directory_existence(self, testcap, testdir):
5758-        if not wrap_os_error(os.path.isdir, testdir):
5759-            raise TestFailure('Attached test directory not found: %r', testdir)
5760-
5761-    def test_read_empty_directory_listing(self, testcap, testdir):
5762-        listing = wrap_os_error(os.listdir, testdir)
5763-        if listing:
5764-            raise TestFailure('Expected empty directory, found: %r', listing)
5765-
5766-    def test_read_directory_listing(self, testcap, testdir):
5767-        names = []
5768-        filesizes = {}
5769-
5770-        for i in range(3):
5771-            fname = 'file_%d' % (i,)
5772-            names.append(fname)
5773-            body = 'Hello World #%d!' % (i,)
5774-            filesizes[fname] = len(body)
5775-
5776-            cap = self.webapi_call('PUT', '/uri', body)
5777-            self.attach_node(testcap, cap, fname)
5778-
5779-            dname = 'dir_%d' % (i,)
5780-            names.append(dname)
5781-
5782-            cap = self.create_dirnode()
5783-            self.attach_node(testcap, cap, dname)
5784-
5785-        names.sort()
5786-
5787-        listing = wrap_os_error(os.listdir, testdir)
5788-        listing.sort()
5789-
5790-        if listing != names:
5791-            tmpl = 'Expected directory list containing %r but fuse gave %r'
5792-            raise TestFailure(tmpl, names, listing)
5793-
5794-        for file, size in filesizes.items():
5795-            st = wrap_os_error(os.stat, os.path.join(testdir, file))
5796-            if st.st_size != size:
5797-                tmpl = 'Expected %r size of %r but fuse returned %r'
5798-                raise TestFailure(tmpl, file, size, st.st_size)
5799-
5800-    def test_read_file_contents(self, testcap, testdir):
5801-        name = 'hw.txt'
5802-        body = 'Hello World!'
5803-
5804-        cap = self.webapi_call('PUT', '/uri', body)
5805-        self.attach_node(testcap, cap, name)
5806-
5807-        path = os.path.join(testdir, name)
5808-        try:
5809-            found = open(path, 'r').read()
5810-        except Exception, err:
5811-            tmpl = 'Could not read file contents of %r: %r'
5812-            raise TestFailure(tmpl, path, err)
5813-
5814-        if found != body:
5815-            tmpl = 'Expected file contents %r but found %r'
5816-            raise TestFailure(tmpl, body, found)
5817-
5818-    def test_read_in_random_order(self, testcap, testdir):
5819-        sz = 2**20
5820-        bs = 2**10
5821-        assert(sz % bs == 0)
5822-        name = 'random_read_order'
5823-        body = os.urandom(sz)
5824-
5825-        cap = self.webapi_call('PUT', '/uri', body)
5826-        self.attach_node(testcap, cap, name)
5827-
5828-        # XXX this should also do a test where sz%bs != 0, so that it correctly tests
5829-        # the edge case where the last read is a 'short' block
5830-        path = os.path.join(testdir, name)
5831-        try:
5832-            fsize = os.path.getsize(path)
5833-            if fsize != len(body):
5834-                tmpl = 'Expected file size %s but found %s'
5835-                raise TestFailure(tmpl, len(body), fsize)
5836-        except Exception, err:
5837-            tmpl = 'Could not read file size for %r: %r'
5838-            raise TestFailure(tmpl, path, err)
5839-
5840-        try:
5841-            f = open(path, 'r')
5842-            posns = range(0,sz,bs)
5843-            random.shuffle(posns)
5844-            data = [None] * (sz/bs)
5845-            for p in posns:
5846-                f.seek(p)
5847-                data[p/bs] = f.read(bs)
5848-            found = ''.join(data)
5849-        except Exception, err:
5850-            tmpl = 'Could not read file %r: %r'
5851-            raise TestFailure(tmpl, path, err)
5852-
5853-        if found != body:
5854-            tmpl = 'Expected file contents %s but found %s'
5855-            raise TestFailure(tmpl, drepr(body), drepr(found))
5856-
5857-    def get_file(self, dircap, path):
5858-        body = self.webapi_call('GET', '/uri/%s/%s' % (dircap, path))
5859-        return body
5860-
5861-    def test_write_tiny_file(self, testcap, testdir):
5862-        self._write_test_linear(testcap, testdir, name='tiny.junk', bs=2**9, sz=2**9)
5863-
5864-    def test_write_linear_small_writes(self, testcap, testdir):
5865-        self._write_test_linear(testcap, testdir, name='large_linear.junk', bs=2**9, sz=2**20)
5866-
5867-    def test_write_linear_large_writes(self, testcap, testdir):
5868-        # at least on the mac, large io block sizes are reduced to 64k writes through fuse
5869-        self._write_test_linear(testcap, testdir, name='small_linear.junk', bs=2**18, sz=2**20)
5870-
5871-    def _write_test_linear(self, testcap, testdir, name, bs, sz):
5872-        body = os.urandom(sz)
5873-        try:
5874-            path = os.path.join(testdir, name)
5875-            f = file(path, 'w')
5876-        except Exception, err:
5877-            tmpl = 'Could not open file for write at %r: %r'
5878-            raise TestFailure(tmpl, path, err)
5879-        try:
5880-            for posn in range(0,sz,bs):
5881-                f.write(body[posn:posn+bs])
5882-            f.close()
5883-        except Exception, err:
5884-            tmpl = 'Could not write to file %r: %r'
5885-            raise TestFailure(tmpl, path, err)
5886-
5887-        self.maybe_pause()
5888-        self._check_write(testcap, name, body)
5889-
5890-    def _check_write(self, testcap, name, expected_body):
5891-        uploaded_body = self.get_file(testcap, name)
5892-        if uploaded_body != expected_body:
5893-            tmpl = 'Expected file contents %s but found %s'
5894-            raise TestFailure(tmpl, drepr(expected_body), drepr(uploaded_body))
5895-
5896-    def test_write_overlapping_small_writes(self, testcap, testdir):
5897-        self._write_test_overlap(testcap, testdir, name='large_overlap', bs=2**9, sz=2**20)
5898-
5899-    def test_write_overlapping_large_writes(self, testcap, testdir):
5900-        self._write_test_overlap(testcap, testdir, name='small_overlap', bs=2**18, sz=2**20)
5901-
5902-    def _write_test_overlap(self, testcap, testdir, name, bs, sz):
5903-        body = os.urandom(sz)
5904-        try:
5905-            path = os.path.join(testdir, name)
5906-            f = file(path, 'w')
5907-        except Exception, err:
5908-            tmpl = 'Could not open file for write at %r: %r'
5909-            raise TestFailure(tmpl, path, err)
5910-        try:
5911-            for posn in range(0,sz,bs):
5912-                start = max(0, posn-bs)
5913-                end = min(sz, posn+bs)
5914-                f.seek(start)
5915-                f.write(body[start:end])
5916-            f.close()
5917-        except Exception, err:
5918-            tmpl = 'Could not write to file %r: %r'
5919-            raise TestFailure(tmpl, path, err)
5920-
5921-        self.maybe_pause()
5922-        self._check_write(testcap, name, body)
5923-
5924-
5925-    def test_write_random_scatter(self, testcap, testdir):
5926-        sz = 2**20
5927-        name = 'random_scatter'
5928-        body = os.urandom(sz)
5929-
5930-        def rsize(sz=sz):
5931-            return min(int(random.paretovariate(.25)), sz/12)
5932-
5933-        # first chop up whole file into random sized chunks
5934-        slices = []
5935-        posn = 0
5936-        while posn < sz:
5937-            size = rsize()
5938-            slices.append( (posn, body[posn:posn+size]) )
5939-            posn += size
5940-        random.shuffle(slices) # and randomise their order
5941-
5942-        try:
5943-            path = os.path.join(testdir, name)
5944-            f = file(path, 'w')
5945-        except Exception, err:
5946-            tmpl = 'Could not open file for write at %r: %r'
5947-            raise TestFailure(tmpl, path, err)
5948-        try:
5949-            # write all slices: we hence know entire file is ultimately written
5950-            # write random excerpts: this provides for mixed and varied overlaps
5951-            for posn,slice in slices:
5952-                f.seek(posn)
5953-                f.write(slice)
5954-                rposn = random.randint(0,sz)
5955-                f.seek(rposn)
5956-                f.write(body[rposn:rposn+rsize()])
5957-            f.close()
5958-        except Exception, err:
5959-            tmpl = 'Could not write to file %r: %r'
5960-            raise TestFailure(tmpl, path, err)
5961-
5962-        self.maybe_pause()
5963-        self._check_write(testcap, name, body)
5964-
5965-    def test_write_partial_overwrite(self, testcap, testdir):
5966-        name = 'partial_overwrite'
5967-        body = '_'*132
5968-        overwrite = '^'*8
5969-        position = 26
5970-
5971-        def write_file(path, mode, contents, position=None):
5972-            try:
5973-                f = file(path, mode)
5974-                if position is not None:
5975-                    f.seek(position)
5976-                f.write(contents)
5977-                f.close()
5978-            except Exception, err:
5979-                tmpl = 'Could not write to file %r: %r'
5980-                raise TestFailure(tmpl, path, err)
5981-
5982-        def read_file(path):
5983-            try:
5984-                f = file(path, 'rb')
5985-                contents = f.read()
5986-                f.close()
5987-            except Exception, err:
5988-                tmpl = 'Could not read file %r: %r'
5989-                raise TestFailure(tmpl, path, err)
5990-            return contents
5991-
5992-        path = os.path.join(testdir, name)
5993-        #write_file(path, 'w', body)
5994-
5995-        cap = self.webapi_call('PUT', '/uri', body)
5996-        self.attach_node(testcap, cap, name)
5997-        self.maybe_pause()
5998-
5999-        contents = read_file(path)
6000-        if contents != body:
6001-            raise TestFailure('File contents mismatch (%r) %r v.s. %r', path, contents, body)
6002-
6003-        write_file(path, 'r+', overwrite, position)
6004-        contents = read_file(path)
6005-        expected = body[:position] + overwrite + body[position+len(overwrite):]
6006-        if contents != expected:
6007-            raise TestFailure('File contents mismatch (%r) %r v.s. %r', path, contents, expected)
6008-
6009-
6010-    # Utilities:
6011-    def run_tahoe(self, *args):
6012-        realargs = ('tahoe',) + args
6013-        status, output = gather_output(realargs, executable=self.cliexec)
6014-        if status != 0:
6015-            tmpl = 'The tahoe cli exited with nonzero status.\n'
6016-            tmpl += 'Executable: %r\n'
6017-            tmpl += 'Command arguments: %r\n'
6018-            tmpl += 'Exit status: %r\n'
6019-            tmpl += 'Output:\n%s\n[End of tahoe output.]\n'
6020-            raise SetupFailure(tmpl,
6021-                                    self.cliexec,
6022-                                    realargs,
6023-                                    status,
6024-                                    output)
6025-        return output
6026-
6027-    def check_tahoe_output(self, output, expected, expdir):
6028-        ignorable_lines = map(re.compile, [
6029-            '.*site-packages/zope\.interface.*\.egg/zope/__init__.py:3: UserWarning: Module twisted was already imported from .*egg is being added to sys.path',
6030-            '  import pkg_resources',
6031-            ])
6032-        def ignore_line(line):
6033-            for ignorable_line in ignorable_lines:
6034-                if ignorable_line.match(line):
6035-                    return True
6036-            else:
6037-                return False
6038-        output = '\n'.join( [ line
6039-                              for line in output.split('\n')+['']
6040-                              #if line not in ignorable_lines ] )
6041-                              if not ignore_line(line) ] )
6042-        m = re.match(expected, output, re.M)
6043-        if m is None:
6044-            tmpl = 'The output of tahoe did not match the expectation:\n'
6045-            tmpl += 'Expected regex: %s\n'
6046-            tmpl += 'Actual output: %r\n'
6047-            self.warn(tmpl, expected, output)
6048-
6049-        elif expdir != m.group('path'):
6050-            tmpl = 'The output of tahoe refers to an unexpected directory:\n'
6051-            tmpl += 'Expected directory: %r\n'
6052-            tmpl += 'Actual directory: %r\n'
6053-            self.warn(tmpl, expdir, m.group('path'))
6054-
6055-    def stop_node(self, basedir):
6056-        try:
6057-            self.run_tahoe('stop', '--basedir', basedir)
6058-        except Exception, e:
6059-            print 'Failed to stop tahoe node.'
6060-            print 'Ignoring cleanup exception:'
6061-            # Indent the exception description:
6062-            desc = str(e).rstrip()
6063-            print '  ' + desc.replace('\n', '\n  ')
6064-
6065-    def webapi_call(self, method, path, body=None, **options):
6066-        if options:
6067-            path = path + '?' + ('&'.join(['%s=%s' % kv for kv in options.items()]))
6068-
6069-        conn = httplib.HTTPConnection('127.0.0.1', self.port)
6070-        conn.request(method, path, body = body)
6071-        resp = conn.getresponse()
6072-
6073-        if resp.status != 200:
6074-            tmpl = 'A webapi operation failed.\n'
6075-            tmpl += 'Request: %r %r\n'
6076-            tmpl += 'Body:\n%s\n'
6077-            tmpl += 'Response:\nStatus %r\nBody:\n%s'
6078-            raise SetupFailure(tmpl,
6079-                                    method, path,
6080-                                    body or '',
6081-                                    resp.status, resp.read())
6082-
6083-        return resp.read()
6084-
6085-    def create_dirnode(self):
6086-        return self.webapi_call('PUT', '/uri', t='mkdir').strip()
6087-
6088-    def attach_node(self, dircap, childcap, childname):
6089-        body = self.webapi_call('PUT',
6090-                                '/uri/%s/%s' % (dircap, childname),
6091-                                body = childcap,
6092-                                t = 'uri',
6093-                                replace = 'false')
6094-        assert body.strip() == childcap, `body, dircap, childcap, childname`
6095-
6096-    def polling_operation(self, operation, polldesc, timeout = 10.0, pollinterval = 0.2):
6097-        totaltime = timeout # Fudging for edge-case SetupFailure description...
6098-
6099-        totalattempts = int(timeout / pollinterval)
6100-
6101-        starttime = time.time()
6102-        for attempt in range(totalattempts):
6103-            opstart = time.time()
6104-
6105-            try:
6106-                result = operation()
6107-            except KeyboardInterrupt, e:
6108-                raise
6109-            except Exception, e:
6110-                result = False
6111-
6112-            totaltime = time.time() - starttime
6113-
6114-            if result is not False:
6115-                #tmpl = '(Polling took over %.2f seconds.)'
6116-                #print tmpl % (totaltime,)
6117-                return result
6118-
6119-            elif totaltime > timeout:
6120-                break
6121-
6122-            else:
6123-                opdelay = time.time() - opstart
6124-                realinterval = max(0., pollinterval - opdelay)
6125-
6126-                #tmpl = '(Poll attempt %d failed after %.2f seconds, sleeping %.2f seconds.)'
6127-                #print tmpl % (attempt+1, opdelay, realinterval)
6128-                time.sleep(realinterval)
6129-
6130-
6131-        tmpl = 'Timeout while polling for: %s\n'
6132-        tmpl += 'Waited %.2f seconds (%d polls).'
6133-        raise SetupFailure(tmpl, polldesc, totaltime, attempt+1)
6134-
6135-    def warn(self, tmpl, *args):
6136-        print ('Test Warning: ' + tmpl) % args
6137-
6138-
6139-# SystemTest Exceptions:
6140-class Failure (Exception):
6141-    def __init__(self, tmpl, *args):
6142-        msg = self.Prefix + (tmpl % args)
6143-        Exception.__init__(self, msg)
6144-
6145-class SetupFailure (Failure):
6146-    Prefix = 'Setup Failure - The test framework encountered an error:\n'
6147-
6148-class TestFailure (Failure):
6149-    Prefix = 'TestFailure: '
6150-
6151-
6152-### Unit Tests:
6153-class Impl_A_UnitTests (unittest.TestCase):
6154-    '''Tests small stand-alone functions.'''
6155-    def test_canonicalize_cap(self):
6156-        iopairs = [('http://127.0.0.1:3456/uri/URI:DIR2:yar9nnzsho6czczieeesc65sry:upp1pmypwxits3w9izkszgo1zbdnsyk3nm6h7e19s7os7s6yhh9y',
6157-                    'URI:DIR2:yar9nnzsho6czczieeesc65sry:upp1pmypwxits3w9izkszgo1zbdnsyk3nm6h7e19s7os7s6yhh9y'),
6158-                   ('http://127.0.0.1:3456/uri/URI%3ACHK%3Ak7ktp1qr7szmt98s1y3ha61d9w%3A8tiy8drttp65u79pjn7hs31po83e514zifdejidyeo1ee8nsqfyy%3A3%3A12%3A242?filename=welcome.html',
6159-                    'URI:CHK:k7ktp1qr7szmt98s1y3ha61d9w:8tiy8drttp65u79pjn7hs31po83e514zifdejidyeo1ee8nsqfyy:3:12:242?filename=welcome.html')]
6160-
6161-        for input, output in iopairs:
6162-            result = impl_a.canonicalize_cap(input)
6163-            self.failUnlessEqual(output, result, 'input == %r' % (input,))
6164-
6165-
6166-
6167-### Misc:
6168-class ImplProcessManager(object):
6169-    debug_wait = False
6170-
6171-    def __init__(self, name, module, mount_args, mount_wait, suites, todo=False):
6172-        self.name = name
6173-        self.module = module
6174-        self.script = module.__file__
6175-        self.mount_args = mount_args
6176-        self.mount_wait = mount_wait
6177-        self.suites = suites
6178-        self.todo = todo
6179-
6180-    def maybe_wait(self, msg='waiting'):
6181-        if self.debug_wait:
6182-            print msg
6183-            raw_input()
6184-
6185-    def configure(self, client_nodedir, mountpoint):
6186-        self.client_nodedir = client_nodedir
6187-        self.mountpath = os.path.join(mountpoint, self.name)
6188-        os.mkdir(self.mountpath)
6189-
6190-    def mount(self):
6191-        print 'Mounting implementation: %s (%s)' % (self.name, self.script)
6192-
6193-        rootdirfile = os.path.join(self.client_nodedir, 'private', 'root_dir.cap')
6194-        root_uri = file(rootdirfile, 'r').read().strip()
6195-        fields = {'mountpath': self.mountpath,
6196-                  'nodedir': self.client_nodedir,
6197-                  'root-uri': root_uri,
6198-                 }
6199-        args = ['python', self.script] + [ arg%fields for arg in self.mount_args ]
6200-        print ' '.join(args)
6201-        self.maybe_wait('waiting (about to launch fuse)')
6202-
6203-        if self.mount_wait:
6204-            exitcode, output = gather_output(args)
6205-            if exitcode != 0:
6206-                tmpl = '%r failed to launch:\n'
6207-                tmpl += 'Exit Status: %r\n'
6208-                tmpl += 'Output:\n%s\n'
6209-                raise SetupFailure(tmpl, self.script, exitcode, output)
6210-        else:
6211-            self.proc = subprocess.Popen(args)
6212-
6213-    def umount(self):
6214-        print 'Unmounting implementation: %s' % (self.name,)
6215-        args = UNMOUNT_CMD + [self.mountpath]
6216-        print args
6217-        self.maybe_wait('waiting (unmount)')
6218-        #print os.system('ls -l '+self.mountpath)
6219-        ec, out = gather_output(args)
6220-        if ec != 0 or out:
6221-            tmpl = '%r failed to unmount:\n' % (' '.join(UNMOUNT_CMD),)
6222-            tmpl += 'Arguments: %r\n'
6223-            tmpl += 'Exit Status: %r\n'
6224-            tmpl += 'Output:\n%s\n'
6225-            raise SetupFailure(tmpl, args, ec, out)
6226-
6227-
6228-def gather_output(*args, **kwargs):
6229-    '''
6230-    This expects the child does not require input and that it closes
6231-    stdout/err eventually.
6232-    '''
6233-    p = subprocess.Popen(stdout = subprocess.PIPE,
6234-                         stderr = subprocess.STDOUT,
6235-                         *args,
6236-                         **kwargs)
6237-    output = p.stdout.read()
6238-    exitcode = p.wait()
6239-    return (exitcode, output)
6240-
6241-
6242-def wrap_os_error(meth, *args):
6243-    try:
6244-        return meth(*args)
6245-    except os.error, e:
6246-        raise TestFailure('%s', e)
6247-
6248-
6249-ExpectedCreationOutput = r'(introducer|client) created in (?P<path>.*?)\n'
6250-ExpectedStartOutput = r'(.*\n)*STARTING (?P<path>.*?)\n(introducer|client) node probably started'
6251-
6252-
6253-if __name__ == '__main__':
6254-    sys.exit(main(sys.argv))
6255rmfile ./contrib/fuse/runtests.py
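The removed test_canonicalize_cap above pins down impl_a.canonicalize_cap with two input/output pairs (a bare /uri/ URL and a URL-quoted one). The implementation itself is not part of this hunk; a minimal sketch consistent with those pairs, using a hypothetical name and only standard-library calls (illustrative, not part of the patch):

    import urllib

    def canonicalize_cap_sketch(url_or_cap):
        # reduce "http://host:port/uri/<url-quoted cap>" to the bare cap string;
        # anything without a /uri/ component passes through unchanged
        marker = '/uri/'
        i = url_or_cap.find(marker)
        if i == -1:
            return url_or_cap
        return urllib.unquote(url_or_cap[i + len(marker):])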
6256rmdir ./contrib/fuse
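The deleted create_dirnode/attach_node helpers were thin wrappers over two Tahoe-LAFS web-API calls: PUT /uri?t=mkdir creates a directory (the response body is its cap), and PUT /uri/DIRCAP/CHILDNAME?t=uri&replace=false with the child cap as the request body attaches it. A standalone sketch of the same two calls, assuming a node listening on 127.0.0.1:3456 and a placeholder child cap (illustrative only, not part of the patch):

    import httplib

    def webapi_put(path, body=''):
        # issue a PUT against the local Tahoe node and return the response body
        conn = httplib.HTTPConnection('127.0.0.1', 3456)
        conn.request('PUT', path, body)
        resp = conn.getresponse()
        data = resp.read()
        conn.close()
        if resp.status not in (200, 201):
            raise RuntimeError('web-API error %d: %s' % (resp.status, data))
        return data

    dircap = webapi_put('/uri?t=mkdir').strip()               # new directory; body is its cap
    childcap = 'URI:CHK:...'                                   # placeholder; any existing cap
    webapi_put('/uri/%s/child?t=uri&replace=false' % dircap,   # attach childcap under the name "child"
               body=childcap)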
6257hunk ./contrib/README 1
6258-This directory contains code and extensions which are not strictly a part
6259-of Tahoe. They may or may not currently work.
6260-
6261rmfile ./contrib/README
6262rmdir ./contrib
6263hunk ./Makefile 103
6264 test: build src/allmydata/_version.py
6265        $(PYTHON) setup.py test $(TRIALARGS) -s $(TEST)
6266 
6267-fuse-test: .built .checked-deps
6268-       $(RUNPP) -d contrib/fuse -p -c runtests.py
6269-
6270 test-coverage: build src/allmydata/_version.py
6271        rm -f .coverage
6272        $(TAHOE) debug trial --reporter=bwverbose-coverage $(TEST)
6273hunk ./misc/debian/copyright 224
6274        this License.
6275 ------- end TGPPL1 licence
6276 
6277-The files mac/fuse.py and mac/fuseparts/subbedopts.py are licensed under
6278-the GNU Lesser General Public Licence.  In addition, on 2009-09-21 Csaba
6279-Henk granted permission for those files to be under the same terms as
6280-Tahoe-LAFS itself.
6281-
6282-See /usr/share/common-licenses/GPL for a copy of the GNU General Public
6283-License, and /usr/share/common-licenses/LGPL for the GNU Lesser General Public
6284-License.
6285-
6286-The file src/allmydata/util/figleaf.py is licensed under the BSD licence.
6287-
6288 ------- begin BSD licence
6289 Copyright (c) <YEAR>, <OWNER>
6290 All rights reserved.
6291hunk ./misc/debian_helpers/etch/debian/copyright 224
6292        this License.
6293 ------- end TGPPL1 licence
6294 
6295-The files mac/fuse.py and mac/fuseparts/subbedopts.py are licensed under
6296-the GNU Lesser General Public Licence.  In addition, on 2009-09-21 Csaba
6297-Henk granted permission for those files to be under the same terms as
6298-Tahoe-LAFS itself.
6299-
6300-See /usr/share/common-licenses/GPL for a copy of the GNU General Public
6301-License, and /usr/share/common-licenses/LGPL for the GNU Lesser General Public
6302-License.
6303-
6304-The file src/allmydata/util/figleaf.py is licensed under the BSD licence.
6305-
6306 ------- begin BSD licence
6307 Copyright (c) <YEAR>, <OWNER>
6308 All rights reserved.
6309hunk ./misc/debian_helpers/lenny/debian/copyright 224
6310        this License.
6311 ------- end TGPPL1 licence
6312 
6313-The files mac/fuse.py and mac/fuseparts/subbedopts.py are licensed under
6314-the GNU Lesser General Public Licence.  In addition, on 2009-09-21 Csaba
6315-Henk granted permission for those files to be under the same terms as
6316-Tahoe-LAFS itself.
6317-
6318-See /usr/share/common-licenses/GPL for a copy of the GNU General Public
6319-License, and /usr/share/common-licenses/LGPL for the GNU Lesser General Public
6320-License.
6321-
6322-The file src/allmydata/util/figleaf.py is licensed under the BSD licence.
6323-
6324 ------- begin BSD licence
6325 Copyright (c) <YEAR>, <OWNER>
6326 All rights reserved.
6327hunk ./misc/debian_helpers/sid/debian/copyright 224
6328        this License.
6329 ------- end TGPPL1 licence
6330 
6331-The files mac/fuse.py and mac/fuseparts/subbedopts.py are licensed under
6332-the GNU Lesser General Public Licence.  In addition, on 2009-09-21 Csaba
6333-Henk granted permission for those files to be under the same terms as
6334-Tahoe-LAFS itself.
6335-
6336-See /usr/share/common-licenses/GPL for a copy of the GNU General Public
6337-License, and /usr/share/common-licenses/LGPL for the GNU Lesser General Public
6338-License.
6339-
6340-The file src/allmydata/util/figleaf.py is licensed under the BSD licence.
6341-
6342 ------- begin BSD licence
6343 Copyright (c) <YEAR>, <OWNER>
6344 All rights reserved.
6345hunk ./src/allmydata/test/figleaf.excludes 1
6346-^/home/warner/stuff/python/twisted/Twisted/
6347-^/var/lib
6348rmfile ./src/allmydata/test/figleaf.excludes
6349}
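The most reusable piece of the deleted runtests.py is its polling loop (polling_operation, above): retry an operation until it returns something other than False or the timeout expires, sleeping only for whatever remains of the poll interval after each attempt. A minimal restatement of that pattern with hypothetical names, standard library only (not part of the patch):

    import time

    def poll_until(operation, timeout=10.0, interval=0.2):
        # call operation() until it yields a non-False result or we time out
        start = time.time()
        while True:
            attempt_start = time.time()
            try:
                result = operation()
            except KeyboardInterrupt:
                raise
            except Exception:
                result = False          # treat any other failure as "not ready yet"
            if result is not False:
                return result
            elapsed = time.time() - start
            if elapsed > timeout:
                raise RuntimeError('timed out after %.2f seconds' % (elapsed,))
            # sleep only for what is left of the interval after this attempt
            time.sleep(max(0.0, interval - (time.time() - attempt_start)))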
6350
6351Context:
6352
6353[Rename test_package_initialization.py to (much shorter) test_import.py .
6354Brian Warner <warner@lothar.com>**20110611190234
6355 Ignore-this: 3eb3dbac73600eeff5cfa6b65d65822
6356 
6357 The former name was making my 'ls' listings hard to read, by forcing them
6358 down to just two columns.
6359]
6360[tests: fix tests to accomodate [20110611153758-92b7f-0ba5e4726fb6318dac28fb762a6512a003f4c430]
6361zooko@zooko.com**20110611163741
6362 Ignore-this: 64073a5f39e7937e8e5e1314c1a302d1
6363 Apparently none of the two authors (stercor, terrell), three reviewers (warner, davidsarah, terrell), or one committer (me) actually ran the tests. This is presumably due to #20.
6364 fixes #1412
6365]
6366[docs/running.rst: fix stray HTML (not .rst) link noticed by ChosenOne.
6367david-sarah@jacaranda.org**20110609223719
6368 Ignore-this: fc50ac9c94792dcac6f1067df8ac0d4a
6369]
6370[wui: right-align the size column in the WUI
6371zooko@zooko.com**20110611153758
6372 Ignore-this: 492bdaf4373c96f59f90581c7daf7cd7
6373 Thanks to Ted "stercor" Rolle Jr. and Terrell Russell.
6374 fixes #1412
6375]
6376[docs: three minor fixes
6377zooko@zooko.com**20110610121656
6378 Ignore-this: fec96579eb95aceb2ad5fc01a814c8a2
6379 CREDITS for arc for stats tweak
6380 fix link to .zip file in quickstart.rst (thanks to ChosenOne for noticing)
6381 English usage tweak
6382]
6383[server.py:  get_latencies now reports percentiles _only_ if there are sufficient observations for the interpretation of the percentile to be unambiguous.
6384wilcoxjg@gmail.com**20110527120135
6385 Ignore-this: 2e7029764bffc60e26f471d7c2b6611e
6386 interfaces.py:  modified the return type of RIStatsProvider.get_stats to allow for None as a return value
6387 NEWS.rst, stats.py: documentation of change to get_latencies
6388 stats.rst: now documents percentile modification in get_latencies
6389 test_storage.py:  test_latencies now expects None in output categories that contain too few samples for the associated percentile to be unambiguously reported.
6390 fixes #1392
6391]
6392[corrected "k must never be smaller than N" to "k must never be greater than N"
6393secorp@allmydata.org**20110425010308
6394 Ignore-this: 233129505d6c70860087f22541805eac
6395]
6396[docs: revert link in relnotes.txt from NEWS.rst to NEWS, since the former did not exist at revision 5000.
6397david-sarah@jacaranda.org**20110517011214
6398 Ignore-this: 6a5be6e70241e3ec0575641f64343df7
6399]
6400[docs: convert NEWS to NEWS.rst and change all references to it.
6401david-sarah@jacaranda.org**20110517010255
6402 Ignore-this: a820b93ea10577c77e9c8206dbfe770d
6403]
6404[docs: remove out-of-date docs/testgrid/introducer.furl and containing directory. fixes #1404
6405david-sarah@jacaranda.org**20110512140559
6406 Ignore-this: 784548fc5367fac5450df1c46890876d
6407]
6408[scripts/common.py: don't assume that the default alias is always 'tahoe' (it is, but the API of get_alias doesn't say so). refs #1342
6409david-sarah@jacaranda.org**20110130164923
6410 Ignore-this: a271e77ce81d84bb4c43645b891d92eb
6411]
6412[setup: don't catch all Exception from check_requirement(), but only PackagingError and ImportError
6413zooko@zooko.com**20110128142006
6414 Ignore-this: 57d4bc9298b711e4bc9dc832c75295de
6415 I noticed this because I had accidentally inserted a bug which caused AssertionError to be raised from check_requirement().
6416]
6417[M-x whitespace-cleanup
6418zooko@zooko.com**20110510193653
6419 Ignore-this: dea02f831298c0f65ad096960e7df5c7
6420]
6421[docs: fix typo in running.rst, thanks to arch_o_median
6422zooko@zooko.com**20110510193633
6423 Ignore-this: ca06de166a46abbc61140513918e79e8
6424]
6425[relnotes.txt: don't claim to work on Cygwin (which has been untested for some time). refs #1342
6426david-sarah@jacaranda.org**20110204204902
6427 Ignore-this: 85ef118a48453d93fa4cddc32d65b25b
6428]
6429[relnotes.txt: forseeable -> foreseeable. refs #1342
6430david-sarah@jacaranda.org**20110204204116
6431 Ignore-this: 746debc4d82f4031ebf75ab4031b3a9
6432]
6433[replace remaining .html docs with .rst docs
6434zooko@zooko.com**20110510191650
6435 Ignore-this: d557d960a986d4ac8216d1677d236399
6436 Remove install.html (long since deprecated).
6437 Also replace some obsolete references to install.html with references to quickstart.rst.
6438 Fix some broken internal references within docs/historical/historical_known_issues.txt.
6439 Thanks to Ravi Pinjala and Patrick McDonald.
6440 refs #1227
6441]
6442[docs: FTP-and-SFTP.rst: fix a minor error and update the information about which version of Twisted fixes #1297
6443zooko@zooko.com**20110428055232
6444 Ignore-this: b63cfb4ebdbe32fb3b5f885255db4d39
6445]
6446[munin tahoe_files plugin: fix incorrect file count
6447francois@ctrlaltdel.ch**20110428055312
6448 Ignore-this: 334ba49a0bbd93b4a7b06a25697aba34
6449 fixes #1391
6450]
6451[Fix a test failure in test_package_initialization on Python 2.4.x due to exceptions being stringified differently than in later versions of Python. refs #1389
6452david-sarah@jacaranda.org**20110411190738
6453 Ignore-this: 7847d26bc117c328c679f08a7baee519
6454]
6455[tests: add test for including the ImportError message and traceback entry in the summary of errors from importing dependencies. refs #1389
6456david-sarah@jacaranda.org**20110410155844
6457 Ignore-this: fbecdbeb0d06a0f875fe8d4030aabafa
6458]
6459[allmydata/__init__.py: preserve the message and last traceback entry (file, line number, function, and source line) of ImportErrors in the package versions string. fixes #1389
6460david-sarah@jacaranda.org**20110410155705
6461 Ignore-this: 2f87b8b327906cf8bfca9440a0904900
6462]
6463[remove unused variable detected by pyflakes
6464zooko@zooko.com**20110407172231
6465 Ignore-this: 7344652d5e0720af822070d91f03daf9
6466]
6467[allmydata/__init__.py: Nicer reporting of unparseable version numbers in dependencies. fixes #1388
6468david-sarah@jacaranda.org**20110401202750
6469 Ignore-this: 9c6bd599259d2405e1caadbb3e0d8c7f
6470]
6471[update FTP-and-SFTP.rst: the necessary patch is included in Twisted-10.1
6472Brian Warner <warner@lothar.com>**20110325232511
6473 Ignore-this: d5307faa6900f143193bfbe14e0f01a
6474]
6475[control.py: remove all uses of s.get_serverid()
6476warner@lothar.com**20110227011203
6477 Ignore-this: f80a787953bd7fa3d40e828bde00e855
6478]
6479[web: remove some uses of s.get_serverid(), not all
6480warner@lothar.com**20110227011159
6481 Ignore-this: a9347d9cf6436537a47edc6efde9f8be
6482]
6483[immutable/downloader/fetcher.py: remove all get_serverid() calls
6484warner@lothar.com**20110227011156
6485 Ignore-this: fb5ef018ade1749348b546ec24f7f09a
6486]
6487[immutable/downloader/fetcher.py: fix diversity bug in server-response handling
6488warner@lothar.com**20110227011153
6489 Ignore-this: bcd62232c9159371ae8a16ff63d22c1b
6490 
6491 When blocks terminate (either COMPLETE or CORRUPT/DEAD/BADSEGNUM), the
6492 _shares_from_server dict was being popped incorrectly (using shnum as the
6493 index instead of serverid). I'm still thinking through the consequences of
6494 this bug. It was probably benign and really hard to detect. I think it would
6495 cause us to incorrectly believe that we're pulling too many shares from a
6496 server, and thus prefer a different server rather than asking for a second
6497 share from the first server. The diversity code is intended to spread out the
6498 number of shares simultaneously being requested from each server, but with
6499 this bug, it might be spreading out the total number of shares requested at
6500 all, not just simultaneously. (note that SegmentFetcher is scoped to a single
6501 segment, so the effect doesn't last very long).
6502]
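The diversity bug described in that entry reduces to a bookkeeping dict keyed by server being popped with a share number instead of the server key, so entries never cleared. A schematic illustration with hypothetical names (the real code in immutable/downloader/fetcher.py differs in detail):

    # shares currently being fetched, keyed by server
    shares_in_flight = {}   # serverid -> set of shnums

    def block_finished_buggy(serverid, shnum):
        shares_in_flight.pop(shnum, None)          # wrong key: shnum, not serverid

    def block_finished_fixed(serverid, shnum):
        pending = shares_in_flight.get(serverid)
        if pending is not None:
            pending.discard(shnum)
            if not pending:
                del shares_in_flight[serverid]     # correct key: serverid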
6503[immutable/downloader/share.py: reduce get_serverid(), one left, update ext deps
6504warner@lothar.com**20110227011150
6505 Ignore-this: d8d56dd8e7b280792b40105e13664554
6506 
6507 test_download.py: create+check MyShare instances better, make sure they share
6508 Server objects, now that finder.py cares
6509]
6510[immutable/downloader/finder.py: reduce use of get_serverid(), one left
6511warner@lothar.com**20110227011146
6512 Ignore-this: 5785be173b491ae8a78faf5142892020
6513]
6514[immutable/offloaded.py: reduce use of get_serverid() a bit more
6515warner@lothar.com**20110227011142
6516 Ignore-this: b48acc1b2ae1b311da7f3ba4ffba38f
6517]
6518[immutable/upload.py: reduce use of get_serverid()
6519warner@lothar.com**20110227011138
6520 Ignore-this: ffdd7ff32bca890782119a6e9f1495f6
6521]
6522[immutable/checker.py: remove some uses of s.get_serverid(), not all
6523warner@lothar.com**20110227011134
6524 Ignore-this: e480a37efa9e94e8016d826c492f626e
6525]
6526[add remaining get_* methods to storage_client.Server, NoNetworkServer, and
6527warner@lothar.com**20110227011132
6528 Ignore-this: 6078279ddf42b179996a4b53bee8c421
6529 MockIServer stubs
6530]
6531[upload.py: rearrange _make_trackers a bit, no behavior changes
6532warner@lothar.com**20110227011128
6533 Ignore-this: 296d4819e2af452b107177aef6ebb40f
6534]
6535[happinessutil.py: finally rename merge_peers to merge_servers
6536warner@lothar.com**20110227011124
6537 Ignore-this: c8cd381fea1dd888899cb71e4f86de6e
6538]
6539[test_upload.py: factor out FakeServerTracker
6540warner@lothar.com**20110227011120
6541 Ignore-this: 6c182cba90e908221099472cc159325b
6542]
6543[test_upload.py: server-vs-tracker cleanup
6544warner@lothar.com**20110227011115
6545 Ignore-this: 2915133be1a3ba456e8603885437e03
6546]
6547[happinessutil.py: server-vs-tracker cleanup
6548warner@lothar.com**20110227011111
6549 Ignore-this: b856c84033562d7d718cae7cb01085a9
6550]
6551[upload.py: more tracker-vs-server cleanup
6552warner@lothar.com**20110227011107
6553 Ignore-this: bb75ed2afef55e47c085b35def2de315
6554]
6555[upload.py: fix var names to avoid confusion between 'trackers' and 'servers'
6556warner@lothar.com**20110227011103
6557 Ignore-this: 5d5e3415b7d2732d92f42413c25d205d
6558]
6559[refactor: s/peer/server/ in immutable/upload, happinessutil.py, test_upload
6560warner@lothar.com**20110227011100
6561 Ignore-this: 7ea858755cbe5896ac212a925840fe68
6562 
6563 No behavioral changes, just updating variable/method names and log messages.
6564 The effects outside these three files should be minimal: some exception
6565 messages changed (to say "server" instead of "peer"), and some internal class
6566 names were changed. A few things still use "peer" to minimize external
6567 changes, like UploadResults.timings["peer_selection"] and
6568 happinessutil.merge_peers, which can be changed later.
6569]
6570[storage_client.py: clean up test_add_server/test_add_descriptor, remove .test_servers
6571warner@lothar.com**20110227011056
6572 Ignore-this: efad933e78179d3d5fdcd6d1ef2b19cc
6573]
6574[test_client.py, upload.py:: remove KiB/MiB/etc constants, and other dead code
6575warner@lothar.com**20110227011051
6576 Ignore-this: dc83c5794c2afc4f81e592f689c0dc2d
6577]
6578[test: increase timeout on a network test because Francois's ARM machine hit that timeout
6579zooko@zooko.com**20110317165909
6580 Ignore-this: 380c345cdcbd196268ca5b65664ac85b
6581 I'm skeptical that the test was proceeding correctly but ran out of time. It seems more likely that it had gotten hung. But if we raise the timeout to an even more extravagant number then we can be even more certain that the test was never going to finish.
6582]
6583[docs/configuration.rst: add a "Frontend Configuration" section
6584Brian Warner <warner@lothar.com>**20110222014323
6585 Ignore-this: 657018aa501fe4f0efef9851628444ca
6586 
6587 this points to docs/frontends/*.rst, which were previously underlinked
6588]
6589[web/filenode.py: avoid calling req.finish() on closed HTTP connections. Closes #1366
6590"Brian Warner <warner@lothar.com>"**20110221061544
6591 Ignore-this: 799d4de19933f2309b3c0c19a63bb888
6592]
6593[Add unit tests for cross_check_pkg_resources_versus_import, and a regression test for ref #1355. This requires a little refactoring to make it testable.
6594david-sarah@jacaranda.org**20110221015817
6595 Ignore-this: 51d181698f8c20d3aca58b057e9c475a
6596]
6597[allmydata/__init__.py: .name was used in place of the correct .__name__ when printing an exception. Also, robustify string formatting by using %r instead of %s in some places. fixes #1355.
6598david-sarah@jacaranda.org**20110221020125
6599 Ignore-this: b0744ed58f161bf188e037bad077fc48
6600]
6601[Refactor StorageFarmBroker handling of servers
6602Brian Warner <warner@lothar.com>**20110221015804
6603 Ignore-this: 842144ed92f5717699b8f580eab32a51
6604 
6605 Pass around IServer instance instead of (peerid, rref) tuple. Replace
6606 "descriptor" with "server". Other replacements:
6607 
6608  get_all_servers -> get_connected_servers/get_known_servers
6609  get_servers_for_index -> get_servers_for_psi (now returns IServers)
6610 
6611 This change still needs to be pushed further down: lots of code is now
6612 getting the IServer and then distributing (peerid, rref) internally.
6613 Instead, it ought to distribute the IServer internally and delay
6614 extracting a serverid or rref until the last moment.
6615 
6616 no_network.py was updated to retain parallelism.
6617]
6618[TAG allmydata-tahoe-1.8.2
6619warner@lothar.com**20110131020101]
6620Patch bundle hash:
6621476c42adb1e9b2af5b63429d91efb103561e42e3