Ticket #999: snapshotofbackendimplementation.darcs.patch

File snapshotofbackendimplementation.darcs.patch, 94.2 KB (added by arch_o_median, at 2011-06-26T05:35:28Z)

just so I don't lose it all...

Line 
1Fri Mar 25 14:35:14 MDT 2011  wilcoxjg@gmail.com
2  * storage: new mocking tests of storage server read and write
3  There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem whereas these tests mock all file system calls.
4
5Fri Jun 24 14:28:50 MDT 2011  wilcoxjg@gmail.com
6  * server.py, test_backends.py, interfaces.py, immutable.py (others?): working patch for implementation of backends plugin
7  sloppy not for production
8
9Sat Jun 25 23:27:32 MDT 2011  wilcoxjg@gmail.com
10  * a temp patch used as a snapshot
11
12Sat Jun 25 23:32:44 MDT 2011  wilcoxjg@gmail.com
13  * snapshot of progress on backend implementation (not suitable for trunk)
14
15New patches:
16
17[storage: new mocking tests of storage server read and write
18wilcoxjg@gmail.com**20110325203514
19 Ignore-this: df65c3c4f061dd1516f88662023fdb41
20 There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem whereas these tests mock all file system calls.
21] {
22addfile ./src/allmydata/test/test_server.py
23hunk ./src/allmydata/test/test_server.py 1
24+from twisted.trial import unittest
25+
26+from StringIO import StringIO
27+
28+from allmydata.test.common_util import ReallyEqualMixin
29+
30+import mock
31+
32+# This is the code that we're going to be testing.
33+from allmydata.storage.server import StorageServer
34+
35+# The following share file contents was generated with
36+# storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
37+# with share data == 'a'.
38+share_data = 'a\x00\x00\x00\x00xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\x00(\xde\x80'
39+share_file_data = '\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01' + share_data
40+
41+sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
42+
43+class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
44+    @mock.patch('__builtin__.open')
45+    def test_create_server(self, mockopen):
46+        """ This tests whether a server instance can be constructed. """
47+
48+        def call_open(fname, mode):
49+            if fname == 'testdir/bucket_counter.state':
50+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
51+            elif fname == 'testdir/lease_checker.state':
52+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
53+            elif fname == 'testdir/lease_checker.history':
54+                return StringIO()
55+        mockopen.side_effect = call_open
56+
57+        # Now begin the test.
58+        s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
59+
60+        # You passed!
61+
62+class TestServer(unittest.TestCase, ReallyEqualMixin):
63+    @mock.patch('__builtin__.open')
64+    def setUp(self, mockopen):
65+        def call_open(fname, mode):
66+            if fname == 'testdir/bucket_counter.state':
67+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
68+            elif fname == 'testdir/lease_checker.state':
69+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
70+            elif fname == 'testdir/lease_checker.history':
71+                return StringIO()
72+        mockopen.side_effect = call_open
73+
74+        self.s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
75+
76+
77+    @mock.patch('time.time')
78+    @mock.patch('os.mkdir')
79+    @mock.patch('__builtin__.open')
80+    @mock.patch('os.listdir')
81+    @mock.patch('os.path.isdir')
82+    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
83+        """Write a new share."""
84+
85+        def call_listdir(dirname):
86+            self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
87+            raise OSError(2, "No such file or directory: 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a'")
88+
89+        mocklistdir.side_effect = call_listdir
90+
91+        class MockFile:
92+            def __init__(self):
93+                self.buffer = ''
94+                self.pos = 0
95+            def write(self, instring):
96+                begin = self.pos
97+                padlen = begin - len(self.buffer)
98+                if padlen > 0:
99+                    self.buffer += '\x00' * padlen
100+                end = self.pos + len(instring)
101+                self.buffer = self.buffer[:begin]+instring+self.buffer[end:]
102+                self.pos = end
103+            def close(self):
104+                pass
105+            def seek(self, pos):
106+                self.pos = pos
107+            def read(self, numberbytes):
108+                return self.buffer[self.pos:self.pos+numberbytes]
109+            def tell(self):
110+                return self.pos
111+
112+        mocktime.return_value = 0
113+
114+        sharefile = MockFile()
115+        def call_open(fname, mode):
116+            self.failUnlessReallyEqual(fname, 'testdir/shares/incoming/or/orsxg5dtorxxeylhmvpws3temv4a/0' )
117+            return sharefile
118+
119+        mockopen.side_effect = call_open
120+        # Now begin the test.
121+        alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
122+        print bs
123+        bs[0].remote_write(0, 'a')
124+        self.failUnlessReallyEqual(sharefile.buffer, share_file_data)
125+
126+
127+    @mock.patch('os.path.exists')
128+    @mock.patch('os.path.getsize')
129+    @mock.patch('__builtin__.open')
130+    @mock.patch('os.listdir')
131+    def test_read_share(self, mocklistdir, mockopen, mockgetsize, mockexists):
132+        """ This tests whether the code correctly finds and reads
133+        shares written out by old (Tahoe-LAFS <= v1.8.2)
134+        servers. There is a similar test in test_download, but that one
135+        is from the perspective of the client and exercises a deeper
136+        stack of code. This one is for exercising just the
137+        StorageServer object. """
138+
139+        def call_listdir(dirname):
140+            self.failUnlessReallyEqual(dirname,'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
141+            return ['0']
142+
143+        mocklistdir.side_effect = call_listdir
144+
145+        def call_open(fname, mode):
146+            self.failUnlessReallyEqual(fname, sharefname)
147+            self.failUnless('r' in mode, mode)
148+            self.failUnless('b' in mode, mode)
149+
150+            return StringIO(share_file_data)
151+        mockopen.side_effect = call_open
152+
153+        datalen = len(share_file_data)
154+        def call_getsize(fname):
155+            self.failUnlessReallyEqual(fname, sharefname)
156+            return datalen
157+        mockgetsize.side_effect = call_getsize
158+
159+        def call_exists(fname):
160+            self.failUnlessReallyEqual(fname, sharefname)
161+            return True
162+        mockexists.side_effect = call_exists
163+
164+        # Now begin the test.
165+        bs = self.s.remote_get_buckets('teststorage_index')
166+
167+        self.failUnlessEqual(len(bs), 1)
168+        b = bs[0]
169+        self.failUnlessReallyEqual(b.remote_read(0, datalen), share_data)
170+        # If you try to read past the end you get as much data as is there.
171+        self.failUnlessReallyEqual(b.remote_read(0, datalen+20), share_data)
172+        # If you start reading past the end of the file you get the empty string.
173+        self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '')
174}
175[server.py, test_backends.py, interfaces.py, immutable.py (others?): working patch for implementation of backends plugin
176wilcoxjg@gmail.com**20110624202850
177 Ignore-this: ca6f34987ee3b0d25cac17c1fc22d50c
178 sloppy not for production
179] {
180move ./src/allmydata/test/test_server.py ./src/allmydata/test/test_backends.py
181hunk ./src/allmydata/storage/crawler.py 13
182     pass
183 
184 class ShareCrawler(service.MultiService):
185-    """A ShareCrawler subclass is attached to a StorageServer, and
186+    """A subclass of ShareCrawler is attached to a StorageServer, and
187     periodically walks all of its shares, processing each one in some
188     fashion. This crawl is rate-limited, to reduce the IO burden on the host,
189     since large servers can easily have a terabyte of shares, in several
190hunk ./src/allmydata/storage/crawler.py 31
191     We assume that the normal upload/download/get_buckets traffic of a tahoe
192     grid will cause the prefixdir contents to be mostly cached in the kernel,
193     or that the number of buckets in each prefixdir will be small enough to
194-    load quickly. A 1TB allmydata.com server was measured to have 2.56M
195+    load quickly. A 1TB allmydata.com server was measured to have 2.56 * 10^6
196     buckets, spread into the 1024 prefixdirs, with about 2500 buckets per
197     prefix. On this server, each prefixdir took 130ms-200ms to list the first
198     time, and 17ms to list the second time.
199hunk ./src/allmydata/storage/crawler.py 68
200     cpu_slice = 1.0 # use up to 1.0 seconds before yielding
201     minimum_cycle_time = 300 # don't run a cycle faster than this
202 
203-    def __init__(self, server, statefile, allowed_cpu_percentage=None):
204+    def __init__(self, backend, statefile, allowed_cpu_percentage=None):
205         service.MultiService.__init__(self)
206         if allowed_cpu_percentage is not None:
207             self.allowed_cpu_percentage = allowed_cpu_percentage
208hunk ./src/allmydata/storage/crawler.py 72
209-        self.server = server
210-        self.sharedir = server.sharedir
211-        self.statefile = statefile
212+        self.backend = backend
213         self.prefixes = [si_b2a(struct.pack(">H", i << (16-10)))[:2]
214                          for i in range(2**10)]
215         self.prefixes.sort()
216hunk ./src/allmydata/storage/crawler.py 446
217 
218     minimum_cycle_time = 60*60 # we don't need this more than once an hour
219 
220-    def __init__(self, server, statefile, num_sample_prefixes=1):
221-        ShareCrawler.__init__(self, server, statefile)
222+    def __init__(self, statefile, num_sample_prefixes=1):
223+        ShareCrawler.__init__(self, statefile)
224         self.num_sample_prefixes = num_sample_prefixes
225 
226     def add_initial_state(self):
227hunk ./src/allmydata/storage/expirer.py 15
228     removed.
229 
230     I collect statistics on the leases and make these available to a web
231-    status page, including::
232+    status page, including:
233 
234     Space recovered during this cycle-so-far:
235      actual (only if expiration_enabled=True):
236hunk ./src/allmydata/storage/expirer.py 51
237     slow_start = 360 # wait 6 minutes after startup
238     minimum_cycle_time = 12*60*60 # not more than twice per day
239 
240-    def __init__(self, server, statefile, historyfile,
241+    def __init__(self, statefile, historyfile,
242                  expiration_enabled, mode,
243                  override_lease_duration, # used if expiration_mode=="age"
244                  cutoff_date, # used if expiration_mode=="cutoff-date"
245hunk ./src/allmydata/storage/expirer.py 71
246         else:
247             raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % mode)
248         self.sharetypes_to_expire = sharetypes
249-        ShareCrawler.__init__(self, server, statefile)
250+        ShareCrawler.__init__(self, statefile)
251 
252     def add_initial_state(self):
253         # we fill ["cycle-to-date"] here (even though they will be reset in
254hunk ./src/allmydata/storage/immutable.py 44
255     sharetype = "immutable"
256 
257     def __init__(self, filename, max_size=None, create=False):
258-        """ If max_size is not None then I won't allow more than max_size to be written to me. If create=True and max_size must not be None. """
259+        """ If max_size is not None then I won't allow more than
260+        max_size to be written to me. If create=True then max_size
261+        must not be None. """
262         precondition((max_size is not None) or (not create), max_size, create)
263         self.home = filename
264         self._max_size = max_size
265hunk ./src/allmydata/storage/immutable.py 87
266 
267     def read_share_data(self, offset, length):
268         precondition(offset >= 0)
269-        # reads beyond the end of the data are truncated. Reads that start
270-        # beyond the end of the data return an empty string. I wonder why
271-        # Python doesn't do the following computation for me?
272+        # Reads beyond the end of the data are truncated. Reads that start
273+        # beyond the end of the data return an empty string.
274         seekpos = self._data_offset+offset
275         fsize = os.path.getsize(self.home)
276         actuallength = max(0, min(length, fsize-seekpos))
277hunk ./src/allmydata/storage/immutable.py 198
278             space_freed += os.stat(self.home)[stat.ST_SIZE]
279             self.unlink()
280         return space_freed
281+class NullBucketWriter(Referenceable):
282+    implements(RIBucketWriter)
283 
284hunk ./src/allmydata/storage/immutable.py 201
285+    def remote_write(self, offset, data):
286+        return
287 
288 class BucketWriter(Referenceable):
289     implements(RIBucketWriter)
290hunk ./src/allmydata/storage/server.py 7
291 from twisted.application import service
292 
293 from zope.interface import implements
294-from allmydata.interfaces import RIStorageServer, IStatsProducer
295+from allmydata.interfaces import RIStorageServer, IStatsProducer, IShareStore
296 from allmydata.util import fileutil, idlib, log, time_format
297 import allmydata # for __full_version__
298 
299hunk ./src/allmydata/storage/server.py 16
300 from allmydata.storage.lease import LeaseInfo
301 from allmydata.storage.mutable import MutableShareFile, EmptyShare, \
302      create_mutable_sharefile
303-from allmydata.storage.immutable import ShareFile, BucketWriter, BucketReader
304+from allmydata.storage.immutable import ShareFile, NullBucketWriter, BucketWriter, BucketReader
305 from allmydata.storage.crawler import BucketCountingCrawler
306 from allmydata.storage.expirer import LeaseCheckingCrawler
307 
308hunk ./src/allmydata/storage/server.py 20
309+from zope.interface import implements
310+
311+# A Backend is a MultiService so that its server's crawlers (if the server has any) can
312+# be started and stopped.
313+class Backend(service.MultiService):
314+    implements(IStatsProducer)
315+    def __init__(self):
316+        service.MultiService.__init__(self)
317+
318+    def get_bucket_shares(self):
319+        """XXX"""
320+        raise NotImplementedError
321+
322+    def get_share(self):
323+        """XXX"""
324+        raise NotImplementedError
325+
326+    def make_bucket_writer(self):
327+        """XXX"""
328+        raise NotImplementedError
329+
330+class NullBackend(Backend):
331+    def __init__(self):
332+        Backend.__init__(self)
333+
334+    def get_available_space(self):
335+        return None
336+
337+    def get_bucket_shares(self, storage_index):
338+        return set()
339+
340+    def get_share(self, storage_index, sharenum):
341+        return None
342+
343+    def make_bucket_writer(self, storage_index, shnum, max_space_per_bucket, lease_info, canary):
344+        return NullBucketWriter()
345+
346+class FSBackend(Backend):
347+    def __init__(self, storedir, readonly=False, reserved_space=0):
348+        Backend.__init__(self)
349+
350+        self._setup_storage(storedir, readonly, reserved_space)
351+        self._setup_corruption_advisory()
352+        self._setup_bucket_counter()
353+        self._setup_lease_checkerf()
354+
355+    def _setup_storage(self, storedir, readonly, reserved_space):
356+        self.storedir = storedir
357+        self.readonly = readonly
358+        self.reserved_space = int(reserved_space)
359+        if self.reserved_space:
360+            if self.get_available_space() is None:
361+                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
362+                        umid="0wZ27w", level=log.UNUSUAL)
363+
364+        self.sharedir = os.path.join(self.storedir, "shares")
365+        fileutil.make_dirs(self.sharedir)
366+        self.incomingdir = os.path.join(self.sharedir, 'incoming')
367+        self._clean_incomplete()
368+
369+    def _clean_incomplete(self):
370+        fileutil.rm_dir(self.incomingdir)
371+        fileutil.make_dirs(self.incomingdir)
372+
373+    def _setup_corruption_advisory(self):
374+        # we don't actually create the corruption-advisory dir until necessary
375+        self.corruption_advisory_dir = os.path.join(self.storedir,
376+                                                    "corruption-advisories")
377+
378+    def _setup_bucket_counter(self):
379+        statefile = os.path.join(self.storedir, "bucket_counter.state")
380+        self.bucket_counter = BucketCountingCrawler(statefile)
381+        self.bucket_counter.setServiceParent(self)
382+
383+    def _setup_lease_checkerf(self):
384+        statefile = os.path.join(self.storedir, "lease_checker.state")
385+        historyfile = os.path.join(self.storedir, "lease_checker.history")
386+        self.lease_checker = LeaseCheckingCrawler(statefile, historyfile,
387+                                   expiration_enabled, expiration_mode,
388+                                   expiration_override_lease_duration,
389+                                   expiration_cutoff_date,
390+                                   expiration_sharetypes)
391+        self.lease_checker.setServiceParent(self)
392+
393+    def get_available_space(self):
394+        if self.readonly:
395+            return 0
396+        return fileutil.get_available_space(self.storedir, self.reserved_space)
397+
398+    def get_bucket_shares(self, storage_index):
399+        """Return a list of (shnum, pathname) tuples for files that hold
400+        shares for this storage_index. In each tuple, 'shnum' will always be
401+        the integer form of the last component of 'pathname'."""
402+        storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
403+        try:
404+            for f in os.listdir(storagedir):
405+                if NUM_RE.match(f):
406+                    filename = os.path.join(storagedir, f)
407+                    yield (int(f), filename)
408+        except OSError:
409+            # Commonly caused by there being no buckets at all.
410+            pass
411+
412 # storage/
413 # storage/shares/incoming
414 #   incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will
415hunk ./src/allmydata/storage/server.py 143
416     name = 'storage'
417     LeaseCheckerClass = LeaseCheckingCrawler
418 
419-    def __init__(self, storedir, nodeid, reserved_space=0,
420-                 discard_storage=False, readonly_storage=False,
421+    def __init__(self, nodeid, backend, reserved_space=0,
422+                 readonly_storage=False,
423                  stats_provider=None,
424                  expiration_enabled=False,
425                  expiration_mode="age",
426hunk ./src/allmydata/storage/server.py 155
427         assert isinstance(nodeid, str)
428         assert len(nodeid) == 20
429         self.my_nodeid = nodeid
430-        self.storedir = storedir
431-        sharedir = os.path.join(storedir, "shares")
432-        fileutil.make_dirs(sharedir)
433-        self.sharedir = sharedir
434-        # we don't actually create the corruption-advisory dir until necessary
435-        self.corruption_advisory_dir = os.path.join(storedir,
436-                                                    "corruption-advisories")
437-        self.reserved_space = int(reserved_space)
438-        self.no_storage = discard_storage
439-        self.readonly_storage = readonly_storage
440         self.stats_provider = stats_provider
441         if self.stats_provider:
442             self.stats_provider.register_producer(self)
443hunk ./src/allmydata/storage/server.py 158
444-        self.incomingdir = os.path.join(sharedir, 'incoming')
445-        self._clean_incomplete()
446-        fileutil.make_dirs(self.incomingdir)
447         self._active_writers = weakref.WeakKeyDictionary()
448hunk ./src/allmydata/storage/server.py 159
449+        self.backend = backend
450+        self.backend.setServiceParent(self)
451         log.msg("StorageServer created", facility="tahoe.storage")
452 
453hunk ./src/allmydata/storage/server.py 163
454-        if reserved_space:
455-            if self.get_available_space() is None:
456-                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
457-                        umin="0wZ27w", level=log.UNUSUAL)
458-
459         self.latencies = {"allocate": [], # immutable
460                           "write": [],
461                           "close": [],
462hunk ./src/allmydata/storage/server.py 174
463                           "renew": [],
464                           "cancel": [],
465                           }
466-        self.add_bucket_counter()
467-
468-        statefile = os.path.join(self.storedir, "lease_checker.state")
469-        historyfile = os.path.join(self.storedir, "lease_checker.history")
470-        klass = self.LeaseCheckerClass
471-        self.lease_checker = klass(self, statefile, historyfile,
472-                                   expiration_enabled, expiration_mode,
473-                                   expiration_override_lease_duration,
474-                                   expiration_cutoff_date,
475-                                   expiration_sharetypes)
476-        self.lease_checker.setServiceParent(self)
477 
478     def __repr__(self):
479         return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),)
480hunk ./src/allmydata/storage/server.py 178
481 
482-    def add_bucket_counter(self):
483-        statefile = os.path.join(self.storedir, "bucket_counter.state")
484-        self.bucket_counter = BucketCountingCrawler(self, statefile)
485-        self.bucket_counter.setServiceParent(self)
486-
487     def count(self, name, delta=1):
488         if self.stats_provider:
489             self.stats_provider.count("storage_server." + name, delta)
490hunk ./src/allmydata/storage/server.py 233
491             kwargs["facility"] = "tahoe.storage"
492         return log.msg(*args, **kwargs)
493 
494-    def _clean_incomplete(self):
495-        fileutil.rm_dir(self.incomingdir)
496-
497     def get_stats(self):
498         # remember: RIStatsProvider requires that our return dict
499         # contains numeric values.
500hunk ./src/allmydata/storage/server.py 269
501             stats['storage_server.total_bucket_count'] = bucket_count
502         return stats
503 
504-    def get_available_space(self):
505-        """Returns available space for share storage in bytes, or None if no
506-        API to get this information is available."""
507-
508-        if self.readonly_storage:
509-            return 0
510-        return fileutil.get_available_space(self.storedir, self.reserved_space)
511-
512     def allocated_size(self):
513         space = 0
514         for bw in self._active_writers:
515hunk ./src/allmydata/storage/server.py 276
516         return space
517 
518     def remote_get_version(self):
519-        remaining_space = self.get_available_space()
520+        remaining_space = self.backend.get_available_space()
521         if remaining_space is None:
522             # We're on a platform that has no API to get disk stats.
523             remaining_space = 2**64
524hunk ./src/allmydata/storage/server.py 301
525         self.count("allocate")
526         alreadygot = set()
527         bucketwriters = {} # k: shnum, v: BucketWriter
528-        si_dir = storage_index_to_dir(storage_index)
529-        si_s = si_b2a(storage_index)
530 
531hunk ./src/allmydata/storage/server.py 302
532+        si_s = si_b2a(storage_index)
533         log.msg("storage: allocate_buckets %s" % si_s)
534 
535         # in this implementation, the lease information (including secrets)
536hunk ./src/allmydata/storage/server.py 316
537 
538         max_space_per_bucket = allocated_size
539 
540-        remaining_space = self.get_available_space()
541+        remaining_space = self.backend.get_available_space()
542         limited = remaining_space is not None
543         if limited:
544             # this is a bit conservative, since some of this allocated_size()
545hunk ./src/allmydata/storage/server.py 329
546         # they asked about: this will save them a lot of work. Add or update
547         # leases for all of them: if they want us to hold shares for this
548         # file, they'll want us to hold leases for this file.
549-        for (shnum, fn) in self._get_bucket_shares(storage_index):
550+        for (shnum, fn) in self.backend.get_bucket_shares(storage_index):
551             alreadygot.add(shnum)
552             sf = ShareFile(fn)
553             sf.add_or_renew_lease(lease_info)
554hunk ./src/allmydata/storage/server.py 335
555 
556         for shnum in sharenums:
557-            incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum)
558-            finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum)
559-            if os.path.exists(finalhome):
560+            share = self.backend.get_share(storage_index, shnum)
561+
562+            if not share:
563+                if (not limited) or (remaining_space >= max_space_per_bucket):
564+                    # ok! we need to create the new share file.
565+                    bw = self.backend.make_bucket_writer(storage_index, shnum,
566+                                      max_space_per_bucket, lease_info, canary)
567+                    bucketwriters[shnum] = bw
568+                    self._active_writers[bw] = 1
569+                    if limited:
570+                        remaining_space -= max_space_per_bucket
571+                else:
572+                    # bummer! not enough space to accept this bucket
573+                    pass
574+
575+            elif share.is_complete():
576                 # great! we already have it. easy.
577                 pass
578hunk ./src/allmydata/storage/server.py 353
579-            elif os.path.exists(incominghome):
580+            elif not share.is_complete():
581                 # Note that we don't create BucketWriters for shnums that
582                 # have a partial share (in incoming/), so if a second upload
583                 # occurs while the first is still in progress, the second
584hunk ./src/allmydata/storage/server.py 359
585                 # uploader will use different storage servers.
586                 pass
587-            elif (not limited) or (remaining_space >= max_space_per_bucket):
588-                # ok! we need to create the new share file.
589-                bw = BucketWriter(self, incominghome, finalhome,
590-                                  max_space_per_bucket, lease_info, canary)
591-                if self.no_storage:
592-                    bw.throw_out_all_data = True
593-                bucketwriters[shnum] = bw
594-                self._active_writers[bw] = 1
595-                if limited:
596-                    remaining_space -= max_space_per_bucket
597-            else:
598-                # bummer! not enough space to accept this bucket
599-                pass
600-
601-        if bucketwriters:
602-            fileutil.make_dirs(os.path.join(self.sharedir, si_dir))
603 
604         self.add_latency("allocate", time.time() - start)
605         return alreadygot, bucketwriters
606hunk ./src/allmydata/storage/server.py 437
607             self.stats_provider.count('storage_server.bytes_added', consumed_size)
608         del self._active_writers[bw]
609 
610-    def _get_bucket_shares(self, storage_index):
611-        """Return a list of (shnum, pathname) tuples for files that hold
612-        shares for this storage_index. In each tuple, 'shnum' will always be
613-        the integer form of the last component of 'pathname'."""
614-        storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
615-        try:
616-            for f in os.listdir(storagedir):
617-                if NUM_RE.match(f):
618-                    filename = os.path.join(storagedir, f)
619-                    yield (int(f), filename)
620-        except OSError:
621-            # Commonly caused by there being no buckets at all.
622-            pass
623 
624     def remote_get_buckets(self, storage_index):
625         start = time.time()
626hunk ./src/allmydata/storage/server.py 444
627         si_s = si_b2a(storage_index)
628         log.msg("storage: get_buckets %s" % si_s)
629         bucketreaders = {} # k: sharenum, v: BucketReader
630-        for shnum, filename in self._get_bucket_shares(storage_index):
631+        for shnum, filename in self.backend.get_bucket_shares(storage_index):
632             bucketreaders[shnum] = BucketReader(self, filename,
633                                                 storage_index, shnum)
634         self.add_latency("get", time.time() - start)
635hunk ./src/allmydata/test/test_backends.py 10
636 import mock
637 
638 # This is the code that we're going to be testing.
639-from allmydata.storage.server import StorageServer
640+from allmydata.storage.server import StorageServer, FSBackend, NullBackend
641 
642 # The following share file contents was generated with
643 # storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
644hunk ./src/allmydata/test/test_backends.py 21
645 sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
646 
647 class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
648+    @mock.patch('time.time')
649+    @mock.patch('os.mkdir')
650+    @mock.patch('__builtin__.open')
651+    @mock.patch('os.listdir')
652+    @mock.patch('os.path.isdir')
653+    def test_create_server_null_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
654+        """ This tests whether a server instance can be constructed
655+        with a null backend. The server instance fails the test if it
656+        tries to read or write to the file system. """
657+
658+        # Now begin the test.
659+        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend())
660+
661+        self.failIf(mockisdir.called)
662+        self.failIf(mocklistdir.called)
663+        self.failIf(mockopen.called)
664+        self.failIf(mockmkdir.called)
665+
666+        # You passed!
667+
668+    @mock.patch('time.time')
669+    @mock.patch('os.mkdir')
670     @mock.patch('__builtin__.open')
671hunk ./src/allmydata/test/test_backends.py 44
672-    def test_create_server(self, mockopen):
673-        """ This tests whether a server instance can be constructed. """
674+    @mock.patch('os.listdir')
675+    @mock.patch('os.path.isdir')
676+    def test_create_server_fs_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
677+        """ This tests whether a server instance can be constructed
678+        with a filesystem backend. To pass the test, it has to use the
679+        filesystem in only the prescribed ways. """
680 
681         def call_open(fname, mode):
682             if fname == 'testdir/bucket_counter.state':
683hunk ./src/allmydata/test/test_backends.py 58
684                 raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
685             elif fname == 'testdir/lease_checker.history':
686                 return StringIO()
687+            else:
688+                self.fail("Server with FS backend tried to open '%s' in mode '%s'" % (fname, mode))
689         mockopen.side_effect = call_open
690 
691         # Now begin the test.
692hunk ./src/allmydata/test/test_backends.py 63
693-        s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
694+        s = StorageServer('testnodeidxxxxxxxxxx', backend=FSBackend('teststoredir'))
695+
696+        self.failIf(mockisdir.called)
697+        self.failIf(mocklistdir.called)
698+        self.failIf(mockopen.called)
699+        self.failIf(mockmkdir.called)
700+        self.failIf(mocktime.called)
701 
702         # You passed!
703 
704hunk ./src/allmydata/test/test_backends.py 73
705-class TestServer(unittest.TestCase, ReallyEqualMixin):
706+class TestServerNullBackend(unittest.TestCase, ReallyEqualMixin):
707+    def setUp(self):
708+        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend())
709+
710+    @mock.patch('os.mkdir')
711+    @mock.patch('__builtin__.open')
712+    @mock.patch('os.listdir')
713+    @mock.patch('os.path.isdir')
714+    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir):
715+        """ Write a new share. """
716+
717+        # Now begin the test.
718+        alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
719+        bs[0].remote_write(0, 'a')
720+        self.failIf(mockisdir.called)
721+        self.failIf(mocklistdir.called)
722+        self.failIf(mockopen.called)
723+        self.failIf(mockmkdir.called)
724+
725+    @mock.patch('os.path.exists')
726+    @mock.patch('os.path.getsize')
727+    @mock.patch('__builtin__.open')
728+    @mock.patch('os.listdir')
729+    def test_read_share(self, mocklistdir, mockopen, mockgetsize, mockexists):
730+        """ This tests whether the code correctly finds and reads
731+        shares written out by old (Tahoe-LAFS <= v1.8.2)
732+        servers. There is a similar test in test_download, but that one
733+        is from the perspective of the client and exercises a deeper
734+        stack of code. This one is for exercising just the
735+        StorageServer object. """
736+
737+        # Now begin the test.
738+        bs = self.s.remote_get_buckets('teststorage_index')
739+
740+        self.failUnlessEqual(len(bs), 0)
741+        self.failIf(mocklistdir.called)
742+        self.failIf(mockopen.called)
743+        self.failIf(mockgetsize.called)
744+        self.failIf(mockexists.called)
745+
746+
747+class TestServerFSBackend(unittest.TestCase, ReallyEqualMixin):
748     @mock.patch('__builtin__.open')
749     def setUp(self, mockopen):
750         def call_open(fname, mode):
751hunk ./src/allmydata/test/test_backends.py 126
752                 return StringIO()
753         mockopen.side_effect = call_open
754 
755-        self.s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
756-
757+        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=FSBackend('teststoredir'))
758 
759     @mock.patch('time.time')
760     @mock.patch('os.mkdir')
761hunk ./src/allmydata/test/test_backends.py 134
762     @mock.patch('os.listdir')
763     @mock.patch('os.path.isdir')
764     def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
765-        """Handle a report of corruption."""
766+        """ Write a new share. """
767 
768         def call_listdir(dirname):
769             self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
770hunk ./src/allmydata/test/test_backends.py 173
771         mockopen.side_effect = call_open
772         # Now begin the test.
773         alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
774-        print bs
775         bs[0].remote_write(0, 'a')
776         self.failUnlessReallyEqual(sharefile.buffer, share_file_data)
777 
778hunk ./src/allmydata/test/test_backends.py 176
779-
780     @mock.patch('os.path.exists')
781     @mock.patch('os.path.getsize')
782     @mock.patch('__builtin__.open')
783hunk ./src/allmydata/test/test_backends.py 218
784 
785         self.failUnlessEqual(len(bs), 1)
786         b = bs[0]
787+        # These should match by definition, the next two cases cover cases without (completely) unambiguous behaviors.
788         self.failUnlessReallyEqual(b.remote_read(0, datalen), share_data)
789         # If you try to read past the end you get the as much data as is there.
790         self.failUnlessReallyEqual(b.remote_read(0, datalen+20), share_data)
791hunk ./src/allmydata/test/test_backends.py 224
792         # If you start reading past the end of the file you get the empty string.
793         self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '')
794+
795+
796}
797[a temp patch used as a snapshot
798wilcoxjg@gmail.com**20110626052732
799 Ignore-this: 95f05e314eaec870afa04c76d979aa44
800] {
801hunk ./docs/configuration.rst 637
802   [storage]
803   enabled = True
804   readonly = True
805-  sizelimit = 10000000000
806 
807 
808   [helper]
809hunk ./docs/garbage-collection.rst 16
810 
811 When a file or directory in the virtual filesystem is no longer referenced,
812 the space that its shares occupied on each storage server can be freed,
813-making room for other shares. Tahoe currently uses a garbage collection
814+making room for other shares. Tahoe uses a garbage collection
815 ("GC") mechanism to implement this space-reclamation process. Each share has
816 one or more "leases", which are managed by clients who want the
817 file/directory to be retained. The storage server accepts each share for a
818hunk ./docs/garbage-collection.rst 34
819 the `<lease-tradeoffs.svg>`_ diagram to get an idea for the tradeoffs involved.
820 If lease renewal occurs quickly and with 100% reliability, than any renewal
821 time that is shorter than the lease duration will suffice, but a larger ratio
822-of duration-over-renewal-time will be more robust in the face of occasional
823+of lease duration to renewal time will be more robust in the face of occasional
824 delays or failures.
825 
826 The current recommended values for a small Tahoe grid are to renew the leases
827replace ./docs/garbage-collection.rst [A-Za-z_0-9\-\.] Tahoe Tahoe-LAFS
828hunk ./src/allmydata/client.py 260
829             sharetypes.append("mutable")
830         expiration_sharetypes = tuple(sharetypes)
831 
832+        if self.get_config("storage", "backend", "filesystem") == "filesystem":
833+            xyz
834+        xyz
835         ss = StorageServer(storedir, self.nodeid,
836                            reserved_space=reserved,
837                            discard_storage=discard,
838hunk ./src/allmydata/storage/crawler.py 234
839         f = open(tmpfile, "wb")
840         pickle.dump(self.state, f)
841         f.close()
842-        fileutil.move_into_place(tmpfile, self.statefile)
843+        fileutil.move_into_place(tmpfile, self.statefname)
844 
845     def startService(self):
846         # arrange things to look like we were just sleeping, so
847}
848[snapshot of progress on backend implementation (not suitable for trunk)
849wilcoxjg@gmail.com**20110626053244
850 Ignore-this: 50c764af791c2b99ada8289546806a0a
851] {
852adddir ./src/allmydata/storage/backends
853adddir ./src/allmydata/storage/backends/das
854move ./src/allmydata/storage/expirer.py ./src/allmydata/storage/backends/das/expirer.py
855adddir ./src/allmydata/storage/backends/null
856hunk ./src/allmydata/interfaces.py 270
857         store that on disk.
858         """
859 
860+class IStorageBackend(Interface):
861+    """
862+    Objects of this kind live on the server side and are used by the
863+    storage server object.
864+    """
865+    def get_available_space(self, reserved_space):
866+        """ Returns available space for share storage in bytes, or
867+        None if this information is not available or if the available
868+        space is unlimited.
869+
870+        If the backend is configured for read-only mode then this will
871+        return 0.
872+
873+        reserved_space is how many bytes to subtract from the answer, so
874+        you can pass how many bytes you would like to leave unused on this
875+        filesystem as reserved_space. """
876+
877+    def get_bucket_shares(self):
878+        """XXX"""
879+
880+    def get_share(self):
881+        """XXX"""
882+
883+    def make_bucket_writer(self):
884+        """XXX"""
885+
886+class IStorageBackendShare(Interface):
887+    """
888+    This object contains as much as all of the share data.  It is intended
889+    for lazy evaluation such that in many use cases substantially less than
890+    all of the share data will be accessed.
891+    """
892+    def is_complete(self):
893+        """
894+        Returns the share state, or None if the share does not exist.
895+        """
896+
897 class IStorageBucketWriter(Interface):
898     """
899     Objects of this kind live on the client side.
900hunk ./src/allmydata/interfaces.py 2492
901 
902 class EmptyPathnameComponentError(Exception):
903     """The webapi disallows empty pathname components."""
904+
905+class IShareStore(Interface):
906+    pass
907+
908addfile ./src/allmydata/storage/backends/__init__.py
909addfile ./src/allmydata/storage/backends/das/__init__.py
910addfile ./src/allmydata/storage/backends/das/core.py
911hunk ./src/allmydata/storage/backends/das/core.py 1
912+from allmydata.interfaces import IStorageBackend
913+from allmydata.storage.backends.base import Backend
914+from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir
915+from allmydata.util.assertutil import precondition
916+
917+import os, re, weakref, struct, time
918+
919+from foolscap.api import Referenceable
920+from twisted.application import service
921+
922+from zope.interface import implements
923+from allmydata.interfaces import RIStorageServer, IStatsProducer, IShareStore
924+from allmydata.util import fileutil, idlib, log, time_format
925+import allmydata # for __full_version__
926+
927+from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir
928+_pyflakes_hush = [si_b2a, si_a2b, storage_index_to_dir] # re-exported
929+from allmydata.storage.lease import LeaseInfo
930+from allmydata.storage.mutable import MutableShareFile, EmptyShare, \
931+     create_mutable_sharefile
932+from allmydata.storage.backends.das.immutable import NullBucketWriter, BucketWriter, BucketReader
933+from allmydata.storage.crawler import FSBucketCountingCrawler
934+from allmydata.storage.backends.das.expirer import FSLeaseCheckingCrawler
935+
936+from zope.interface import implements
937+
938+class DASCore(Backend):
939+    implements(IStorageBackend)
940+    def __init__(self, storedir, expiration_policy, readonly=False, reserved_space=0):
941+        Backend.__init__(self)
942+
943+        self._setup_storage(storedir, readonly, reserved_space)
944+        self._setup_corruption_advisory()
945+        self._setup_bucket_counter()
946+        self._setup_lease_checkerf(expiration_policy)
947+
948+    def _setup_storage(self, storedir, readonly, reserved_space):
949+        self.storedir = storedir
950+        self.readonly = readonly
951+        self.reserved_space = int(reserved_space)
952+        if self.reserved_space:
953+            if self.get_available_space() is None:
954+                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
955+                        umid="0wZ27w", level=log.UNUSUAL)
956+
957+        self.sharedir = os.path.join(self.storedir, "shares")
958+        fileutil.make_dirs(self.sharedir)
959+        self.incomingdir = os.path.join(self.sharedir, 'incoming')
960+        self._clean_incomplete()
961+
962+    def _clean_incomplete(self):
963+        fileutil.rm_dir(self.incomingdir)
964+        fileutil.make_dirs(self.incomingdir)
965+
966+    def _setup_corruption_advisory(self):
967+        # we don't actually create the corruption-advisory dir until necessary
968+        self.corruption_advisory_dir = os.path.join(self.storedir,
969+                                                    "corruption-advisories")
970+
971+    def _setup_bucket_counter(self):
972+        statefname = os.path.join(self.storedir, "bucket_counter.state")
973+        self.bucket_counter = FSBucketCountingCrawler(statefname)
974+        self.bucket_counter.setServiceParent(self)
975+
976+    def _setup_lease_checkerf(self, expiration_policy):
977+        statefile = os.path.join(self.storedir, "lease_checker.state")
978+        historyfile = os.path.join(self.storedir, "lease_checker.history")
979+        self.lease_checker = FSLeaseCheckingCrawler(statefile, historyfile, expiration_policy)
980+        self.lease_checker.setServiceParent(self)
981+
982+    def get_available_space(self):
983+        if self.readonly:
984+            return 0
985+        return fileutil.get_available_space(self.storedir, self.reserved_space)
986+
987+    def get_shares(self, storage_index):
988+        """Return a list of the FSBShare objects that correspond to the passed storage_index."""
989+        finalstoragedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
990+        try:
991+            for f in os.listdir(finalstoragedir):
992+                if NUM_RE.match(f):
993+                    filename = os.path.join(finalstoragedir, f)
994+                    yield FSBShare(filename, int(f))
995+        except OSError:
996+            # Commonly caused by there being no buckets at all.
997+            pass
998+       
999+    def make_bucket_writer(self, storage_index, shnum, max_space_per_bucket, lease_info, canary):
1000+        immsh = ImmutableShare(self.sharedir, storage_index, shnum, max_size=max_space_per_bucket, create=True)
1001+        bw = BucketWriter(self.ss, immsh, max_space_per_bucket, lease_info, canary)
1002+        return bw
1003+       
1004+
1005+# each share file (in storage/shares/$SI/$SHNUM) contains lease information
1006+# and share data. The share data is accessed by RIBucketWriter.write and
1007+# RIBucketReader.read . The lease information is not accessible through these
1008+# interfaces.
1009+
1010+# The share file has the following layout:
1011+#  0x00: share file version number, four bytes, current version is 1
1012+#  0x04: share data length, four bytes big-endian = A # See Footnote 1 below.
1013+#  0x08: number of leases, four bytes big-endian
1014+#  0x0c: beginning of share data (see immutable.layout.WriteBucketProxy)
1015+#  A+0x0c = B: first lease. Lease format is:
1016+#   B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner
1017+#   B+0x04: renew secret, 32 bytes (SHA256)
1018+#   B+0x24: cancel secret, 32 bytes (SHA256)
1019+#   B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch
1020+#   B+0x48: next lease, or end of record
1021+
1022+# Footnote 1: as of Tahoe v1.3.0 this field is not used by storage servers,
1023+# but it is still filled in by storage servers in case the storage server
1024+# software gets downgraded from >= Tahoe v1.3.0 to < Tahoe v1.3.0, or the
1025+# share file is moved from one storage server to another. The value stored in
1026+# this field is truncated, so if the actual share data length is >= 2**32,
1027+# then the value stored in this field will be the actual share data length
1028+# modulo 2**32.
1029+
1030+class ImmutableShare:
1031+    LEASE_SIZE = struct.calcsize(">L32s32sL")
1032+    sharetype = "immutable"
1033+
1034+    def __init__(self, sharedir, storageindex, shnum, max_size=None, create=False):
1035+        """ If max_size is not None then I won't allow more than
1036+        max_size to be written to me. If create=True then max_size
1037+        must not be None. """
1038+        precondition((max_size is not None) or (not create), max_size, create)
1039+        self.shnum = shnum
1040+        self.fname = os.path.join(sharedir, storage_index_to_dir(storageindex), str(shnum))
1041+        self._max_size = max_size
1042+        if create:
1043+            # touch the file, so later callers will see that we're working on
1044+            # it. Also construct the metadata.
1045+            assert not os.path.exists(self.fname)
1046+            fileutil.make_dirs(os.path.dirname(self.fname))
1047+            f = open(self.fname, 'wb')
1048+            # The second field -- the four-byte share data length -- is no
1049+            # longer used as of Tahoe v1.3.0, but we continue to write it in
1050+            # there in case someone downgrades a storage server from >=
1051+            # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
1052+            # server to another, etc. We do saturation -- a share data length
1053+            # larger than 2**32-1 (what can fit into the field) is marked as
1054+            # the largest length that can fit into the field. That way, even
1055+            # if this does happen, the old < v1.3.0 server will still allow
1056+            # clients to read the first part of the share.
1057+            f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
1058+            f.close()
1059+            self._lease_offset = max_size + 0x0c
1060+            self._num_leases = 0
1061+        else:
1062+            f = open(self.fname, 'rb')
1063+            filesize = os.path.getsize(self.fname)
1064+            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
1065+            f.close()
1066+            if version != 1:
1067+                msg = "sharefile %s had version %d but we wanted 1" % \
1068+                      (self.fname, version)
1069+                raise UnknownImmutableContainerVersionError(msg)
1070+            self._num_leases = num_leases
1071+            self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
1072+        self._data_offset = 0xc
1073+
1074+    def unlink(self):
1075+        os.unlink(self.fname)
1076+
1077+    def read_share_data(self, offset, length):
1078+        precondition(offset >= 0)
1079+        # Reads beyond the end of the data are truncated. Reads that start
1080+        # beyond the end of the data return an empty string.
1081+        seekpos = self._data_offset+offset
1082+        fsize = os.path.getsize(self.fname)
1083+        actuallength = max(0, min(length, fsize-seekpos))
1084+        if actuallength == 0:
1085+            return ""
1086+        f = open(self.fname, 'rb')
1087+        f.seek(seekpos)
1088+        return f.read(actuallength)
1089+
1090+    def write_share_data(self, offset, data):
1091+        length = len(data)
1092+        precondition(offset >= 0, offset)
1093+        if self._max_size is not None and offset+length > self._max_size:
1094+            raise DataTooLargeError(self._max_size, offset, length)
1095+        f = open(self.fname, 'rb+')
1096+        real_offset = self._data_offset+offset
1097+        f.seek(real_offset)
1098+        assert f.tell() == real_offset
1099+        f.write(data)
1100+        f.close()
1101+
1102+    def _write_lease_record(self, f, lease_number, lease_info):
1103+        offset = self._lease_offset + lease_number * self.LEASE_SIZE
1104+        f.seek(offset)
1105+        assert f.tell() == offset
1106+        f.write(lease_info.to_immutable_data())
1107+
1108+    def _read_num_leases(self, f):
1109+        f.seek(0x08)
1110+        (num_leases,) = struct.unpack(">L", f.read(4))
1111+        return num_leases
1112+
1113+    def _write_num_leases(self, f, num_leases):
1114+        f.seek(0x08)
1115+        f.write(struct.pack(">L", num_leases))
1116+
1117+    def _truncate_leases(self, f, num_leases):
1118+        f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)
1119+
1120+    def get_leases(self):
1121+        """Yields a LeaseInfo instance for all leases."""
1122+        f = open(self.fname, 'rb')
1123+        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
1124+        f.seek(self._lease_offset)
1125+        for i in range(num_leases):
1126+            data = f.read(self.LEASE_SIZE)
1127+            if data:
1128+                yield LeaseInfo().from_immutable_data(data)
1129+
1130+    def add_lease(self, lease_info):
1131+        f = open(self.fname, 'rb+')
1132+        num_leases = self._read_num_leases(f)
1133+        self._write_lease_record(f, num_leases, lease_info)
1134+        self._write_num_leases(f, num_leases+1)
1135+        f.close()
1136+
1137+    def renew_lease(self, renew_secret, new_expire_time):
1138+        for i,lease in enumerate(self.get_leases()):
1139+            if constant_time_compare(lease.renew_secret, renew_secret):
1140+                # yup. See if we need to update the owner time.
1141+                if new_expire_time > lease.expiration_time:
1142+                    # yes
1143+                    lease.expiration_time = new_expire_time
1144+                    f = open(self.fname, 'rb+')
1145+                    self._write_lease_record(f, i, lease)
1146+                    f.close()
1147+                return
1148+        raise IndexError("unable to renew non-existent lease")
1149+
1150+    def add_or_renew_lease(self, lease_info):
1151+        try:
1152+            self.renew_lease(lease_info.renew_secret,
1153+                             lease_info.expiration_time)
1154+        except IndexError:
1155+            self.add_lease(lease_info)
1156+
1157+
1158+    def cancel_lease(self, cancel_secret):
1159+        """Remove a lease with the given cancel_secret. If the last lease is
1160+        cancelled, the file will be removed. Return the number of bytes that
1161+        were freed (by truncating the list of leases, and possibly by
1162+        deleting the file. Raise IndexError if there was no lease with the
1163+        given cancel_secret.
1164+        """
1165+
1166+        leases = list(self.get_leases())
1167+        num_leases_removed = 0
1168+        for i,lease in enumerate(leases):
1169+            if constant_time_compare(lease.cancel_secret, cancel_secret):
1170+                leases[i] = None
1171+                num_leases_removed += 1
1172+        if not num_leases_removed:
1173+            raise IndexError("unable to find matching lease to cancel")
1174+        if num_leases_removed:
1175+            # pack and write out the remaining leases. We write these out in
1176+            # the same order as they were added, so that if we crash while
1177+            # doing this, we won't lose any non-cancelled leases.
1178+            leases = [l for l in leases if l] # remove the cancelled leases
1179+            f = open(self.fname, 'rb+')
1180+            for i,lease in enumerate(leases):
1181+                self._write_lease_record(f, i, lease)
1182+            self._write_num_leases(f, len(leases))
1183+            self._truncate_leases(f, len(leases))
1184+            f.close()
1185+        space_freed = self.LEASE_SIZE * num_leases_removed
1186+        if not len(leases):
1187+            space_freed += os.stat(self.fname)[stat.ST_SIZE]
1188+            self.unlink()
1189+        return space_freed
1190hunk ./src/allmydata/storage/backends/das/expirer.py 2
1191 import time, os, pickle, struct
1192-from allmydata.storage.crawler import ShareCrawler
1193-from allmydata.storage.shares import get_share_file
1194+from allmydata.storage.crawler import FSShareCrawler
1195 from allmydata.storage.common import UnknownMutableContainerVersionError, \
1196      UnknownImmutableContainerVersionError
1197 from twisted.python import log as twlog
1198hunk ./src/allmydata/storage/backends/das/expirer.py 7
1199 
1200-class LeaseCheckingCrawler(ShareCrawler):
1201+class FSLeaseCheckingCrawler(FSShareCrawler):
1202     """I examine the leases on all shares, determining which are still valid
1203     and which have expired. I can remove the expired leases (if so
1204     configured), and the share will be deleted when the last lease is
1205hunk ./src/allmydata/storage/backends/das/expirer.py 50
1206     slow_start = 360 # wait 6 minutes after startup
1207     minimum_cycle_time = 12*60*60 # not more than twice per day
1208 
1209-    def __init__(self, statefile, historyfile,
1210-                 expiration_enabled, mode,
1211-                 override_lease_duration, # used if expiration_mode=="age"
1212-                 cutoff_date, # used if expiration_mode=="cutoff-date"
1213-                 sharetypes):
1214+    def __init__(self, statefile, historyfile, expiration_policy):
1215         self.historyfile = historyfile
1216hunk ./src/allmydata/storage/backends/das/expirer.py 52
1217-        self.expiration_enabled = expiration_enabled
1218-        self.mode = mode
1219+        self.expiration_enabled = expiration_policy['enabled']
1220+        self.mode = expiration_policy['mode']
1221         self.override_lease_duration = None
1222         self.cutoff_date = None
1223         if self.mode == "age":
1224hunk ./src/allmydata/storage/backends/das/expirer.py 57
1225-            assert isinstance(override_lease_duration, (int, type(None)))
1226-            self.override_lease_duration = override_lease_duration # seconds
1227+            assert isinstance(expiration_policy['override_lease_duration'], (int, type(None)))
1228+            self.override_lease_duration = expiration_policy['override_lease_duration']# seconds
1229         elif self.mode == "cutoff-date":
1230hunk ./src/allmydata/storage/backends/das/expirer.py 60
1231-            assert isinstance(cutoff_date, int) # seconds-since-epoch
1232+            assert isinstance(expiration_policy['cutoff_date'], int) # seconds-since-epoch
1233             assert cutoff_date is not None
1234hunk ./src/allmydata/storage/backends/das/expirer.py 62
1235-            self.cutoff_date = cutoff_date
1236+            self.cutoff_date = expiration_policy['cutoff_date']
1237         else:
1238hunk ./src/allmydata/storage/backends/das/expirer.py 64
1239-            raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % mode)
1240-        self.sharetypes_to_expire = sharetypes
1241-        ShareCrawler.__init__(self, statefile)
1242+            raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % expiration_policy['mode'])
1243+        self.sharetypes_to_expire = expiration_policy['sharetypes']
1244+        FSShareCrawler.__init__(self, statefile)
1245 
1246     def add_initial_state(self):
1247         # we fill ["cycle-to-date"] here (even though they will be reset in
1248hunk ./src/allmydata/storage/backends/das/expirer.py 156
1249 
1250     def process_share(self, sharefilename):
1251         # first, find out what kind of a share it is
1252-        sf = get_share_file(sharefilename)
1253+        f = open(sharefilename, "rb")
1254+        prefix = f.read(32)
1255+        f.close()
1256+        if prefix == MutableShareFile.MAGIC:
1257+            sf = MutableShareFile(sharefilename)
1258+        else:
1259+            # otherwise assume it's immutable
1260+            sf = FSBShare(sharefilename)
1261         sharetype = sf.sharetype
1262         now = time.time()
1263         s = self.stat(sharefilename)
1264addfile ./src/allmydata/storage/backends/null/__init__.py
1265addfile ./src/allmydata/storage/backends/null/core.py
1266hunk ./src/allmydata/storage/backends/null/core.py 1
1267+from allmydata.storage.backends.base import Backend
1268+
1269+class NullCore(Backend):
1270+    def __init__(self):
1271+        Backend.__init__(self)
1272+
1273+    def get_available_space(self):
1274+        return None
1275+
1276+    def get_shares(self, storage_index):
1277+        return set()
1278+
1279+    def get_share(self, storage_index, sharenum):
1280+        return None
1281+
1282+    def make_bucket_writer(self, storage_index, shnum, max_space_per_bucket, lease_info, canary):
1283+        return NullBucketWriter()
1284hunk ./src/allmydata/storage/crawler.py 12
1285 class TimeSliceExceeded(Exception):
1286     pass
1287 
1288-class ShareCrawler(service.MultiService):
1289+class FSShareCrawler(service.MultiService):
1290     """A subcless of ShareCrawler is attached to a StorageServer, and
1291     periodically walks all of its shares, processing each one in some
1292     fashion. This crawl is rate-limited, to reduce the IO burden on the host,
1293hunk ./src/allmydata/storage/crawler.py 68
1294     cpu_slice = 1.0 # use up to 1.0 seconds before yielding
1295     minimum_cycle_time = 300 # don't run a cycle faster than this
1296 
1297-    def __init__(self, backend, statefile, allowed_cpu_percentage=None):
1298+    def __init__(self, statefname, allowed_cpu_percentage=None):
1299         service.MultiService.__init__(self)
1300         if allowed_cpu_percentage is not None:
1301             self.allowed_cpu_percentage = allowed_cpu_percentage
1302hunk ./src/allmydata/storage/crawler.py 72
1303-        self.backend = backend
1304+        self.statefname = statefname
1305         self.prefixes = [si_b2a(struct.pack(">H", i << (16-10)))[:2]
1306                          for i in range(2**10)]
1307         self.prefixes.sort()
1308hunk ./src/allmydata/storage/crawler.py 192
1309         #                            of the last bucket to be processed, or
1310         #                            None if we are sleeping between cycles
1311         try:
1312-            f = open(self.statefile, "rb")
1313+            f = open(self.statefname, "rb")
1314             state = pickle.load(f)
1315             f.close()
1316         except EnvironmentError:
1317hunk ./src/allmydata/storage/crawler.py 230
1318         else:
1319             last_complete_prefix = self.prefixes[lcpi]
1320         self.state["last-complete-prefix"] = last_complete_prefix
1321-        tmpfile = self.statefile + ".tmp"
1322+        tmpfile = self.statefname + ".tmp"
1323         f = open(tmpfile, "wb")
1324         pickle.dump(self.state, f)
1325         f.close()
1326hunk ./src/allmydata/storage/crawler.py 433
1327         pass
1328 
1329 
1330-class BucketCountingCrawler(ShareCrawler):
1331+class FSBucketCountingCrawler(FSShareCrawler):
1332     """I keep track of how many buckets are being managed by this server.
1333     This is equivalent to the number of distributed files and directories for
1334     which I am providing storage. The actual number of files+directories in
1335hunk ./src/allmydata/storage/crawler.py 446
1336 
1337     minimum_cycle_time = 60*60 # we don't need this more than once an hour
1338 
1339-    def __init__(self, statefile, num_sample_prefixes=1):
1340-        ShareCrawler.__init__(self, statefile)
1341+    def __init__(self, statefname, num_sample_prefixes=1):
1342+        FSShareCrawler.__init__(self, statefname)
1343         self.num_sample_prefixes = num_sample_prefixes
1344 
1345     def add_initial_state(self):
1346hunk ./src/allmydata/storage/immutable.py 14
1347 from allmydata.storage.common import UnknownImmutableContainerVersionError, \
1348      DataTooLargeError
1349 
1350-# each share file (in storage/shares/$SI/$SHNUM) contains lease information
1351-# and share data. The share data is accessed by RIBucketWriter.write and
1352-# RIBucketReader.read . The lease information is not accessible through these
1353-# interfaces.
1354-
1355-# The share file has the following layout:
1356-#  0x00: share file version number, four bytes, current version is 1
1357-#  0x04: share data length, four bytes big-endian = A # See Footnote 1 below.
1358-#  0x08: number of leases, four bytes big-endian
1359-#  0x0c: beginning of share data (see immutable.layout.WriteBucketProxy)
1360-#  A+0x0c = B: first lease. Lease format is:
1361-#   B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner
1362-#   B+0x04: renew secret, 32 bytes (SHA256)
1363-#   B+0x24: cancel secret, 32 bytes (SHA256)
1364-#   B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch
1365-#   B+0x48: next lease, or end of record
1366-
1367-# Footnote 1: as of Tahoe v1.3.0 this field is not used by storage servers,
1368-# but it is still filled in by storage servers in case the storage server
1369-# software gets downgraded from >= Tahoe v1.3.0 to < Tahoe v1.3.0, or the
1370-# share file is moved from one storage server to another. The value stored in
1371-# this field is truncated, so if the actual share data length is >= 2**32,
1372-# then the value stored in this field will be the actual share data length
1373-# modulo 2**32.
1374-
1375-class ShareFile:
1376-    LEASE_SIZE = struct.calcsize(">L32s32sL")
1377-    sharetype = "immutable"
1378-
1379-    def __init__(self, filename, max_size=None, create=False):
1380-        """ If max_size is not None then I won't allow more than
1381-        max_size to be written to me. If create=True then max_size
1382-        must not be None. """
1383-        precondition((max_size is not None) or (not create), max_size, create)
1384-        self.home = filename
1385-        self._max_size = max_size
1386-        if create:
1387-            # touch the file, so later callers will see that we're working on
1388-            # it. Also construct the metadata.
1389-            assert not os.path.exists(self.home)
1390-            fileutil.make_dirs(os.path.dirname(self.home))
1391-            f = open(self.home, 'wb')
1392-            # The second field -- the four-byte share data length -- is no
1393-            # longer used as of Tahoe v1.3.0, but we continue to write it in
1394-            # there in case someone downgrades a storage server from >=
1395-            # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
1396-            # server to another, etc. We do saturation -- a share data length
1397-            # larger than 2**32-1 (what can fit into the field) is marked as
1398-            # the largest length that can fit into the field. That way, even
1399-            # if this does happen, the old < v1.3.0 server will still allow
1400-            # clients to read the first part of the share.
1401-            f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
1402-            f.close()
1403-            self._lease_offset = max_size + 0x0c
1404-            self._num_leases = 0
1405-        else:
1406-            f = open(self.home, 'rb')
1407-            filesize = os.path.getsize(self.home)
1408-            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
1409-            f.close()
1410-            if version != 1:
1411-                msg = "sharefile %s had version %d but we wanted 1" % \
1412-                      (filename, version)
1413-                raise UnknownImmutableContainerVersionError(msg)
1414-            self._num_leases = num_leases
1415-            self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
1416-        self._data_offset = 0xc
1417-
1418-    def unlink(self):
1419-        os.unlink(self.home)
1420-
1421-    def read_share_data(self, offset, length):
1422-        precondition(offset >= 0)
1423-        # Reads beyond the end of the data are truncated. Reads that start
1424-        # beyond the end of the data return an empty string.
1425-        seekpos = self._data_offset+offset
1426-        fsize = os.path.getsize(self.home)
1427-        actuallength = max(0, min(length, fsize-seekpos))
1428-        if actuallength == 0:
1429-            return ""
1430-        f = open(self.home, 'rb')
1431-        f.seek(seekpos)
1432-        return f.read(actuallength)
1433-
1434-    def write_share_data(self, offset, data):
1435-        length = len(data)
1436-        precondition(offset >= 0, offset)
1437-        if self._max_size is not None and offset+length > self._max_size:
1438-            raise DataTooLargeError(self._max_size, offset, length)
1439-        f = open(self.home, 'rb+')
1440-        real_offset = self._data_offset+offset
1441-        f.seek(real_offset)
1442-        assert f.tell() == real_offset
1443-        f.write(data)
1444-        f.close()
1445-
1446-    def _write_lease_record(self, f, lease_number, lease_info):
1447-        offset = self._lease_offset + lease_number * self.LEASE_SIZE
1448-        f.seek(offset)
1449-        assert f.tell() == offset
1450-        f.write(lease_info.to_immutable_data())
1451-
1452-    def _read_num_leases(self, f):
1453-        f.seek(0x08)
1454-        (num_leases,) = struct.unpack(">L", f.read(4))
1455-        return num_leases
1456-
1457-    def _write_num_leases(self, f, num_leases):
1458-        f.seek(0x08)
1459-        f.write(struct.pack(">L", num_leases))
1460-
1461-    def _truncate_leases(self, f, num_leases):
1462-        f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)
1463-
1464-    def get_leases(self):
1465-        """Yields a LeaseInfo instance for all leases."""
1466-        f = open(self.home, 'rb')
1467-        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
1468-        f.seek(self._lease_offset)
1469-        for i in range(num_leases):
1470-            data = f.read(self.LEASE_SIZE)
1471-            if data:
1472-                yield LeaseInfo().from_immutable_data(data)
1473-
1474-    def add_lease(self, lease_info):
1475-        f = open(self.home, 'rb+')
1476-        num_leases = self._read_num_leases(f)
1477-        self._write_lease_record(f, num_leases, lease_info)
1478-        self._write_num_leases(f, num_leases+1)
1479-        f.close()
1480-
1481-    def renew_lease(self, renew_secret, new_expire_time):
1482-        for i,lease in enumerate(self.get_leases()):
1483-            if constant_time_compare(lease.renew_secret, renew_secret):
1484-                # yup. See if we need to update the owner time.
1485-                if new_expire_time > lease.expiration_time:
1486-                    # yes
1487-                    lease.expiration_time = new_expire_time
1488-                    f = open(self.home, 'rb+')
1489-                    self._write_lease_record(f, i, lease)
1490-                    f.close()
1491-                return
1492-        raise IndexError("unable to renew non-existent lease")
1493-
1494-    def add_or_renew_lease(self, lease_info):
1495-        try:
1496-            self.renew_lease(lease_info.renew_secret,
1497-                             lease_info.expiration_time)
1498-        except IndexError:
1499-            self.add_lease(lease_info)
1500-
1501-
1502-    def cancel_lease(self, cancel_secret):
1503-        """Remove a lease with the given cancel_secret. If the last lease is
1504-        cancelled, the file will be removed. Return the number of bytes that
1505-        were freed (by truncating the list of leases, and possibly by
1506-        deleting the file. Raise IndexError if there was no lease with the
1507-        given cancel_secret.
1508-        """
1509-
1510-        leases = list(self.get_leases())
1511-        num_leases_removed = 0
1512-        for i,lease in enumerate(leases):
1513-            if constant_time_compare(lease.cancel_secret, cancel_secret):
1514-                leases[i] = None
1515-                num_leases_removed += 1
1516-        if not num_leases_removed:
1517-            raise IndexError("unable to find matching lease to cancel")
1518-        if num_leases_removed:
1519-            # pack and write out the remaining leases. We write these out in
1520-            # the same order as they were added, so that if we crash while
1521-            # doing this, we won't lose any non-cancelled leases.
1522-            leases = [l for l in leases if l] # remove the cancelled leases
1523-            f = open(self.home, 'rb+')
1524-            for i,lease in enumerate(leases):
1525-                self._write_lease_record(f, i, lease)
1526-            self._write_num_leases(f, len(leases))
1527-            self._truncate_leases(f, len(leases))
1528-            f.close()
1529-        space_freed = self.LEASE_SIZE * num_leases_removed
1530-        if not len(leases):
1531-            space_freed += os.stat(self.home)[stat.ST_SIZE]
1532-            self.unlink()
1533-        return space_freed
1534-class NullBucketWriter(Referenceable):
1535-    implements(RIBucketWriter)
1536-
1537-    def remote_write(self, offset, data):
1538-        return
1539-
1540 class BucketWriter(Referenceable):
1541     implements(RIBucketWriter)
1542 
1543hunk ./src/allmydata/storage/immutable.py 17
1544-    def __init__(self, ss, incominghome, finalhome, max_size, lease_info, canary):
1545+    def __init__(self, ss, immutableshare, max_size, lease_info, canary):
1546         self.ss = ss
1547hunk ./src/allmydata/storage/immutable.py 19
1548-        self.incominghome = incominghome
1549-        self.finalhome = finalhome
1550         self._max_size = max_size # don't allow the client to write more than this
1551         self._canary = canary
1552         self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected)
1553hunk ./src/allmydata/storage/immutable.py 24
1554         self.closed = False
1555         self.throw_out_all_data = False
1556-        self._sharefile = ShareFile(incominghome, create=True, max_size=max_size)
1557+        self._sharefile = immutableshare
1558         # also, add our lease to the file now, so that other ones can be
1559         # added by simultaneous uploaders
1560         self._sharefile.add_lease(lease_info)
1561hunk ./src/allmydata/storage/server.py 16
1562 from allmydata.storage.lease import LeaseInfo
1563 from allmydata.storage.mutable import MutableShareFile, EmptyShare, \
1564      create_mutable_sharefile
1565-from allmydata.storage.immutable import ShareFile, NullBucketWriter, BucketWriter, BucketReader
1566-from allmydata.storage.crawler import BucketCountingCrawler
1567-from allmydata.storage.expirer import LeaseCheckingCrawler
1568 
1569 from zope.interface import implements
1570 
1571hunk ./src/allmydata/storage/server.py 19
1572-# A Backend is a MultiService so that its server's crawlers (if the server has any) can
1573-# be started and stopped.
1574-class Backend(service.MultiService):
1575-    implements(IStatsProducer)
1576-    def __init__(self):
1577-        service.MultiService.__init__(self)
1578-
1579-    def get_bucket_shares(self):
1580-        """XXX"""
1581-        raise NotImplementedError
1582-
1583-    def get_share(self):
1584-        """XXX"""
1585-        raise NotImplementedError
1586-
1587-    def make_bucket_writer(self):
1588-        """XXX"""
1589-        raise NotImplementedError
1590-
1591-class NullBackend(Backend):
1592-    def __init__(self):
1593-        Backend.__init__(self)
1594-
1595-    def get_available_space(self):
1596-        return None
1597-
1598-    def get_bucket_shares(self, storage_index):
1599-        return set()
1600-
1601-    def get_share(self, storage_index, sharenum):
1602-        return None
1603-
1604-    def make_bucket_writer(self, storage_index, shnum, max_space_per_bucket, lease_info, canary):
1605-        return NullBucketWriter()
1606-
1607-class FSBackend(Backend):
1608-    def __init__(self, storedir, readonly=False, reserved_space=0):
1609-        Backend.__init__(self)
1610-
1611-        self._setup_storage(storedir, readonly, reserved_space)
1612-        self._setup_corruption_advisory()
1613-        self._setup_bucket_counter()
1614-        self._setup_lease_checkerf()
1615-
1616-    def _setup_storage(self, storedir, readonly, reserved_space):
1617-        self.storedir = storedir
1618-        self.readonly = readonly
1619-        self.reserved_space = int(reserved_space)
1620-        if self.reserved_space:
1621-            if self.get_available_space() is None:
1622-                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
1623-                        umid="0wZ27w", level=log.UNUSUAL)
1624-
1625-        self.sharedir = os.path.join(self.storedir, "shares")
1626-        fileutil.make_dirs(self.sharedir)
1627-        self.incomingdir = os.path.join(self.sharedir, 'incoming')
1628-        self._clean_incomplete()
1629-
1630-    def _clean_incomplete(self):
1631-        fileutil.rm_dir(self.incomingdir)
1632-        fileutil.make_dirs(self.incomingdir)
1633-
1634-    def _setup_corruption_advisory(self):
1635-        # we don't actually create the corruption-advisory dir until necessary
1636-        self.corruption_advisory_dir = os.path.join(self.storedir,
1637-                                                    "corruption-advisories")
1638-
1639-    def _setup_bucket_counter(self):
1640-        statefile = os.path.join(self.storedir, "bucket_counter.state")
1641-        self.bucket_counter = BucketCountingCrawler(statefile)
1642-        self.bucket_counter.setServiceParent(self)
1643-
1644-    def _setup_lease_checkerf(self):
1645-        statefile = os.path.join(self.storedir, "lease_checker.state")
1646-        historyfile = os.path.join(self.storedir, "lease_checker.history")
1647-        self.lease_checker = LeaseCheckingCrawler(statefile, historyfile,
1648-                                   expiration_enabled, expiration_mode,
1649-                                   expiration_override_lease_duration,
1650-                                   expiration_cutoff_date,
1651-                                   expiration_sharetypes)
1652-        self.lease_checker.setServiceParent(self)
1653-
1654-    def get_available_space(self):
1655-        if self.readonly:
1656-            return 0
1657-        return fileutil.get_available_space(self.storedir, self.reserved_space)
1658-
1659-    def get_bucket_shares(self, storage_index):
1660-        """Return a list of (shnum, pathname) tuples for files that hold
1661-        shares for this storage_index. In each tuple, 'shnum' will always be
1662-        the integer form of the last component of 'pathname'."""
1663-        storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
1664-        try:
1665-            for f in os.listdir(storagedir):
1666-                if NUM_RE.match(f):
1667-                    filename = os.path.join(storagedir, f)
1668-                    yield (int(f), filename)
1669-        except OSError:
1670-            # Commonly caused by there being no buckets at all.
1671-            pass
1672-
1673 # storage/
1674 # storage/shares/incoming
1675 #   incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will
1676hunk ./src/allmydata/storage/server.py 32
1677 # $SHARENUM matches this regex:
1678 NUM_RE=re.compile("^[0-9]+$")
1679 
1680-
1681-
1682 class StorageServer(service.MultiService, Referenceable):
1683     implements(RIStorageServer, IStatsProducer)
1684     name = 'storage'
1685hunk ./src/allmydata/storage/server.py 35
1686-    LeaseCheckerClass = LeaseCheckingCrawler
1687 
1688     def __init__(self, nodeid, backend, reserved_space=0,
1689                  readonly_storage=False,
1690hunk ./src/allmydata/storage/server.py 38
1691-                 stats_provider=None,
1692-                 expiration_enabled=False,
1693-                 expiration_mode="age",
1694-                 expiration_override_lease_duration=None,
1695-                 expiration_cutoff_date=None,
1696-                 expiration_sharetypes=("mutable", "immutable")):
1697+                 stats_provider=None ):
1698         service.MultiService.__init__(self)
1699         assert isinstance(nodeid, str)
1700         assert len(nodeid) == 20
1701hunk ./src/allmydata/storage/server.py 217
1702         # they asked about: this will save them a lot of work. Add or update
1703         # leases for all of them: if they want us to hold shares for this
1704         # file, they'll want us to hold leases for this file.
1705-        for (shnum, fn) in self.backend.get_bucket_shares(storage_index):
1706-            alreadygot.add(shnum)
1707-            sf = ShareFile(fn)
1708-            sf.add_or_renew_lease(lease_info)
1709-
1710-        for shnum in sharenums:
1711-            share = self.backend.get_share(storage_index, shnum)
1712+        for share in self.backend.get_shares(storage_index):
1713+            alreadygot.add(share.shnum)
1714+            share.add_or_renew_lease(lease_info)
1715 
1716hunk ./src/allmydata/storage/server.py 221
1717-            if not share:
1718-                if (not limited) or (remaining_space >= max_space_per_bucket):
1719-                    # ok! we need to create the new share file.
1720-                    bw = self.backend.make_bucket_writer(storage_index, shnum,
1721-                                      max_space_per_bucket, lease_info, canary)
1722-                    bucketwriters[shnum] = bw
1723-                    self._active_writers[bw] = 1
1724-                    if limited:
1725-                        remaining_space -= max_space_per_bucket
1726-                else:
1727-                    # bummer! not enough space to accept this bucket
1728-                    pass
1729+        for shnum in (sharenums - alreadygot):
1730+            if (not limited) or (remaining_space >= max_space_per_bucket):
1731+                #XXX or should the following line occur in storage server construtor? ok! we need to create the new share file.
1732+                self.backend.set_storage_server(self)
1733+                bw = self.backend.make_bucket_writer(storage_index, shnum,
1734+                                                     max_space_per_bucket, lease_info, canary)
1735+                bucketwriters[shnum] = bw
1736+                self._active_writers[bw] = 1
1737+                if limited:
1738+                    remaining_space -= max_space_per_bucket
1739 
1740hunk ./src/allmydata/storage/server.py 232
1741-            elif share.is_complete():
1742-                # great! we already have it. easy.
1743-                pass
1744-            elif not share.is_complete():
1745-                # Note that we don't create BucketWriters for shnums that
1746-                # have a partial share (in incoming/), so if a second upload
1747-                # occurs while the first is still in progress, the second
1748-                # uploader will use different storage servers.
1749-                pass
1750+        #XXX We SHOULD DOCUMENT LATER.
1751 
1752         self.add_latency("allocate", time.time() - start)
1753         return alreadygot, bucketwriters
1754hunk ./src/allmydata/storage/server.py 238
1755 
1756     def _iter_share_files(self, storage_index):
1757-        for shnum, filename in self._get_bucket_shares(storage_index):
1758+        for shnum, filename in self._get_shares(storage_index):
1759             f = open(filename, 'rb')
1760             header = f.read(32)
1761             f.close()
1762hunk ./src/allmydata/storage/server.py 318
1763         si_s = si_b2a(storage_index)
1764         log.msg("storage: get_buckets %s" % si_s)
1765         bucketreaders = {} # k: sharenum, v: BucketReader
1766-        for shnum, filename in self.backend.get_bucket_shares(storage_index):
1767+        for shnum, filename in self.backend.get_shares(storage_index):
1768             bucketreaders[shnum] = BucketReader(self, filename,
1769                                                 storage_index, shnum)
1770         self.add_latency("get", time.time() - start)
1771hunk ./src/allmydata/storage/server.py 334
1772         # since all shares get the same lease data, we just grab the leases
1773         # from the first share
1774         try:
1775-            shnum, filename = self._get_bucket_shares(storage_index).next()
1776+            shnum, filename = self._get_shares(storage_index).next()
1777             sf = ShareFile(filename)
1778             return sf.get_leases()
1779         except StopIteration:
1780hunk ./src/allmydata/storage/shares.py 1
1781-#! /usr/bin/python
1782-
1783-from allmydata.storage.mutable import MutableShareFile
1784-from allmydata.storage.immutable import ShareFile
1785-
1786-def get_share_file(filename):
1787-    f = open(filename, "rb")
1788-    prefix = f.read(32)
1789-    f.close()
1790-    if prefix == MutableShareFile.MAGIC:
1791-        return MutableShareFile(filename)
1792-    # otherwise assume it's immutable
1793-    return ShareFile(filename)
1794-
1795rmfile ./src/allmydata/storage/shares.py
1796hunk ./src/allmydata/test/common_util.py 20
1797 
1798 def flip_one_bit(s, offset=0, size=None):
1799     """ flip one random bit of the string s, in a byte greater than or equal to offset and less
1800-    than offset+size. """
1801+    than offset+size. Return the new string. """
1802     if size is None:
1803         size=len(s)-offset
1804     i = randrange(offset, offset+size)
1805hunk ./src/allmydata/test/test_backends.py 7
1806 
1807 from allmydata.test.common_util import ReallyEqualMixin
1808 
1809-import mock
1810+import mock, os
1811 
1812 # This is the code that we're going to be testing.
1813hunk ./src/allmydata/test/test_backends.py 10
1814-from allmydata.storage.server import StorageServer, FSBackend, NullBackend
1815+from allmydata.storage.server import StorageServer
1816+
1817+from allmydata.storage.backends.das.core import DASCore
1818+from allmydata.storage.backends.null.core import NullCore
1819+
1820 
1821 # The following share file contents was generated with
1822 # storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
1823hunk ./src/allmydata/test/test_backends.py 22
1824 share_data = 'a\x00\x00\x00\x00xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\x00(\xde\x80'
1825 share_file_data = '\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01' + share_data
1826 
1827-sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
1828+tempdir = 'teststoredir'
1829+sharedirname = os.path.join(tempdir, 'shares', 'or', 'orsxg5dtorxxeylhmvpws3temv4a')
1830+sharefname = os.path.join(sharedirname, '0')
1831 
1832 class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
1833     @mock.patch('time.time')
1834hunk ./src/allmydata/test/test_backends.py 58
1835         filesystem in only the prescribed ways. """
1836 
1837         def call_open(fname, mode):
1838-            if fname == 'testdir/bucket_counter.state':
1839-                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
1840-            elif fname == 'testdir/lease_checker.state':
1841-                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
1842-            elif fname == 'testdir/lease_checker.history':
1843+            if fname == os.path.join(tempdir,'bucket_counter.state'):
1844+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'bucket_counter.state'))
1845+            elif fname == os.path.join(tempdir, 'lease_checker.state'):
1846+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'lease_checker.state'))
1847+            elif fname == os.path.join(tempdir, 'lease_checker.history'):
1848                 return StringIO()
1849             else:
1850                 self.fail("Server with FS backend tried to open '%s' in mode '%s'" % (fname, mode))
1851hunk ./src/allmydata/test/test_backends.py 124
1852     @mock.patch('__builtin__.open')
1853     def setUp(self, mockopen):
1854         def call_open(fname, mode):
1855-            if fname == 'testdir/bucket_counter.state':
1856-                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
1857-            elif fname == 'testdir/lease_checker.state':
1858-                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
1859-            elif fname == 'testdir/lease_checker.history':
1860+            if fname == os.path.join(tempdir, 'bucket_counter.state'):
1861+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'bucket_counter.state'))
1862+            elif fname == os.path.join(tempdir, 'lease_checker.state'):
1863+                raise IOError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'lease_checker.state'))
1864+            elif fname == os.path.join(tempdir, 'lease_checker.history'):
1865                 return StringIO()
1866         mockopen.side_effect = call_open
1867hunk ./src/allmydata/test/test_backends.py 131
1868-
1869-        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=FSBackend('teststoredir'))
1870+        expiration_policy = {'enabled' : False,
1871+                             'mode' : 'age',
1872+                             'override_lease_duration' : None,
1873+                             'cutoff_date' : None,
1874+                             'sharetypes' : None}
1875+        testbackend = DASCore(tempdir, expiration_policy)
1876+        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=DASCore(tempdir, expiration_policy) )
1877 
1878     @mock.patch('time.time')
1879     @mock.patch('os.mkdir')
1880hunk ./src/allmydata/test/test_backends.py 148
1881         """ Write a new share. """
1882 
1883         def call_listdir(dirname):
1884-            self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
1885-            raise OSError(2, "No such file or directory: 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a'")
1886+            self.failUnlessReallyEqual(dirname, sharedirname)
1887+            raise OSError(2, "No such file or directory: '%s'" % os.path.join(tempdir, 'shares/or/orsxg5dtorxxeylhmvpws3temv4a'))
1888 
1889         mocklistdir.side_effect = call_listdir
1890 
1891hunk ./src/allmydata/test/test_backends.py 178
1892 
1893         sharefile = MockFile()
1894         def call_open(fname, mode):
1895-            self.failUnlessReallyEqual(fname, 'testdir/shares/incoming/or/orsxg5dtorxxeylhmvpws3temv4a/0' )
1896+            self.failUnlessReallyEqual(fname, os.path.join(tempdir, 'shares', 'or', 'orsxg5dtorxxeylhmvpws3temv4a', '0' ))
1897             return sharefile
1898 
1899         mockopen.side_effect = call_open
1900hunk ./src/allmydata/test/test_backends.py 200
1901         StorageServer object. """
1902 
1903         def call_listdir(dirname):
1904-            self.failUnlessReallyEqual(dirname,'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
1905+            self.failUnlessReallyEqual(dirname, os.path.join(tempdir, 'shares', 'or', 'orsxg5dtorxxeylhmvpws3temv4a'))
1906             return ['0']
1907 
1908         mocklistdir.side_effect = call_listdir
1909}
1910
1911Context:
1912
1913[Rename test_package_initialization.py to (much shorter) test_import.py .
1914Brian Warner <warner@lothar.com>**20110611190234
1915 Ignore-this: 3eb3dbac73600eeff5cfa6b65d65822
1916 
1917 The former name was making my 'ls' listings hard to read, by forcing them
1918 down to just two columns.
1919]
1920[tests: fix tests to accomodate [20110611153758-92b7f-0ba5e4726fb6318dac28fb762a6512a003f4c430]
1921zooko@zooko.com**20110611163741
1922 Ignore-this: 64073a5f39e7937e8e5e1314c1a302d1
1923 Apparently none of the two authors (stercor, terrell), three reviewers (warner, davidsarah, terrell), or one committer (me) actually ran the tests. This is presumably due to #20.
1924 fixes #1412
1925]
1926[wui: right-align the size column in the WUI
1927zooko@zooko.com**20110611153758
1928 Ignore-this: 492bdaf4373c96f59f90581c7daf7cd7
1929 Thanks to Ted "stercor" Rolle Jr. and Terrell Russell.
1930 fixes #1412
1931]
1932[docs: three minor fixes
1933zooko@zooko.com**20110610121656
1934 Ignore-this: fec96579eb95aceb2ad5fc01a814c8a2
1935 CREDITS for arc for stats tweak
1936 fix link to .zip file in quickstart.rst (thanks to ChosenOne for noticing)
1937 English usage tweak
1938]
1939[docs/running.rst: fix stray HTML (not .rst) link noticed by ChosenOne.
1940david-sarah@jacaranda.org**20110609223719
1941 Ignore-this: fc50ac9c94792dcac6f1067df8ac0d4a
1942]
1943[server.py:  get_latencies now reports percentiles _only_ if there are sufficient observations for the interpretation of the percentile to be unambiguous.
1944wilcoxjg@gmail.com**20110527120135
1945 Ignore-this: 2e7029764bffc60e26f471d7c2b6611e
1946 interfaces.py:  modified the return type of RIStatsProvider.get_stats to allow for None as a return value
1947 NEWS.rst, stats.py: documentation of change to get_latencies
1948 stats.rst: now documents percentile modification in get_latencies
1949 test_storage.py:  test_latencies now expects None in output categories that contain too few samples for the associated percentile to be unambiguously reported.
1950 fixes #1392
1951]
1952[docs: revert link in relnotes.txt from NEWS.rst to NEWS, since the former did not exist at revision 5000.
1953david-sarah@jacaranda.org**20110517011214
1954 Ignore-this: 6a5be6e70241e3ec0575641f64343df7
1955]
1956[docs: convert NEWS to NEWS.rst and change all references to it.
1957david-sarah@jacaranda.org**20110517010255
1958 Ignore-this: a820b93ea10577c77e9c8206dbfe770d
1959]
1960[docs: remove out-of-date docs/testgrid/introducer.furl and containing directory. fixes #1404
1961david-sarah@jacaranda.org**20110512140559
1962 Ignore-this: 784548fc5367fac5450df1c46890876d
1963]
1964[scripts/common.py: don't assume that the default alias is always 'tahoe' (it is, but the API of get_alias doesn't say so). refs #1342
1965david-sarah@jacaranda.org**20110130164923
1966 Ignore-this: a271e77ce81d84bb4c43645b891d92eb
1967]
1968[setup: don't catch all Exception from check_requirement(), but only PackagingError and ImportError
1969zooko@zooko.com**20110128142006
1970 Ignore-this: 57d4bc9298b711e4bc9dc832c75295de
1971 I noticed this because I had accidentally inserted a bug which caused AssertionError to be raised from check_requirement().
1972]
1973[M-x whitespace-cleanup
1974zooko@zooko.com**20110510193653
1975 Ignore-this: dea02f831298c0f65ad096960e7df5c7
1976]
1977[docs: fix typo in running.rst, thanks to arch_o_median
1978zooko@zooko.com**20110510193633
1979 Ignore-this: ca06de166a46abbc61140513918e79e8
1980]
1981[relnotes.txt: don't claim to work on Cygwin (which has been untested for some time). refs #1342
1982david-sarah@jacaranda.org**20110204204902
1983 Ignore-this: 85ef118a48453d93fa4cddc32d65b25b
1984]
1985[relnotes.txt: forseeable -> foreseeable. refs #1342
1986david-sarah@jacaranda.org**20110204204116
1987 Ignore-this: 746debc4d82f4031ebf75ab4031b3a9
1988]
1989[replace remaining .html docs with .rst docs
1990zooko@zooko.com**20110510191650
1991 Ignore-this: d557d960a986d4ac8216d1677d236399
1992 Remove install.html (long since deprecated).
1993 Also replace some obsolete references to install.html with references to quickstart.rst.
1994 Fix some broken internal references within docs/historical/historical_known_issues.txt.
1995 Thanks to Ravi Pinjala and Patrick McDonald.
1996 refs #1227
1997]
1998[docs: FTP-and-SFTP.rst: fix a minor error and update the information about which version of Twisted fixes #1297
1999zooko@zooko.com**20110428055232
2000 Ignore-this: b63cfb4ebdbe32fb3b5f885255db4d39
2001]
2002[munin tahoe_files plugin: fix incorrect file count
2003francois@ctrlaltdel.ch**20110428055312
2004 Ignore-this: 334ba49a0bbd93b4a7b06a25697aba34
2005 fixes #1391
2006]
2007[corrected "k must never be smaller than N" to "k must never be greater than N"
2008secorp@allmydata.org**20110425010308
2009 Ignore-this: 233129505d6c70860087f22541805eac
2010]
2011[Fix a test failure in test_package_initialization on Python 2.4.x due to exceptions being stringified differently than in later versions of Python. refs #1389
2012david-sarah@jacaranda.org**20110411190738
2013 Ignore-this: 7847d26bc117c328c679f08a7baee519
2014]
2015[tests: add test for including the ImportError message and traceback entry in the summary of errors from importing dependencies. refs #1389
2016david-sarah@jacaranda.org**20110410155844
2017 Ignore-this: fbecdbeb0d06a0f875fe8d4030aabafa
2018]
2019[allmydata/__init__.py: preserve the message and last traceback entry (file, line number, function, and source line) of ImportErrors in the package versions string. fixes #1389
2020david-sarah@jacaranda.org**20110410155705
2021 Ignore-this: 2f87b8b327906cf8bfca9440a0904900
2022]
2023[remove unused variable detected by pyflakes
2024zooko@zooko.com**20110407172231
2025 Ignore-this: 7344652d5e0720af822070d91f03daf9
2026]
2027[allmydata/__init__.py: Nicer reporting of unparseable version numbers in dependencies. fixes #1388
2028david-sarah@jacaranda.org**20110401202750
2029 Ignore-this: 9c6bd599259d2405e1caadbb3e0d8c7f
2030]
2031[update FTP-and-SFTP.rst: the necessary patch is included in Twisted-10.1
2032Brian Warner <warner@lothar.com>**20110325232511
2033 Ignore-this: d5307faa6900f143193bfbe14e0f01a
2034]
2035[control.py: remove all uses of s.get_serverid()
2036warner@lothar.com**20110227011203
2037 Ignore-this: f80a787953bd7fa3d40e828bde00e855
2038]
2039[web: remove some uses of s.get_serverid(), not all
2040warner@lothar.com**20110227011159
2041 Ignore-this: a9347d9cf6436537a47edc6efde9f8be
2042]
2043[immutable/downloader/fetcher.py: remove all get_serverid() calls
2044warner@lothar.com**20110227011156
2045 Ignore-this: fb5ef018ade1749348b546ec24f7f09a
2046]
2047[immutable/downloader/fetcher.py: fix diversity bug in server-response handling
2048warner@lothar.com**20110227011153
2049 Ignore-this: bcd62232c9159371ae8a16ff63d22c1b
2050 
2051 When blocks terminate (either COMPLETE or CORRUPT/DEAD/BADSEGNUM), the
2052 _shares_from_server dict was being popped incorrectly (using shnum as the
2053 index instead of serverid). I'm still thinking through the consequences of
2054 this bug. It was probably benign and really hard to detect. I think it would
2055 cause us to incorrectly believe that we're pulling too many shares from a
2056 server, and thus prefer a different server rather than asking for a second
2057 share from the first server. The diversity code is intended to spread out the
2058 number of shares simultaneously being requested from each server, but with
2059 this bug, it might be spreading out the total number of shares requested at
2060 all, not just simultaneously. (note that SegmentFetcher is scoped to a single
2061 segment, so the effect doesn't last very long).
2062]
2063[immutable/downloader/share.py: reduce get_serverid(), one left, update ext deps
2064warner@lothar.com**20110227011150
2065 Ignore-this: d8d56dd8e7b280792b40105e13664554
2066 
2067 test_download.py: create+check MyShare instances better, make sure they share
2068 Server objects, now that finder.py cares
2069]
2070[immutable/downloader/finder.py: reduce use of get_serverid(), one left
2071warner@lothar.com**20110227011146
2072 Ignore-this: 5785be173b491ae8a78faf5142892020
2073]
2074[immutable/offloaded.py: reduce use of get_serverid() a bit more
2075warner@lothar.com**20110227011142
2076 Ignore-this: b48acc1b2ae1b311da7f3ba4ffba38f
2077]
2078[immutable/upload.py: reduce use of get_serverid()
2079warner@lothar.com**20110227011138
2080 Ignore-this: ffdd7ff32bca890782119a6e9f1495f6
2081]
2082[immutable/checker.py: remove some uses of s.get_serverid(), not all
2083warner@lothar.com**20110227011134
2084 Ignore-this: e480a37efa9e94e8016d826c492f626e
2085]
2086[add remaining get_* methods to storage_client.Server, NoNetworkServer, and
2087warner@lothar.com**20110227011132
2088 Ignore-this: 6078279ddf42b179996a4b53bee8c421
2089 MockIServer stubs
2090]
2091[upload.py: rearrange _make_trackers a bit, no behavior changes
2092warner@lothar.com**20110227011128
2093 Ignore-this: 296d4819e2af452b107177aef6ebb40f
2094]
2095[happinessutil.py: finally rename merge_peers to merge_servers
2096warner@lothar.com**20110227011124
2097 Ignore-this: c8cd381fea1dd888899cb71e4f86de6e
2098]
2099[test_upload.py: factor out FakeServerTracker
2100warner@lothar.com**20110227011120
2101 Ignore-this: 6c182cba90e908221099472cc159325b
2102]
2103[test_upload.py: server-vs-tracker cleanup
2104warner@lothar.com**20110227011115
2105 Ignore-this: 2915133be1a3ba456e8603885437e03
2106]
2107[happinessutil.py: server-vs-tracker cleanup
2108warner@lothar.com**20110227011111
2109 Ignore-this: b856c84033562d7d718cae7cb01085a9
2110]
2111[upload.py: more tracker-vs-server cleanup
2112warner@lothar.com**20110227011107
2113 Ignore-this: bb75ed2afef55e47c085b35def2de315
2114]
2115[upload.py: fix var names to avoid confusion between 'trackers' and 'servers'
2116warner@lothar.com**20110227011103
2117 Ignore-this: 5d5e3415b7d2732d92f42413c25d205d
2118]
2119[refactor: s/peer/server/ in immutable/upload, happinessutil.py, test_upload
2120warner@lothar.com**20110227011100
2121 Ignore-this: 7ea858755cbe5896ac212a925840fe68
2122 
2123 No behavioral changes, just updating variable/method names and log messages.
2124 The effects outside these three files should be minimal: some exception
2125 messages changed (to say "server" instead of "peer"), and some internal class
2126 names were changed. A few things still use "peer" to minimize external
2127 changes, like UploadResults.timings["peer_selection"] and
2128 happinessutil.merge_peers, which can be changed later.
2129]
2130[storage_client.py: clean up test_add_server/test_add_descriptor, remove .test_servers
2131warner@lothar.com**20110227011056
2132 Ignore-this: efad933e78179d3d5fdcd6d1ef2b19cc
2133]
2134[test_client.py, upload.py:: remove KiB/MiB/etc constants, and other dead code
2135warner@lothar.com**20110227011051
2136 Ignore-this: dc83c5794c2afc4f81e592f689c0dc2d
2137]
2138[test: increase timeout on a network test because Francois's ARM machine hit that timeout
2139zooko@zooko.com**20110317165909
2140 Ignore-this: 380c345cdcbd196268ca5b65664ac85b
2141 I'm skeptical that the test was proceeding correctly but ran out of time. It seems more likely that it had gotten hung. But if we raise the timeout to an even more extravagant number then we can be even more certain that the test was never going to finish.
2142]
2143[docs/configuration.rst: add a "Frontend Configuration" section
2144Brian Warner <warner@lothar.com>**20110222014323
2145 Ignore-this: 657018aa501fe4f0efef9851628444ca
2146 
2147 this points to docs/frontends/*.rst, which were previously underlinked
2148]
2149[web/filenode.py: avoid calling req.finish() on closed HTTP connections. Closes #1366
2150"Brian Warner <warner@lothar.com>"**20110221061544
2151 Ignore-this: 799d4de19933f2309b3c0c19a63bb888
2152]
2153[Add unit tests for cross_check_pkg_resources_versus_import, and a regression test for ref #1355. This requires a little refactoring to make it testable.
2154david-sarah@jacaranda.org**20110221015817
2155 Ignore-this: 51d181698f8c20d3aca58b057e9c475a
2156]
2157[allmydata/__init__.py: .name was used in place of the correct .__name__ when printing an exception. Also, robustify string formatting by using %r instead of %s in some places. fixes #1355.
2158david-sarah@jacaranda.org**20110221020125
2159 Ignore-this: b0744ed58f161bf188e037bad077fc48
2160]
2161[Refactor StorageFarmBroker handling of servers
2162Brian Warner <warner@lothar.com>**20110221015804
2163 Ignore-this: 842144ed92f5717699b8f580eab32a51
2164 
2165 Pass around IServer instance instead of (peerid, rref) tuple. Replace
2166 "descriptor" with "server". Other replacements:
2167 
2168  get_all_servers -> get_connected_servers/get_known_servers
2169  get_servers_for_index -> get_servers_for_psi (now returns IServers)
2170 
2171 This change still needs to be pushed further down: lots of code is now
2172 getting the IServer and then distributing (peerid, rref) internally.
2173 Instead, it ought to distribute the IServer internally and delay
2174 extracting a serverid or rref until the last moment.
2175 
2176 no_network.py was updated to retain parallelism.
2177]
2178[TAG allmydata-tahoe-1.8.2
2179warner@lothar.com**20110131020101]
2180Patch bundle hash:
2181cd0da483db21aed37143104b0902de6f9eeb4911