2 patches for repository zooko@tahoe-lafs.org:/home/source/darcs/tahoe-lafs/trunk:

Fri Mar 25 14:35:14 MDT 2011 wilcoxjg@gmail.com
  * storage: new mocking tests of storage server read and write
  There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem, whereas these tests mock all filesystem calls.
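  (Not part of the bundle: for readers unfamiliar with the mock library, a minimal
  sketch of the interception technique these tests rely on; the filename and the
  contents below are purely illustrative.)

      import mock
      from StringIO import StringIO

      @mock.patch('__builtin__.open')
      def demo(mockopen):
          # Every call to the builtin open() made while this function runs is
          # routed to the mock; side_effect supplies an in-memory file instead.
          mockopen.side_effect = lambda fname, mode='r': StringIO('fake contents')
          f = open('/no/such/file')   # never touches the real filesystem
          return f.read()

      print demo()                    # prints 'fake contents'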

Wed Apr 6 14:38:12 MDT 2011 zooko@zooko.com
  * a bunch of incomplete work on #999, to be unrecorded in arctic's repo

New patches:

[storage: new mocking tests of storage server read and write
wilcoxjg@gmail.com**20110325203514
 Ignore-this: df65c3c4f061dd1516f88662023fdb41
 There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem, whereas these tests mock all filesystem calls.
] {
addfile ./src/allmydata/test/test_server.py
hunk ./src/allmydata/test/test_server.py 1
+from twisted.trial import unittest
+
+from StringIO import StringIO
+
+from allmydata.test.common_util import ReallyEqualMixin
+
+import mock
+
+# This is the code that we're going to be testing.
+from allmydata.storage.server import StorageServer
+
+# The following share file contents was generated with
+# storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
+# with share data == 'a'.
+share_data = 'a\x00\x00\x00\x00xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\x00(\xde\x80'
+share_file_data = '\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01' + share_data
+
+sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
+
+class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
+    @mock.patch('__builtin__.open')
+    def test_create_server(self, mockopen):
+        """ This tests whether a server instance can be constructed. """
+
+        def call_open(fname, mode):
+            if fname == 'testdir/bucket_counter.state':
+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
+            elif fname == 'testdir/lease_checker.state':
+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
+            elif fname == 'testdir/lease_checker.history':
+                return StringIO()
+        mockopen.side_effect = call_open
+
+        # Now begin the test.
+        s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
+
+        # You passed!
+
+class TestServer(unittest.TestCase, ReallyEqualMixin):
+    @mock.patch('__builtin__.open')
+    def setUp(self, mockopen):
+        def call_open(fname, mode):
+            if fname == 'testdir/bucket_counter.state':
+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
+            elif fname == 'testdir/lease_checker.state':
+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
+            elif fname == 'testdir/lease_checker.history':
+                return StringIO()
+        mockopen.side_effect = call_open
+
+        self.s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
+
+
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
+        """Handle a report of corruption."""
+
+        def call_listdir(dirname):
+            self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
+            raise OSError(2, "No such file or directory: 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a'")
+
+        mocklistdir.side_effect = call_listdir
+
+        class MockFile:
+            def __init__(self):
+                self.buffer = ''
+                self.pos = 0
+            def write(self, instring):
+                begin = self.pos
+                padlen = begin - len(self.buffer)
+                if padlen > 0:
+                    self.buffer += '\x00' * padlen
+                end = self.pos + len(instring)
+                self.buffer = self.buffer[:begin]+instring+self.buffer[end:]
+                self.pos = end
+            def close(self):
+                pass
+            def seek(self, pos):
+                self.pos = pos
+            def read(self, numberbytes):
+                return self.buffer[self.pos:self.pos+numberbytes]
+            def tell(self):
+                return self.pos
+
+        mocktime.return_value = 0
+
+        sharefile = MockFile()
+        def call_open(fname, mode):
+            self.failUnlessReallyEqual(fname, 'testdir/shares/incoming/or/orsxg5dtorxxeylhmvpws3temv4a/0' )
+            return sharefile
+
+        mockopen.side_effect = call_open
+        # Now begin the test.
+        alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
+        print bs
+        bs[0].remote_write(0, 'a')
+        self.failUnlessReallyEqual(sharefile.buffer, share_file_data)
+
+
+    @mock.patch('os.path.exists')
+    @mock.patch('os.path.getsize')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    def test_read_share(self, mocklistdir, mockopen, mockgetsize, mockexists):
+        """ This tests whether the code correctly finds and reads
+        shares written out by old (Tahoe-LAFS <= v1.8.2)
+        servers. There is a similar test in test_download, but that one
+        is from the perspective of the client and exercises a deeper
+        stack of code. This one is for exercising just the
+        StorageServer object. """
+
+        def call_listdir(dirname):
+            self.failUnlessReallyEqual(dirname,'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
+            return ['0']
+
+        mocklistdir.side_effect = call_listdir
+
+        def call_open(fname, mode):
+            self.failUnlessReallyEqual(fname, sharefname)
+            self.failUnless('r' in mode, mode)
+            self.failUnless('b' in mode, mode)
+
+            return StringIO(share_file_data)
+        mockopen.side_effect = call_open
+
+        datalen = len(share_file_data)
+        def call_getsize(fname):
+            self.failUnlessReallyEqual(fname, sharefname)
+            return datalen
+        mockgetsize.side_effect = call_getsize
+
+        def call_exists(fname):
+            self.failUnlessReallyEqual(fname, sharefname)
+            return True
+        mockexists.side_effect = call_exists
+
+        # Now begin the test.
+        bs = self.s.remote_get_buckets('teststorage_index')
+
+        self.failUnlessEqual(len(bs), 1)
+        b = bs[0]
+        self.failUnlessReallyEqual(b.remote_read(0, datalen), share_data)
+        # If you try to read past the end you get as much data as is there.
+        self.failUnlessReallyEqual(b.remote_read(0, datalen+20), share_data)
+        # If you start reading past the end of the file you get the empty string.
+        self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '')
}
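(Aside, not part of the bundle: the MockFile helper in the patch above emulates
sparse writes by zero-padding up to the seek position. A quick illustration of
that behaviour, assuming MockFile is defined exactly as in the patch:)

    f = MockFile()
    f.seek(4)
    f.write('abc')              # offsets 0-3 get zero-padded, 'abc' lands at offset 4
    assert f.buffer == '\x00\x00\x00\x00abc'
    f.seek(0)
    assert f.read(7) == '\x00\x00\x00\x00abc'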
[a bunch of incomplete work on #999, to be unrecorded in arctic's repo
zooko@zooko.com**20110406203812
 Ignore-this: bece4514b60b4a972e57fa50c87c9d0
] {
move ./src/allmydata/test/test_server.py ./src/allmydata/test/test_backends.py
hunk ./docs/configuration.rst 637
 [storage]
 enabled = True
 readonly = True
-sizelimit = 10000000000


 [helper]
hunk ./docs/garbage-collection.rst 16

 When a file or directory in the virtual filesystem is no longer referenced,
 the space that its shares occupied on each storage server can be freed,
-making room for other shares. Tahoe currently uses a garbage collection
+making room for other shares. Tahoe uses a garbage collection
 ("GC") mechanism to implement this space-reclamation process. Each share has
 one or more "leases", which are managed by clients who want the
 file/directory to be retained. The storage server accepts each share for a
hunk ./docs/garbage-collection.rst 34
 the `<lease-tradeoffs.svg>`_ diagram to get an idea for the tradeoffs involved.
 If lease renewal occurs quickly and with 100% reliability, than any renewal
 time that is shorter than the lease duration will suffice, but a larger ratio
-of duration-over-renewal-time will be more robust in the face of occasional
+of lease duration to renewal time will be more robust in the face of occasional
 delays or failures.

 The current recommended values for a small Tahoe grid are to renew the leases
replace ./docs/garbage-collection.rst [A-Za-z_0-9\-\.] Tahoe Tahoe-LAFS
hunk ./src/allmydata/client.py 260
             sharetypes.append("mutable")
         expiration_sharetypes = tuple(sharetypes)

+        if self.get_config("storage", "backend", "filesystem") == "filesystem":
+            xyz
+            xyz
         ss = StorageServer(storedir, self.nodeid,
                            reserved_space=reserved,
                            discard_storage=discard,
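(The two `xyz` lines above are the author's placeholders for the unfinished
backend-selection logic. Purely as a guess at the intended shape -- local names
such as `readonly` are assumed from context, not taken from the patch -- the
completed code might eventually look something like:)

    if self.get_config("storage", "backend", "filesystem") == "filesystem":
        backend = FSBackend(storedir, readonly=readonly, reserved_space=reserved)
    else:
        backend = NullBackend()
    ss = StorageServer(self.nodeid, backend, stats_provider=self.stats_provider)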
hunk ./src/allmydata/interfaces.py 270
         store that on disk.
         """

+class IStorageBackend(Interface):
+    """
+    Objects of this kind live on the server side and are used by the
+    storage server object.
+    """
+    def get_available_space(self, reserved_space):
+        """ Returns available space for share storage in bytes, or
+        None if this information is not available or if the available
+        space is unlimited.
+
+        If the backend is configured for read-only mode then this will
+        return 0.
+
+        reserved_space is how many bytes to subtract from the answer, so
+        you can pass how many bytes you would like to leave unused on this
+        filesystem as reserved_space. """
+
 class IStorageBucketWriter(Interface):
     """
     Objects of this kind live on the client side.
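(A hypothetical implementer of the new interface, only to show how
get_available_space might be satisfied; RamBackend and its attributes are
illustrative and do not appear in the patch.)

    from zope.interface import implements

    class RamBackend(object):
        implements(IStorageBackend)
        def __init__(self, quota=10*1000*1000, readonly=False):
            self.quota = quota       # total bytes this backend is willing to hold
            self.used = 0            # bytes already consumed by shares
            self.readonly = readonly
        def get_available_space(self, reserved_space):
            if self.readonly:
                return 0
            return max(0, self.quota - self.used - reserved_space)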
hunk ./src/allmydata/interfaces.py 2472

 class EmptyPathnameComponentError(Exception):
     """The webapi disallows empty pathname components."""
+
+class IShareStore(Interface):
+    pass
+
hunk ./src/allmydata/storage/crawler.py 68
     cpu_slice = 1.0 # use up to 1.0 seconds before yielding
     minimum_cycle_time = 300 # don't run a cycle faster than this

-    def __init__(self, server, statefile, allowed_cpu_percentage=None):
+    def __init__(self, backend, statefile, allowed_cpu_percentage=None):
         service.MultiService.__init__(self)
         if allowed_cpu_percentage is not None:
             self.allowed_cpu_percentage = allowed_cpu_percentage
hunk ./src/allmydata/storage/crawler.py 72
-        self.server = server
-        self.sharedir = server.sharedir
-        self.statefile = statefile
+        self.backend = backend
         self.prefixes = [si_b2a(struct.pack(">H", i << (16-10)))[:2]
                          for i in range(2**10)]
         self.prefixes.sort()
hunk ./src/allmydata/storage/crawler.py 446

     minimum_cycle_time = 60*60 # we don't need this more than once an hour

-    def __init__(self, server, statefile, num_sample_prefixes=1):
-        ShareCrawler.__init__(self, server, statefile)
+    def __init__(self, statefile, num_sample_prefixes=1):
+        ShareCrawler.__init__(self, statefile)
         self.num_sample_prefixes = num_sample_prefixes

     def add_initial_state(self):
hunk ./src/allmydata/storage/expirer.py 15
     removed.

     I collect statistics on the leases and make these available to a web
-    status page, including::
+    status page, including:

     Space recovered during this cycle-so-far:
      actual (only if expiration_enabled=True):
hunk ./src/allmydata/storage/expirer.py 51
     slow_start = 360 # wait 6 minutes after startup
     minimum_cycle_time = 12*60*60 # not more than twice per day

-    def __init__(self, server, statefile, historyfile,
+    def __init__(self, statefile, historyfile,
                  expiration_enabled, mode,
                  override_lease_duration, # used if expiration_mode=="age"
                  cutoff_date, # used if expiration_mode=="cutoff-date"
hunk ./src/allmydata/storage/expirer.py 71
         else:
             raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % mode)
         self.sharetypes_to_expire = sharetypes
-        ShareCrawler.__init__(self, server, statefile)
+        ShareCrawler.__init__(self, statefile)

     def add_initial_state(self):
         # we fill ["cycle-to-date"] here (even though they will be reset in
hunk ./src/allmydata/storage/immutable.py 44
     sharetype = "immutable"

     def __init__(self, filename, max_size=None, create=False):
-        """ If max_size is not None then I won't allow more than max_size to be written to me. If create=True and max_size must not be None. """
+        """ If max_size is not None then I won't allow more than
+        max_size to be written to me. If create=True then max_size
+        must not be None. """
         precondition((max_size is not None) or (not create), max_size, create)
         self.home = filename
         self._max_size = max_size
hunk ./src/allmydata/storage/immutable.py 87

     def read_share_data(self, offset, length):
         precondition(offset >= 0)
-        # reads beyond the end of the data are truncated. Reads that start
-        # beyond the end of the data return an empty string. I wonder why
-        # Python doesn't do the following computation for me?
+        # Reads beyond the end of the data are truncated. Reads that start
+        # beyond the end of the data return an empty string.
         seekpos = self._data_offset+offset
         fsize = os.path.getsize(self.home)
         actuallength = max(0, min(length, fsize-seekpos))
hunk ./src/allmydata/storage/server.py 7
 from twisted.application import service

 from zope.interface import implements
-from allmydata.interfaces import RIStorageServer, IStatsProducer
+from allmydata.interfaces import RIStorageServer, IStatsProducer, IShareStore
 from allmydata.util import fileutil, idlib, log, time_format
 import allmydata # for __full_version__

hunk ./src/allmydata/storage/server.py 20
 from allmydata.storage.crawler import BucketCountingCrawler
 from allmydata.storage.expirer import LeaseCheckingCrawler

+from zope.interface import implements
+
+# A Backend is a MultiService so that its crawlers (if it has any) can
+# be started and stopped.
+class Backend(service.MultiService):
+    implements(RIStorageServer, IStatsProducer)
+    def __init__(self):
+        service.MultiService.__init__(self)
+
+class NullBackend(Backend):
+    def __init__(self):
+        Backend.__init__(self)
+
+class FSBackend(Backend):
+    def __init__(self, storedir, readonly=False, reserved_space=0):
+        Backend.__init__(self)
+
+        self._setup_storage(storedir, readonly, reserved_space)
+        self._setup_corruption_advisory()
+        self._setup_bucket_counter()
+        self._setup_lease_checkerf()
+
+    def _setup_storage(self, storedir, readonly, reserved_space):
+        self.storedir = storedir
+        self.readonly = readonly
+        self.reserved_space = int(reserved_space)
+        if self.reserved_space:
+            if self.get_available_space() is None:
+                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
+                        umid="0wZ27w", level=log.UNUSUAL)
+
+        self.sharedir = os.path.join(self.storedir, "shares")
+        fileutil.make_dirs(self.sharedir)
+        self.incomingdir = os.path.join(self.sharedir, 'incoming')
+        self._clean_incomplete()
+
+    def _clean_incomplete(self):
+        fileutil.rm_dir(self.incomingdir)
+        fileutil.make_dirs(self.incomingdir)
+
+    def _setup_corruption_advisory(self):
+        # we don't actually create the corruption-advisory dir until necessary
+        self.corruption_advisory_dir = os.path.join(self.storedir,
+                                                    "corruption-advisories")
+
+    def _setup_bucket_counter(self):
+        statefile = os.path.join(self.storedir, "bucket_counter.state")
+        self.bucket_counter = BucketCountingCrawler(statefile)
+        self.bucket_counter.setServiceParent(self)
+
+    def _setup_lease_checkerf(self):
+        statefile = os.path.join(self.storedir, "lease_checker.state")
+        historyfile = os.path.join(self.storedir, "lease_checker.history")
+        self.lease_checker = LeaseCheckingCrawler(statefile, historyfile,
+                                   expiration_enabled, expiration_mode,
+                                   expiration_override_lease_duration,
+                                   expiration_cutoff_date,
+                                   expiration_sharetypes)
+        self.lease_checker.setServiceParent(self)
+
+    def get_available_space(self):
+        if self.readonly:
+            return 0
+        return fileutil.get_available_space(self.storedir, self.reserved_space)
+
 # storage/
 # storage/shares/incoming
 #   incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will
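(For orientation, not part of the patch: the effect of reserved_space is to hold
back part of the free space the operating system reports. A rough standalone
equivalent on POSIX -- not the actual fileutil implementation -- might be:)

    import os

    def available_space(storedir, reserved_space):
        s = os.statvfs(storedir)
        free_for_nonroot = s.f_bavail * s.f_frsize   # bytes available to non-root users
        return max(0, free_for_nonroot - reserved_space)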
hunk ./src/allmydata/storage/server.py 105
     name = 'storage'
     LeaseCheckerClass = LeaseCheckingCrawler

-    def __init__(self, storedir, nodeid, reserved_space=0,
-                 discard_storage=False, readonly_storage=False,
+    def __init__(self, nodeid, backend, reserved_space=0,
+                 readonly_storage=False,
                  stats_provider=None,
                  expiration_enabled=False,
                  expiration_mode="age",
hunk ./src/allmydata/storage/server.py 117
         assert isinstance(nodeid, str)
         assert len(nodeid) == 20
         self.my_nodeid = nodeid
-        self.storedir = storedir
-        sharedir = os.path.join(storedir, "shares")
-        fileutil.make_dirs(sharedir)
-        self.sharedir = sharedir
-        # we don't actually create the corruption-advisory dir until necessary
-        self.corruption_advisory_dir = os.path.join(storedir,
-                                                    "corruption-advisories")
-        self.reserved_space = int(reserved_space)
-        self.no_storage = discard_storage
-        self.readonly_storage = readonly_storage
         self.stats_provider = stats_provider
         if self.stats_provider:
             self.stats_provider.register_producer(self)
hunk ./src/allmydata/storage/server.py 120
-        self.incomingdir = os.path.join(sharedir, 'incoming')
-        self._clean_incomplete()
-        fileutil.make_dirs(self.incomingdir)
         self._active_writers = weakref.WeakKeyDictionary()
hunk ./src/allmydata/storage/server.py 121
+        self.backend = backend
+        self.backend.setServiceParent(self)
         log.msg("StorageServer created", facility="tahoe.storage")

hunk ./src/allmydata/storage/server.py 125
-        if reserved_space:
-            if self.get_available_space() is None:
-                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
-                        umin="0wZ27w", level=log.UNUSUAL)
-
         self.latencies = {"allocate": [], # immutable
                           "write": [],
                           "close": [],
hunk ./src/allmydata/storage/server.py 136
                           "renew": [],
                           "cancel": [],
                           }
-        self.add_bucket_counter()
-
-        statefile = os.path.join(self.storedir, "lease_checker.state")
-        historyfile = os.path.join(self.storedir, "lease_checker.history")
-        klass = self.LeaseCheckerClass
-        self.lease_checker = klass(self, statefile, historyfile,
-                                   expiration_enabled, expiration_mode,
-                                   expiration_override_lease_duration,
-                                   expiration_cutoff_date,
-                                   expiration_sharetypes)
-        self.lease_checker.setServiceParent(self)

     def __repr__(self):
         return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),)
hunk ./src/allmydata/storage/server.py 140

-    def add_bucket_counter(self):
-        statefile = os.path.join(self.storedir, "bucket_counter.state")
-        self.bucket_counter = BucketCountingCrawler(self, statefile)
-        self.bucket_counter.setServiceParent(self)
-
     def count(self, name, delta=1):
         if self.stats_provider:
             self.stats_provider.count("storage_server." + name, delta)
hunk ./src/allmydata/storage/server.py 183
             kwargs["facility"] = "tahoe.storage"
         return log.msg(*args, **kwargs)

-    def _clean_incomplete(self):
-        fileutil.rm_dir(self.incomingdir)
-
     def get_stats(self):
         # remember: RIStatsProvider requires that our return dict
         # contains numeric values.
hunk ./src/allmydata/storage/server.py 219
         stats['storage_server.total_bucket_count'] = bucket_count
         return stats

-    def get_available_space(self):
-        """Returns available space for share storage in bytes, or None if no
-        API to get this information is available."""
-
-        if self.readonly_storage:
-            return 0
-        return fileutil.get_available_space(self.storedir, self.reserved_space)
-
     def allocated_size(self):
         space = 0
         for bw in self._active_writers:
hunk ./src/allmydata/storage/server.py 226
         return space

     def remote_get_version(self):
-        remaining_space = self.get_available_space()
+        remaining_space = self.backend.get_available_space()
         if remaining_space is None:
             # We're on a platform that has no API to get disk stats.
             remaining_space = 2**64
hunk ./src/allmydata/storage/server.py 267

         max_space_per_bucket = allocated_size

-        remaining_space = self.get_available_space()
+        remaining_space = self.backend.get_available_space()
         limited = remaining_space is not None
         if limited:
             # this is a bit conservative, since some of this allocated_size()
hunk ./src/allmydata/test/common_util.py 20

 def flip_one_bit(s, offset=0, size=None):
     """ flip one random bit of the string s, in a byte greater than or equal to offset and less
-    than offset+size. """
+    than offset+size. Return the new string. """
     if size is None:
         size=len(s)-offset
     i = randrange(offset, offset+size)
hunk ./src/allmydata/test/test_backends.py 10
 import mock

 # This is the code that we're going to be testing.
-from allmydata.storage.server import StorageServer
+from allmydata.storage.server import StorageServer, FSBackend, NullBackend

 # The following share file contents was generated with
 # storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
hunk ./src/allmydata/test/test_backends.py 21
 sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'

 class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_create_server_null_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
+        """ This tests whether a server instance can be constructed
+        with a null backend. The server instance fails the test if it
+        tries to read or write to the file system. """
+
+        # Now begin the test.
+        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend())
+
+        self.failIf(mockisdir.called)
+        self.failIf(mocklistdir.called)
+        self.failIf(mockopen.called)
+        self.failIf(mockmkdir.called)
+        self.failIf(mocktime.called)
+
+        # You passed!
+
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
     @mock.patch('__builtin__.open')
hunk ./src/allmydata/test/test_backends.py 45
-    def test_create_server(self, mockopen):
-        """ This tests whether a server instance can be constructed. """
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_create_server_fs_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
+        """ This tests whether a server instance can be constructed
+        with a filesystem backend. To pass the test, it has to use the
+        filesystem in only the prescribed ways. """

         def call_open(fname, mode):
             if fname == 'testdir/bucket_counter.state':
hunk ./src/allmydata/test/test_backends.py 59
                 raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
             elif fname == 'testdir/lease_checker.history':
                 return StringIO()
+            else:
+                self.fail("Server with FS backend tried to open '%s' in mode '%s'" % (fname, mode))
         mockopen.side_effect = call_open

         # Now begin the test.
hunk ./src/allmydata/test/test_backends.py 64
-        s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
+        s = StorageServer('testnodeidxxxxxxxxxx', backend=FSBackend('teststoredir'))
+
+        self.failIf(mockisdir.called)
+        self.failIf(mocklistdir.called)
+        self.failIf(mockopen.called)
+        self.failIf(mockmkdir.called)
+        self.failIf(mocktime.called)

         # You passed!

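(Aside, not part of the bundle: the failIf assertions above lean on the call
tracking that mock objects do automatically; a tiny illustration, independent
of the patch.)

    m = mock.Mock()
    assert not m.called          # nothing has invoked the mock yet
    m('hello')
    assert m.called and m.call_count == 1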
hunk ./src/allmydata/test/test_backends.py 74
-class TestServer(unittest.TestCase, ReallyEqualMixin):
+class TestServerFSBackend(unittest.TestCase, ReallyEqualMixin):
     @mock.patch('__builtin__.open')
     def setUp(self, mockopen):
         def call_open(fname, mode):
hunk ./src/allmydata/test/test_backends.py 86
                 return StringIO()
         mockopen.side_effect = call_open

-        self.s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
-
+        self.s = StorageServer('testnodeidxxxxxxxxxx', backend=FSBackend('teststoredir'))

     @mock.patch('time.time')
     @mock.patch('os.mkdir')
hunk ./src/allmydata/test/test_backends.py 94
     @mock.patch('os.listdir')
     @mock.patch('os.path.isdir')
     def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
-        """Handle a report of corruption."""
+        """ Write a new share. """

         def call_listdir(dirname):
             self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
hunk ./src/allmydata/test/test_backends.py 137
         bs[0].remote_write(0, 'a')
         self.failUnlessReallyEqual(sharefile.buffer, share_file_data)

-
     @mock.patch('os.path.exists')
     @mock.patch('os.path.getsize')
     @mock.patch('__builtin__.open')
}

Context:

[test: increase timeout on a network test because Francois's ARM machine hit that timeout
zooko@zooko.com**20110317165909
 Ignore-this: 380c345cdcbd196268ca5b65664ac85b
 I'm skeptical that the test was proceeding correctly but ran out of time. It seems more likely that it had gotten hung. But if we raise the timeout to an even more extravagant number then we can be even more certain that the test was never going to finish.
]
[docs/configuration.rst: add a "Frontend Configuration" section
Brian Warner <warner@lothar.com>**20110222014323
 Ignore-this: 657018aa501fe4f0efef9851628444ca

 this points to docs/frontends/*.rst, which were previously underlinked
]
[web/filenode.py: avoid calling req.finish() on closed HTTP connections. Closes #1366
"Brian Warner <warner@lothar.com>"**20110221061544
 Ignore-this: 799d4de19933f2309b3c0c19a63bb888
]
[Add unit tests for cross_check_pkg_resources_versus_import, and a regression test for ref #1355. This requires a little refactoring to make it testable.
david-sarah@jacaranda.org**20110221015817
 Ignore-this: 51d181698f8c20d3aca58b057e9c475a
]
[allmydata/__init__.py: .name was used in place of the correct .__name__ when printing an exception. Also, robustify string formatting by using %r instead of %s in some places. fixes #1355.
david-sarah@jacaranda.org**20110221020125
 Ignore-this: b0744ed58f161bf188e037bad077fc48
]
[Refactor StorageFarmBroker handling of servers
Brian Warner <warner@lothar.com>**20110221015804
 Ignore-this: 842144ed92f5717699b8f580eab32a51

 Pass around IServer instance instead of (peerid, rref) tuple. Replace
 "descriptor" with "server". Other replacements:

  get_all_servers -> get_connected_servers/get_known_servers
  get_servers_for_index -> get_servers_for_psi (now returns IServers)

 This change still needs to be pushed further down: lots of code is now
 getting the IServer and then distributing (peerid, rref) internally.
 Instead, it ought to distribute the IServer internally and delay
 extracting a serverid or rref until the last moment.

 no_network.py was updated to retain parallelism.
]
[TAG allmydata-tahoe-1.8.2
warner@lothar.com**20110131020101]
Patch bundle hash:
de6d5cdf0fe735110db2308f55d4e541dd40fd2c
---|