1 | """ |
---|
2 | This module has been ported to Python 3. |
---|
3 | """ |
---|
4 | |
---|
5 | import random |
---|
6 | |
---|
7 | from twisted.trial import unittest |
---|
8 | from twisted.internet import defer |
---|
9 | from foolscap.api import eventually |
---|
10 | |
---|
11 | from allmydata.test import common |
---|
12 | from allmydata.test.no_network import GridTestMixin |
---|
13 | from allmydata.test.common import TEST_DATA |
---|
14 | from allmydata import uri |
---|
15 | from allmydata.util import log |
---|
16 | from allmydata.util.consumer import download_to_data |
---|
17 | |
---|
18 | from allmydata.interfaces import NotEnoughSharesError |
---|
19 | from allmydata.immutable.upload import Data |
---|
20 | from allmydata.immutable.downloader import finder |
---|
21 | from allmydata.immutable.literal import LiteralFileNode |
---|
22 | |
---|
23 | from .no_network import ( |
---|
24 | NoNetworkServer, |
---|
25 | ) |
---|
26 | |
---|
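# Minimal test doubles used by TestShareFinder below: MockShareHashTree stands
# in for a share hash tree that needs no further hashes, and MockNode mimics
# just enough of the download node's interface (got_shares, no_more_shares,
# want_more_shares, ...) for the share-finder reneging test to run.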
class MockShareHashTree(object):
    def needed_hashes(self):
        return False

class MockNode(object):
    def __init__(self, check_reneging, check_fetch_failed):
        self.got = 0
        self.finished_d = defer.Deferred()
        self.segment_size = 78
        self.guessed_segment_size = 78
        self._no_more_shares = False
        self.check_reneging = check_reneging
        self.check_fetch_failed = check_fetch_failed
        self._si_prefix = 'aa'
        self.have_UEB = True
        self.share_hash_tree = MockShareHashTree()
        self.on_want_more_shares = None

    def when_finished(self):
        return self.finished_d
    def get_num_segments(self):
        return (5, True)
    def _calculate_sizes(self, guessed_segment_size):
        return {'block_size': 4, 'num_segments': 5}
    def no_more_shares(self):
        self._no_more_shares = True
    def got_shares(self, shares):
        if self.check_reneging:
            if self._no_more_shares:
                self.finished_d.errback(unittest.FailTest("The node was told by the share finder that it is destined to remain hungry, then was given another share."))
                return
        self.got += len(shares)
        log.msg("yyy 3 %s.got_shares(%s) got: %s" % (self, shares, self.got))
        if self.got == 3:
            self.finished_d.callback(True)
    def get_desired_ciphertext_hashes(self, *args, **kwargs):
        return iter([])
    def fetch_failed(self, *args, **kwargs):
        if self.check_fetch_failed:
            if self.finished_d:
                self.finished_d.errback(unittest.FailTest("The node was told by the segment fetcher that the download failed."))
                self.finished_d = None
    def want_more_shares(self):
        if self.on_want_more_shares:
            self.on_want_more_shares()
    def process_blocks(self, *args, **kwargs):
        if self.finished_d:
            self.finished_d.callback(None)

class TestShareFinder(unittest.TestCase):
    def test_no_reneging_on_no_more_shares_ever(self):
        # ticket #1191

        # Suppose that K=3 and you send two DYHB requests, the first
        # response offers two shares, and then the last offers one
        # share. If you tell your share consumer "no more shares,
        # ever", and then immediately tell them "oh, and here's
        # another share", then you lose.

        rcap = uri.CHKFileURI(b'a'*32, b'a'*32, 3, 99, 100)
        vcap = rcap.get_verify_cap()

        class MockBuckets(object):
            pass

        class MockServer(object):
            def __init__(self, buckets):
                self.version = {
                    b'http://allmydata.org/tahoe/protocols/storage/v1': {
                        b"tolerates-immutable-read-overrun": True
                    }
                }
                self.buckets = buckets
                self.d = defer.Deferred()
                self.s = None
            def callRemote(self, methname, *args, **kwargs):
                d = defer.Deferred()

                # Even after the 3rd answer we're still hungry because
                # we're interested in finding a share on a 3rd server
                # so we don't have to download more than one share
                # from the first server. This is actually necessary to
                # trigger the bug.
                def _give_buckets_and_hunger_again():
                    d.callback(self.buckets)
                    self.s.hungry()
                eventually(_give_buckets_and_hunger_again)
                return d

        class MockStorageBroker(object):
            def __init__(self, servers):
                self.servers = servers
            def get_servers_for_psi(self, si):
                return self.servers

        class MockDownloadStatus(object):
            def add_dyhb_request(self, server, when):
                return MockDYHBEvent()

        class MockDYHBEvent(object):
            def finished(self, shnums, when):
                pass

        mockserver1 = MockServer({1: MockBuckets(), 2: MockBuckets()})
        mockserver2 = MockServer({})
        mockserver3 = MockServer({3: MockBuckets()})
        servers = [ NoNetworkServer(b"ms1", mockserver1),
                    NoNetworkServer(b"ms2", mockserver2),
                    NoNetworkServer(b"ms3", mockserver3), ]
        mockstoragebroker = MockStorageBroker(servers)
        mockdownloadstatus = MockDownloadStatus()
        mocknode = MockNode(check_reneging=True, check_fetch_failed=True)

        s = finder.ShareFinder(mockstoragebroker, vcap, mocknode, mockdownloadstatus)

        mockserver1.s = s
        mockserver2.s = s
        mockserver3.s = s

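        # Declaring hunger makes the ShareFinder issue DYHB queries to the
        # mock servers; each MockServer answer then re-declares hunger (see
        # callRemote above), which is what provokes the #1191 reneging race
        # that MockNode.got_shares checks for.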
        s.hungry()

        return mocknode.when_finished()


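# End-to-end download tests against the in-process "no network" grid:
# startup() uploads TEST_DATA with the default 3-of-10 encoding, and the
# individual tests then delete or corrupt shares and check both the
# downloaded plaintext and the number of storage-server reads performed.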
class Test(GridTestMixin, unittest.TestCase, common.ShouldFailMixin):
    def startup(self, basedir):
        self.basedir = basedir
        self.set_up_grid(num_clients=2, num_servers=5)
        c1 = self.g.clients[1]
        # We need multiple segments to test crypttext hash trees that are
        # non-trivial (i.e. they have more than just one hash in them).
        c1.encoding_params['max_segment_size'] = 12
        # Tests that need to test servers of happiness using this should
        # set their own value for happy -- the default (7) breaks stuff.
        c1.encoding_params['happy'] = 1
        d = c1.upload(Data(TEST_DATA, convergence=b""))
        def _after_upload(ur):
            self.uri = ur.get_uri()
            self.filenode = self.g.clients[0].create_node_from_uri(ur.get_uri())
            return self.uri
        d.addCallback(_after_upload)
        return d

    def _stash_shares(self, shares):
        self.shares = shares

    def _download_and_check_plaintext(self, ign=None):
        num_reads = self._count_reads()
        d = download_to_data(self.filenode)
        def _after_download(result):
            self.failUnlessEqual(result, TEST_DATA)
            return self._count_reads() - num_reads
        d.addCallback(_after_download)
        return d

    def _shuffled(self, num_shnums):
        shnums = list(range(10))
        random.shuffle(shnums)
        return shnums[:num_shnums]

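    # The helpers below total a storage-server counter across every server in
    # the grid; the download tests use the read count as a rough proxy for
    # how much work a download required.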
    def _count_reads(self):
        return sum([s.stats_provider.get_stats()['counters'].get('storage_server.read', 0)
                    for s in self.g.servers_by_number.values()])

    def _count_allocates(self):
        return sum([s.stats_provider.get_stats()['counters'].get('storage_server.allocate', 0)
                    for s in self.g.servers_by_number.values()])

    def _count_writes(self):
        return sum([s.stats_provider.get_stats()['counters'].get('storage_server.write', 0)
                    for s in self.g.servers_by_number.values()])

    def test_test_code(self):
        # The following process of copying and stashing the shares, then
        # downloading and checking the plaintext, is more to test this test
        # code than to test the Tahoe code...
        d = self.startup("immutable/Test/code")
        d.addCallback(self.copy_shares)
        d.addCallback(self._stash_shares)
        d.addCallback(self._download_and_check_plaintext)

        # The following process of deleting 8 of the shares and asserting
        # that you can't download it is more to test this test code than to
        # test the Tahoe code...
        def _then_delete_8(ign):
            self.restore_all_shares(self.shares)
            self.delete_shares_numbered(self.uri, range(8))
        d.addCallback(_then_delete_8)
        d.addCallback(lambda ign:
                      self.shouldFail(NotEnoughSharesError, "download-2",
                                      "ran out of shares",
                                      download_to_data, self.filenode))
        return d

    def test_download(self):
        """ Basic download. (This functionality is more or less already
        tested by test code in other modules, but this module is also going
        to test some more specific things about immutable download.)
        """
        d = self.startup("immutable/Test/download")
        d.addCallback(self._download_and_check_plaintext)
        def _after_download(ign):
            num_reads = self._count_reads()
            #print(num_reads)
            self.failIf(num_reads > 41, num_reads)
        d.addCallback(_after_download)
        return d

    def test_download_from_only_3_remaining_shares(self):
        """ Test download after 7 random shares (of the 10) have been
        removed."""
        d = self.startup("immutable/Test/download_from_only_3_remaining_shares")
        d.addCallback(lambda ign:
                      self.delete_shares_numbered(self.uri, range(7)))
        d.addCallback(self._download_and_check_plaintext)
        def _after_download(num_reads):
            #print(num_reads)
            self.failIf(num_reads > 41, num_reads)
        d.addCallback(_after_download)
        return d

    def test_download_from_only_3_shares_with_good_crypttext_hash(self):
        """ Test download after 7 random shares (of the 10) have had their
        crypttext hash tree corrupted."""
        d = self.startup("download_from_only_3_shares_with_good_crypttext_hash")
        def _corrupt_7(ign):
            c = common._corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes
            self.corrupt_shares_numbered(self.uri, self._shuffled(7), c)
        d.addCallback(_corrupt_7)
        d.addCallback(self._download_and_check_plaintext)
        return d

    def test_download_abort_if_too_many_missing_shares(self):
        """ Test that download gives up quickly when it realizes there aren't
        enough shares out there."""
        d = self.startup("download_abort_if_too_many_missing_shares")
        d.addCallback(lambda ign:
                      self.delete_shares_numbered(self.uri, range(8)))
        d.addCallback(lambda ign:
                      self.shouldFail(NotEnoughSharesError, "delete 8",
                                      "Last failure: None",
                                      download_to_data, self.filenode))
        # the new downloader pipelines a bunch of read requests in parallel,
        # so don't bother asserting anything about the number of reads
        return d

    def test_download_abort_if_too_many_corrupted_shares(self):
        """Test that download gives up quickly when it realizes there aren't
        enough uncorrupted shares out there. It should be able to tell
        because the corruption occurs in the sharedata version number, which
        it checks first."""
        d = self.startup("download_abort_if_too_many_corrupted_shares")
        def _corrupt_8(ign):
            c = common._corrupt_sharedata_version_number
            self.corrupt_shares_numbered(self.uri, self._shuffled(8), c)
        d.addCallback(_corrupt_8)
        def _try_download(ign):
            start_reads = self._count_reads()
            d2 = self.shouldFail(NotEnoughSharesError, "corrupt 8",
                                 "LayoutInvalid",
                                 download_to_data, self.filenode)
            def _check_numreads(ign):
                num_reads = self._count_reads() - start_reads
                #print(num_reads)

                # To pass this test, you are required to give up before
                # reading all of the share data. Actually, we could give up
                # sooner than 45 reads, but currently our download code does
                # 45 reads. This test then serves as a "performance
                # regression detector" -- if you change download code so that
                # it takes *more* reads, then this test will fail.
                self.failIf(num_reads > 45, num_reads)
            d2.addCallback(_check_numreads)
            return d2
        d.addCallback(_try_download)
        return d

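    # The remaining tests exercise the filenode's convenience read APIs
    # (download_to_data, download_best_version, get_best_readable_version,
    # get_size_of_best_version) against the uploaded TEST_DATA.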
    def test_download_to_data(self):
        d = self.startup("download_to_data")
        d.addCallback(lambda ign: self.filenode.download_to_data())
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, common.TEST_DATA))
        return d

    def test_download_best_version(self):
        d = self.startup("download_best_version")
        d.addCallback(lambda ign: self.filenode.download_best_version())
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, common.TEST_DATA))
        return d

    def test_get_best_readable_version(self):
        d = self.startup("get_best_readable_version")
        d.addCallback(lambda ign: self.filenode.get_best_readable_version())
        d.addCallback(lambda n2:
                      self.failUnlessEqual(n2, self.filenode))
        return d

    def test_get_size_of_best_version(self):
        d = self.startup("get_size_of_best_version")
        d.addCallback(lambda ign: self.filenode.get_size_of_best_version())
        d.addCallback(lambda size:
                      self.failUnlessEqual(size, len(common.TEST_DATA)))
        return d


class LiteralFileNodeTests(unittest.TestCase):
    """Tests for LiteralFileNode."""

    def test_equality(self):
        """LiteralFileNodes are equal iff they have the same URI."""
        uri1 = uri.LiteralFileURI(b"1")
        uri2 = uri.LiteralFileURI(b"2")
        lfn1 = LiteralFileNode(uri1)
        lfn1b = LiteralFileNode(uri1)
        lfn2 = LiteralFileNode(uri2)
        self.assertTrue(lfn1 == lfn1b)
        self.assertFalse(lfn1 != lfn1b)
        self.assertTrue(lfn1 != lfn2)
        self.assertFalse(lfn1 == lfn2)
        self.assertTrue(lfn1 != 300)
        self.assertFalse(lfn1 == 300)


# XXX extend these tests to show bad behavior of various kinds from servers:
# raising an exception from each remote_foo() method, for example

# XXX test disconnect DeadReferenceError from get_buckets and get_block_whatsit

# TODO: delete this whole file