1 | """ |
---|
2 | Ported to Python 3. |
---|
3 | """ |
---|
4 | |
---|
5 | import json |
---|
6 | import os.path, shutil |
---|
7 | |
---|
8 | from bs4 import BeautifulSoup |
---|
9 | |
---|
10 | from twisted.trial import unittest |
---|
11 | from twisted.internet import defer |
---|
12 | |
---|
13 | from zope.interface import implementer |
---|
14 | from twisted.web.resource import ( |
---|
15 | Resource, |
---|
16 | ) |
---|
17 | from twisted.web.template import ( |
---|
18 | renderElement, |
---|
19 | ) |
---|
20 | |
---|
21 | from allmydata import check_results, uri |
---|
22 | from allmydata import uri as tahoe_uri |
---|
23 | from allmydata.interfaces import ( |
---|
24 | IServer, |
---|
25 | ICheckResults, |
---|
26 | ICheckAndRepairResults, |
---|
27 | ) |
---|
28 | from allmydata.util import base32 |
---|
29 | from allmydata.web import check_results as web_check_results |
---|
30 | from allmydata.storage_client import StorageFarmBroker, NativeStorageServer |
---|
31 | from allmydata.storage.server import storage_index_to_dir |
---|
32 | from allmydata.monitor import Monitor |
---|
33 | from allmydata.test.no_network import GridTestMixin |
---|
34 | from allmydata.immutable.upload import Data |
---|
35 | from allmydata.mutable.publish import MutableData |
---|
36 | |
---|
37 | from .common import ( |
---|
38 | EMPTY_CLIENT_CONFIG, |
---|
39 | ) |
---|
40 | from .common_web import ( |
---|
41 | render, |
---|
42 | ) |
---|
43 | |
---|
44 | from .web.common import ( |
---|
45 | assert_soup_has_favicon, |
---|
46 | assert_soup_has_tag_with_content, |
---|
47 | ) |
---|
48 | |
---|
class FakeClient(object):
    """Minimal client double: carries just a storage broker.

    Tests attach ``storage_broker`` after construction (see
    ``create_fake_client``); this class only exposes the accessor the
    web renderers call.
    """

    def get_storage_broker(self):
        # the attribute is assigned externally by the test setup
        return self.storage_broker
---|
52 | |
---|
53 | |
---|
@implementer(IServer)
class FakeServer(object):  # type: ignore # incomplete implementation
    """Stub ``IServer`` returning fixed display names.

    Only the three name accessors consulted by the check-results web
    renderers are implemented.
    """

    def get_name(self):
        return "fake name"

    def get_longname(self):
        return "fake longname"

    def get_nickname(self):
        return "fake nickname"
---|
65 | |
---|
66 | |
---|
@implementer(ICheckResults)
class FakeCheckResults(object):  # type: ignore # incomplete implementation
    """Stub ``ICheckResults`` with caller-chosen health flags.

    Used by the deep-check renderer tests, which aggregate the
    healthy/recoverable flags and display the summary string.
    """

    def __init__(self, si=None,
                 healthy=False, recoverable=False,
                 summary="fake summary"):
        # si: storage-index bytes (or None); the flags drive the
        # renderer's aggregate counts; summary is shown per object.
        self._storage_index = si
        self._is_healthy = healthy
        self._is_recoverable = recoverable
        self._summary = summary

    def get_storage_index(self):
        return self._storage_index

    def get_storage_index_string(self):
        # b2a_or_none tolerates si=None (returns None instead of raising)
        return base32.b2a_or_none(self._storage_index)

    def is_healthy(self):
        return self._is_healthy

    def is_recoverable(self):
        return self._is_recoverable

    def get_summary(self):
        return self._summary

    def get_corrupt_shares(self):
        # returns a list of (IServer, storage_index, sharenum)
        return [(FakeServer(), b"<fake-si>", 0)]
---|
96 | |
---|
97 | |
---|
@implementer(ICheckAndRepairResults)
class FakeCheckAndRepairResults(object):  # type: ignore # incomplete implementation
    """Stub ``ICheckAndRepairResults`` with caller-chosen repair flags.

    Pre- and post-repair results are always default (unhealthy,
    unrecoverable) ``FakeCheckResults`` instances.
    """

    def __init__(self, si=None,
                 repair_attempted=False,
                 repair_success=False):
        self._storage_index = si
        self._repair_attempted = repair_attempted
        self._repair_success = repair_success

    def get_storage_index(self):
        return self._storage_index

    def get_pre_repair_results(self):
        # fresh default stub on every call; identity is not relied upon
        return FakeCheckResults()

    def get_post_repair_results(self):
        return FakeCheckResults()

    def get_repair_attempted(self):
        return self._repair_attempted

    def get_repair_successful(self):
        return self._repair_success
---|
122 | |
---|
123 | |
---|
class ElementResource(Resource, object):
    """Adapt a ``twisted.web.template`` element into a renderable Resource."""

    def __init__(self, element):
        Resource.__init__(self)
        self.element = element

    def render(self, request):
        # renderElement writes the element into the request and returns
        # whatever twisted expects from Resource.render
        return renderElement(request, self.element)
---|
131 | |
---|
132 | |
---|
133 | class WebResultsRendering(unittest.TestCase): |
---|
134 | |
---|
    @staticmethod
    def remove_tags(html):
        """Strip all HTML tags from *html*, returning the visible text
        with tag boundaries collapsed to single spaces."""
        return BeautifulSoup(html, 'html5lib').get_text(separator=" ")
---|
138 | |
---|
    def create_fake_client(self):
        """Return a FakeClient whose broker knows three static servers.

        The three servers use recognizable key/nickname patterns
        (all-zero, all-ff, all-11) so rendered output can be matched
        against predictable short names.
        """
        sb = StorageFarmBroker(True, None, EMPTY_CLIENT_CONFIG)
        # s.get_name() (the "short description") will be "v0-00000000".
        # s.get_longname() will include the -long suffix.
        servers = [(b"v0-00000000-long", b"\x00"*20, "peer-0"),
                   (b"v0-ffffffff-long", b"\xff"*20, "peer-f"),
                   (b"v0-11111111-long", b"\x11"*20, "peer-11")]
        for (key_s, binary_tubid, nickname) in servers:
            server_id = key_s
            tubid_b32 = base32.b2a(binary_tubid)
            furl = "pb://%s@nowhere/fake" % str(tubid_b32, "utf-8")
            ann = { "version": 0,
                    "service-name": "storage",
                    "anonymous-storage-FURL": furl,
                    "permutation-seed-base32": "",
                    "nickname": str(nickname),
                    "app-versions": {}, # need #466 and v2 introducer
                    "my-version": "ver",
                    "oldest-supported": "oldest",
                    }
            s = NativeStorageServer(server_id, ann, None, None, None)
            sb.test_add_server(server_id, s)
        c = FakeClient()
        c.storage_broker = sb
        return c
---|
164 | |
---|
    def render_json(self, resource):
        """Synchronously render *resource* with ``?output=json``; return bytes."""
        return self.successResultOf(render(resource, {b"output": [b"json"]}))
---|
167 | |
---|
168 | def render_element(self, element, args=None): |
---|
169 | if args is None: |
---|
170 | args = {} |
---|
171 | return self.successResultOf(render(ElementResource(element), args)) |
---|
172 | |
---|
    def test_literal(self):
        """LIT-file check results always render as healthy, both as HTML
        (with an optional return_to link) and as JSON."""
        lcr = web_check_results.LiteralCheckResultsRendererElement()

        html = self.render_element(lcr)
        self.failUnlessIn(b"Literal files are always healthy", html)

        # return_to is echoed back as a link
        html = self.render_element(lcr, args={b"return_to": [b"FOOURL"]})
        self.failUnlessIn(b"Literal files are always healthy", html)
        self.failUnlessIn(b'<a href="FOOURL">Return to file.</a>', html)

        c = self.create_fake_client()
        lcr = web_check_results.LiteralCheckResultsRenderer(c)

        js = self.render_json(lcr)
        j = json.loads(js)
        # literal files have no storage index and are trivially healthy
        self.failUnlessEqual(j["storage-index"], "")
        self.failUnlessEqual(j["results"]["healthy"], True)
---|
190 | |
---|
191 | |
---|
    def test_check(self):
        """CheckResults render correctly (HTML and JSON) for a healthy,
        an unhealthy-but-recoverable, and an unrecoverable file."""
        c = self.create_fake_client()
        sb = c.storage_broker
        serverid_1 = b"\x00"*20
        serverid_f = b"\xff"*20
        server_1 = sb.get_stub_server(serverid_1)
        server_f = sb.get_stub_server(serverid_f)
        u = uri.CHKFileURI(b"\x00"*16, b"\x00"*32, 3, 10, 1234)
        # keyword arguments shared by all three CheckResults below
        data = { "count_happiness": 8,
                 "count_shares_needed": 3,
                 "count_shares_expected": 9,
                 "count_shares_good": 10,
                 "count_good_share_hosts": 11,
                 "count_recoverable_versions": 1,
                 "count_unrecoverable_versions": 0,
                 "servers_responding": [],
                 "sharemap": {"shareid1": [server_1, server_f]},
                 "count_wrong_shares": 0,
                 "list_corrupt_shares": [],
                 "count_corrupt_shares": 0,
                 "list_incompatible_shares": [],
                 "count_incompatible_shares": 0,
                 "report": [], "share_problems": [], "servermap": None,
                 }
        # healthy case
        cr = check_results.CheckResults(u, u.get_storage_index(),
                                        healthy=True, recoverable=True,
                                        summary="groovy",
                                        **data)
        w = web_check_results.CheckResultsRendererElement(c, cr)
        html = self.render_element(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
        self.failUnlessIn("Healthy : groovy", s)
        self.failUnlessIn("Share Counts: need 3-of-9, have 10", s)
        self.failUnlessIn("Happiness Level: 8", s)
        self.failUnlessIn("Hosts with good shares: 11", s)
        self.failUnlessIn("Corrupt shares: none", s)
        self.failUnlessIn("Wrong Shares: 0", s)
        self.failUnlessIn("Recoverable Versions: 1", s)
        self.failUnlessIn("Unrecoverable Versions: 0", s)
        self.failUnlessIn("Good Shares (sorted in share order): Share ID Nickname Node ID shareid1 peer-0 00000000 peer-f ffffffff", s)

        # unhealthy but recoverable
        cr = check_results.CheckResults(u, u.get_storage_index(),
                                        healthy=False, recoverable=True,
                                        summary="ungroovy",
                                        **data)
        w = web_check_results.CheckResultsRendererElement(c, cr)
        html = self.render_element(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
        self.failUnlessIn("Not Healthy! : ungroovy", s)

        # unrecoverable, with one corrupt share
        data["count_corrupt_shares"] = 1
        data["list_corrupt_shares"] = [(server_1, u.get_storage_index(), 2)]
        cr = check_results.CheckResults(u, u.get_storage_index(),
                                        healthy=False, recoverable=False,
                                        summary="rather dead",
                                        **data)
        w = web_check_results.CheckResultsRendererElement(c, cr)
        html = self.render_element(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
        self.failUnlessIn("Not Recoverable! : rather dead", s)
        self.failUnlessIn("Corrupt shares: Share ID Nickname Node ID sh#2 peer-0 00000000", s)

        # re-rendering the same element is stable
        html = self.render_element(w)
        s = self.remove_tags(html)
        self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
        self.failUnlessIn("Not Recoverable! : rather dead", s)

        html = self.render_element(w, args={b"return_to": [b"FOOURL"]})
        self.failUnlessIn(b'<a href="FOOURL">Return to file/directory.</a>',
                          html)

        # JSON form of the unrecoverable results
        w = web_check_results.CheckResultsRenderer(c, cr)
        d = self.render_json(w)
        def _check_json(jdata):
            j = json.loads(jdata)
            self.failUnlessEqual(j["summary"], "rather dead")
            self.failUnlessEqual(j["storage-index"],
                                 "2k6avpjga3dho3zsjo6nnkt7n4")
            expected = {'count-happiness': 8,
                        'count-shares-expected': 9,
                        'healthy': False,
                        'count-unrecoverable-versions': 0,
                        'count-shares-needed': 3,
                        'sharemap': {"shareid1":
                                     ["v0-00000000-long", "v0-ffffffff-long"]},
                        'count-recoverable-versions': 1,
                        'list-corrupt-shares':
                        [["v0-00000000-long", "2k6avpjga3dho3zsjo6nnkt7n4", 2]],
                        'count-good-share-hosts': 11,
                        'count-wrong-shares': 0,
                        'count-shares-good': 10,
                        'count-corrupt-shares': 1,
                        'servers-responding': [],
                        'recoverable': False,
                        }
            self.failUnlessEqual(j["results"], expected)
        _check_json(d)

        w = web_check_results.CheckResultsRendererElement(c, cr)
        d = self.render_element(w)
        def _check(html):
            s = self.remove_tags(html)
            self.failUnlessIn("File Check Results for SI=2k6avp", s)
            self.failUnlessIn("Not Recoverable! : rather dead", s)
        _check(html)
---|
300 | |
---|
    def test_check_and_repair(self):
        """Check-and-repair results render through all repair outcomes:
        no repair needed, successful repair, unsuccessful repair (still
        recoverable), and unsuccessful repair (unrecoverable); plus the
        JSON form and the no-results placeholder."""
        c = self.create_fake_client()
        sb = c.storage_broker
        serverid_1 = b"\x00"*20
        serverid_f = b"\xff"*20
        u = uri.CHKFileURI(b"\x00"*16, b"\x00"*32, 3, 10, 1234)

        # pre-repair: 6-of-10 shares, unhealthy
        data = { "count_happiness": 5,
                 "count_shares_needed": 3,
                 "count_shares_expected": 10,
                 "count_shares_good": 6,
                 "count_good_share_hosts": 7,
                 "count_recoverable_versions": 1,
                 "count_unrecoverable_versions": 0,
                 "servers_responding": [],
                 "sharemap": {"shareid1": [sb.get_stub_server(serverid_1),
                                           sb.get_stub_server(serverid_f)]},
                 "count_wrong_shares": 0,
                 "list_corrupt_shares": [],
                 "count_corrupt_shares": 0,
                 "list_incompatible_shares": [],
                 "count_incompatible_shares": 0,
                 "report": [], "share_problems": [], "servermap": None,
                 }
        pre_cr = check_results.CheckResults(u, u.get_storage_index(),
                                            healthy=False, recoverable=True,
                                            summary="illing",
                                            **data)

        # post-repair: all 10 shares present, healthy
        data = { "count_happiness": 9,
                 "count_shares_needed": 3,
                 "count_shares_expected": 10,
                 "count_shares_good": 10,
                 "count_good_share_hosts": 11,
                 "count_recoverable_versions": 1,
                 "count_unrecoverable_versions": 0,
                 "servers_responding": [],
                 "sharemap": {"shareid1": [sb.get_stub_server(serverid_1),
                                           sb.get_stub_server(serverid_f)]},
                 "count_wrong_shares": 0,
                 "count_corrupt_shares": 0,
                 "list_corrupt_shares": [],
                 "list_incompatible_shares": [],
                 "count_incompatible_shares": 0,
                 "report": [], "share_problems": [], "servermap": None,
                 }
        post_cr = check_results.CheckResults(u, u.get_storage_index(),
                                             healthy=True, recoverable=True,
                                             summary="groovy",
                                             **data)

        crr = check_results.CheckAndRepairResults(u.get_storage_index())
        crr.pre_repair_results = pre_cr
        crr.post_repair_results = post_cr
        crr.repair_attempted = False

        # case 1: no repair attempted
        w = web_check_results.CheckAndRepairResultsRendererElement(c, crr)
        html = self.render_element(w)
        s = self.remove_tags(html)

        self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
        self.failUnlessIn("Healthy : groovy", s)
        self.failUnlessIn("No repair necessary", s)
        self.failUnlessIn("Post-Repair Checker Results:", s)
        self.failUnlessIn("Share Counts: need 3-of-10, have 10", s)

        # case 2: repair attempted and successful
        crr.repair_attempted = True
        crr.repair_successful = True
        html = self.render_element(w)
        s = self.remove_tags(html)

        self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
        self.failUnlessIn("Healthy : groovy", s)
        self.failUnlessIn("Repair successful", s)
        self.failUnlessIn("Post-Repair Checker Results:", s)

        # case 3: repair failed, file still recoverable
        crr.repair_attempted = True
        crr.repair_successful = False
        post_cr = check_results.CheckResults(u, u.get_storage_index(),
                                             healthy=False, recoverable=True,
                                             summary="better",
                                             **data)
        crr.post_repair_results = post_cr
        html = self.render_element(w)
        s = self.remove_tags(html)

        self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
        self.failUnlessIn("Not Healthy! : better", s)
        self.failUnlessIn("Repair unsuccessful", s)
        self.failUnlessIn("Post-Repair Checker Results:", s)

        # case 4: repair failed, file unrecoverable
        crr.repair_attempted = True
        crr.repair_successful = False
        post_cr = check_results.CheckResults(u, u.get_storage_index(),
                                             healthy=False, recoverable=False,
                                             summary="worse",
                                             **data)
        crr.post_repair_results = post_cr
        html = self.render_element(w)
        s = self.remove_tags(html)

        self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
        self.failUnlessIn("Not Recoverable! : worse", s)
        self.failUnlessIn("Repair unsuccessful", s)
        self.failUnlessIn("Post-Repair Checker Results:", s)

        # JSON form
        w = web_check_results.CheckAndRepairResultsRenderer(c, crr)
        j = json.loads(self.render_json(w))
        self.failUnlessEqual(j["repair-attempted"], True)
        self.failUnlessEqual(j["storage-index"],
                             "2k6avpjga3dho3zsjo6nnkt7n4")
        self.failUnlessEqual(j["pre-repair-results"]["summary"], "illing")
        self.failUnlessEqual(j["post-repair-results"]["summary"], "worse")

        # renderer tolerates a missing results object
        w = web_check_results.CheckAndRepairResultsRenderer(c, None)
        j = json.loads(self.render_json(w))
        self.failUnlessEqual(j["repair-attempted"], False)
        self.failUnlessEqual(j["storage-index"], "")
---|
419 | |
---|
420 | |
---|
421 | def test_deep_check_renderer(self): |
---|
422 | status = check_results.DeepCheckResults(b"fake-root-si") |
---|
423 | status.add_check( |
---|
424 | FakeCheckResults(b"<unhealthy/unrecoverable>", False, False), |
---|
425 | (u"fake", u"unhealthy", u"unrecoverable") |
---|
426 | ) |
---|
427 | status.add_check( |
---|
428 | FakeCheckResults(b"<healthy/recoverable>", True, True), |
---|
429 | (u"fake", u"healthy", u"recoverable") |
---|
430 | ) |
---|
431 | status.add_check( |
---|
432 | FakeCheckResults(b"<healthy/unrecoverable>", True, False), |
---|
433 | (u"fake", u"healthy", u"unrecoverable") |
---|
434 | ) |
---|
435 | status.add_check( |
---|
436 | FakeCheckResults(b"<unhealthy/unrecoverable>", False, True), |
---|
437 | (u"fake", u"unhealthy", u"recoverable") |
---|
438 | ) |
---|
439 | |
---|
440 | monitor = Monitor() |
---|
441 | monitor.set_status(status) |
---|
442 | |
---|
443 | elem = web_check_results.DeepCheckResultsRendererElement(monitor) |
---|
444 | doc = self.render_element(elem) |
---|
445 | soup = BeautifulSoup(doc, 'html5lib') |
---|
446 | |
---|
447 | assert_soup_has_favicon(self, soup) |
---|
448 | |
---|
449 | assert_soup_has_tag_with_content( |
---|
450 | self, soup, u"title", |
---|
451 | u"Tahoe-LAFS - Deep Check Results" |
---|
452 | ) |
---|
453 | |
---|
454 | assert_soup_has_tag_with_content( |
---|
455 | self, soup, u"h1", |
---|
456 | "Deep-Check Results for root SI=" |
---|
457 | ) |
---|
458 | |
---|
459 | assert_soup_has_tag_with_content( |
---|
460 | self, soup, u"li", |
---|
461 | u"Objects Checked: 4" |
---|
462 | ) |
---|
463 | |
---|
464 | assert_soup_has_tag_with_content( |
---|
465 | self, soup, u"li", |
---|
466 | u"Objects Healthy: 2" |
---|
467 | ) |
---|
468 | |
---|
469 | assert_soup_has_tag_with_content( |
---|
470 | self, soup, u"li", |
---|
471 | u"Objects Unhealthy: 2" |
---|
472 | ) |
---|
473 | |
---|
474 | assert_soup_has_tag_with_content( |
---|
475 | self, soup, u"li", |
---|
476 | u"Objects Unrecoverable: 2" |
---|
477 | ) |
---|
478 | |
---|
479 | assert_soup_has_tag_with_content( |
---|
480 | self, soup, u"li", |
---|
481 | u"Corrupt Shares: 4" |
---|
482 | ) |
---|
483 | |
---|
484 | assert_soup_has_tag_with_content( |
---|
485 | self, soup, u"h2", |
---|
486 | u"Files/Directories That Had Problems:" |
---|
487 | ) |
---|
488 | |
---|
489 | assert_soup_has_tag_with_content( |
---|
490 | self, soup, u"li", |
---|
491 | u"fake/unhealthy/recoverable: fake summary" |
---|
492 | ) |
---|
493 | |
---|
494 | assert_soup_has_tag_with_content( |
---|
495 | self, soup, u"li", |
---|
496 | u"fake/unhealthy/unrecoverable: fake summary" |
---|
497 | ) |
---|
498 | |
---|
499 | assert_soup_has_tag_with_content( |
---|
500 | self, soup, u"h2", |
---|
501 | u"Servers on which corrupt shares were found" |
---|
502 | ) |
---|
503 | |
---|
504 | assert_soup_has_tag_with_content( |
---|
505 | self, soup, u"h2", |
---|
506 | u"Corrupt Shares" |
---|
507 | ) |
---|
508 | |
---|
509 | assert_soup_has_tag_with_content( |
---|
510 | self, soup, u"h2", |
---|
511 | u"All Results" |
---|
512 | ) |
---|
513 | |
---|
    def test_deep_check_and_repair_renderer(self):
        """The deep-check-and-repair page aggregates repair outcomes.

        Three fake objects: two with repairs attempted (one success,
        one failure) and one unattempted; all report as unhealthy via
        the default FakeCheckResults pre/post results.
        """
        status = check_results.DeepCheckAndRepairResults(b"")

        status.add_check_and_repair(
            FakeCheckAndRepairResults(b"attempted/success", True, True),
            (u"attempted", u"success")
        )
        status.add_check_and_repair(
            FakeCheckAndRepairResults(b"attempted/failure", True, False),
            (u"attempted", u"failure")
        )
        status.add_check_and_repair(
            FakeCheckAndRepairResults(b"unattempted/failure", False, False),
            (u"unattempted", u"failure")
        )

        monitor = Monitor()
        monitor.set_status(status)

        elem = web_check_results.DeepCheckAndRepairResultsRendererElement(monitor)
        doc = self.render_element(elem)
        soup = BeautifulSoup(doc, 'html5lib')

        assert_soup_has_favicon(self, soup)

        assert_soup_has_tag_with_content(
            self, soup, u"title",
            u"Tahoe-LAFS - Deep Check Results"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"h1",
            u"Deep-Check-And-Repair Results for root SI="
        )

        assert_soup_has_tag_with_content(
            self, soup, u"li",
            u"Objects Checked: 3"
        )

        # pre-repair stubs are always unhealthy with one corrupt share
        assert_soup_has_tag_with_content(
            self, soup, u"li",
            u"Objects Healthy (before repair): 0"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"li",
            u"Objects Unhealthy (before repair): 3"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"li",
            u"Corrupt Shares (before repair): 3"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"li",
            u"Repairs Attempted: 2"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"li",
            u"Repairs Successful: 1"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"li",
            "Repairs Unsuccessful: 1"
        )

        # post-repair stubs are likewise always unhealthy
        assert_soup_has_tag_with_content(
            self, soup, u"li",
            u"Objects Healthy (after repair): 0"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"li",
            u"Objects Unhealthy (after repair): 3"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"li",
            u"Corrupt Shares (after repair): 3"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"h2",
            u"Files/Directories That Had Problems:"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"h2",
            u"Files/Directories That Still Have Problems:"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"h2",
            u"Servers on which corrupt shares were found"
        )

        assert_soup_has_tag_with_content(
            self, soup, u"h2",
            u"Remaining Corrupt Shares"
        )
---|
618 | |
---|
619 | |
---|
class BalancingAct(GridTestMixin, unittest.TestCase):
    # test for #1115 regarding the 'count-good-share-hosts' metric


    def add_server(self, server_number, readonly=False):
        """Create a new storage server and add it to the test grid."""
        assert self.g, "I tried to find a grid at self.g, but failed"
        ss = self.g.make_server(server_number, readonly)
        #log.msg("just created a server, number: %s => %s" % (server_number, ss,))
        self.g.add_server(server_number, ss)

    def add_server_with_share(self, server_number, uri, share_number=None,
                              readonly=False):
        """Add a server, optionally seeding it with one existing share."""
        self.add_server(server_number, readonly)
        if share_number is not None:
            self.copy_share_to_server(uri, share_number, server_number)

    def copy_share_to_server(self, uri, share_number, server_number):
        """Copy share *share_number* of *uri* into the share directory of
        server *server_number*, then verify the server reports it."""
        ss = self.g.servers_by_number[server_number]
        # Copy share i from the directory associated with the first
        # storage server to the directory associated with this one.
        assert self.g, "I tried to find a grid at self.g, but failed"
        assert self.shares, "I tried to find shares at self.shares, but failed"
        old_share_location = self.shares[share_number][2]
        new_share_location = os.path.join(ss.storedir, "shares")
        si = tahoe_uri.from_string(self.uri).get_storage_index()
        new_share_location = os.path.join(new_share_location,
                                          storage_index_to_dir(si))
        if not os.path.exists(new_share_location):
            os.makedirs(new_share_location)
        new_share_location = os.path.join(new_share_location,
                                          str(share_number))
        # avoid copying a file onto itself when source == destination
        if old_share_location != new_share_location:
            shutil.copy(old_share_location, new_share_location)
        shares = self.find_uri_shares(uri)
        # Make sure that the storage server has the share.
        self.failUnless((share_number, ss.my_nodeid, new_share_location)
                        in shares)

    def _pretty_shares_chart(self, uri):
        """Return {sharenum: [server letters]} for debugging printouts."""
        # Servers are labeled A-Z, shares are labeled 0-9
        letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        assert len(self.g.servers_by_number) < len(letters), \
            "This little printing function is only meant for < 26 servers"
        shares_chart = {}
        names = dict(zip([ss.my_nodeid
                          for _,ss in self.g.servers_by_number.items()],
                         letters))
        for shnum, serverid, _ in self.find_uri_shares(uri):
            shares_chart.setdefault(shnum, []).append(names[serverid])
        return shares_chart

    def test_good_share_hosts(self):
        """#1115: 'count-good-share-hosts' counts distinct hosts holding
        good shares, and repair keeps that metric sensible."""
        self.basedir = "checker/BalancingAct/1115"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.encoding_params['happy'] = 1
        c0.encoding_params['n'] = 4
        c0.encoding_params['k'] = 3

        DATA = b"data" * 100
        d = c0.upload(Data(DATA, convergence=b""))
        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.get_uri())
            self.uri = self.imm.get_uri()
        d.addCallback(_stash_immutable)
        d.addCallback(lambda ign:
            self.find_uri_shares(self.uri))
        def _store_shares(shares):
            self.shares = shares
        d.addCallback(_store_shares)

        def add_three(_, i):
            # Add a new server with just share 3
            self.add_server_with_share(i, self.uri, 3)
            #print(self._pretty_shares_chart(self.uri))
        for i in range(1,5):
            d.addCallback(add_three, i)

        def _check_and_repair(_):
            return self.imm.check_and_repair(Monitor())
        def _check_counts(crr, shares_good, good_share_hosts):
            prr = crr.get_post_repair_results()
            self.failUnlessEqual(prr.get_share_counter_good(), shares_good)
            self.failUnlessEqual(prr.get_host_counter_good_shares(),
                                 good_share_hosts)

        """
        Initial sharemap:
            0:[A] 1:[A] 2:[A] 3:[A,B,C,D,E]
          4 good shares, but 5 good hosts
        After deleting all instances of share #3 and repairing:
            0:[A], 1:[A,B], 2:[C,A], 3:[E]
          # actually: {0: ['E', 'A'], 1: ['C', 'A'], 2: ['A', 'B'], 3: ['D']}
          Still 4 good shares but now 4 good hosts
        """
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 4, 5)
        d.addCallback(lambda _: self.delete_shares_numbered(self.uri, [3]))
        d.addCallback(_check_and_repair)

        # it can happen that our uploader will choose, e.g., to upload
        # to servers B, C, D, E .. which will mean that all 5 serves
        # now contain our shares (and thus "respond").

        def _check_happy(crr):
            prr = crr.get_post_repair_results()
            self.assertTrue(prr.get_host_counter_good_shares() >= 4)
            return crr
        d.addCallback(_check_happy)
        # breaking every server should drive both counters to zero
        d.addCallback(lambda _: all([self.g.break_server(sid)
                                     for sid in self.g.get_all_serverids()]))
        d.addCallback(_check_and_repair)
        d.addCallback(_check_counts, 0, 0)
        return d
---|
734 | |
---|
class AddLease(GridTestMixin, unittest.TestCase):
    # test for #875, in which failures in the add-lease call cause
    # false-negatives in the checker

    def test_875(self):
        """A failing add_lease call must not make the checker report an
        otherwise-healthy file (immutable or mutable) as unhealthy."""
        self.basedir = "checker/AddLease/875"
        self.set_up_grid(num_servers=1)
        c0 = self.g.clients[0]
        c0.encoding_params['happy'] = 1
        self.uris = {}
        DATA = b"data" * 100
        d = c0.upload(Data(DATA, convergence=b""))
        def _stash_immutable(ur):
            self.imm = c0.create_node_from_uri(ur.get_uri())
        d.addCallback(_stash_immutable)
        d.addCallback(lambda ign:
            c0.create_mutable_file(MutableData(b"contents")))
        def _stash_mutable(node):
            self.mut = node
        d.addCallback(_stash_mutable)

        def _check_cr(cr, which):
            self.failUnless(cr.is_healthy(), which)

        # these two should work normally
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-normal")
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-normal")

        really_did_break = []
        # now break the server's add_lease call
        def _break_add_lease(ign):
            def broken_add_lease(*args, **kwargs):
                really_did_break.append(1)
                raise KeyError("intentional failure, should be ignored")
            assert self.g.servers_by_number[0].add_lease
            self.g.servers_by_number[0].add_lease = broken_add_lease
        d.addCallback(_break_add_lease)

        # and confirm that the files still look healthy
        d.addCallback(lambda ign: self.mut.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "mutable-broken")
        d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True))
        d.addCallback(_check_cr, "immutable-broken")

        # guard against the monkey-patch silently not being exercised
        d.addCallback(lambda ign: self.failUnless(really_did_break))
        return d
---|
783 | |
---|
class CounterHolder(object):
    """Shared mutable counters for in-flight block fetches.

    ``_num_active_block_fetches`` holds the number of currently
    outstanding fetches; ``_max_active_block_fetches`` records the
    observed peak (the high-water mark that TooParallel asserts on).
    """

    def __init__(self):
        # both counters start at zero; MockVRBP mutates them directly
        self._num_active_block_fetches = 0
        self._max_active_block_fetches = 0
---|
788 | |
---|
from allmydata.immutable.checker import ValidatedReadBucketProxy
class MockVRBP(ValidatedReadBucketProxy):
    """A ValidatedReadBucketProxy that counts concurrent get_block calls.

    The shared CounterHolder tracks how many fetches are in flight and
    remembers the peak, so TooParallel.test_immutable can assert the
    verifier's parallelism is bounded.
    """

    def __init__(self, sharenum, bucket, share_hash_tree, num_blocks, block_size, share_size, counterholder):
        ValidatedReadBucketProxy.__init__(self, sharenum, bucket,
                                          share_hash_tree, num_blocks,
                                          block_size, share_size)
        self.counterholder = counterholder

    def get_block(self, blocknum):
        # bump the in-flight count and update the high-water mark
        self.counterholder._num_active_block_fetches += 1
        if self.counterholder._num_active_block_fetches > self.counterholder._max_active_block_fetches:
            self.counterholder._max_active_block_fetches = self.counterholder._num_active_block_fetches
        d = ValidatedReadBucketProxy.get_block(self, blocknum)
        def _mark_no_longer_active(res):
            # addBoth: decrement on success or failure, pass result through
            self.counterholder._num_active_block_fetches -= 1
            return res
        d.addBoth(_mark_no_longer_active)
        return d
---|
807 | |
---|
class TooParallel(GridTestMixin, unittest.TestCase):
    # bug #1395: immutable verifier was aggressively parallized, checking all
    # blocks of all shares at the same time, blowing our memory budget and
    # crashing with MemoryErrors on >1GB files.

    def test_immutable(self):
        """Verify the immutable verifier fetches at most one block per
        share at a time (4 shares => at most 4 parallel fetches)."""
        import allmydata.immutable.checker
        origVRBP = allmydata.immutable.checker.ValidatedReadBucketProxy

        self.basedir = "checker/TooParallel/immutable"

        # If any code asks to instantiate a ValidatedReadBucketProxy,
        # we give them a MockVRBP which is configured to use our
        # CounterHolder.
        counterholder = CounterHolder()
        def make_mock_VRBP(*args, **kwargs):
            return MockVRBP(counterholder=counterholder, *args, **kwargs)
        allmydata.immutable.checker.ValidatedReadBucketProxy = make_mock_VRBP

        d = defer.succeed(None)
        def _start(ign):
            self.set_up_grid(num_servers=4)
            self.c0 = self.g.clients[0]
            self.c0.encoding_params = { "k": 1,
                                        "happy": 4,
                                        "n": 4,
                                        "max_segment_size": 5,
                                        }
            self.uris = {}
            DATA = b"data" * 100 # 400/5 = 80 blocks
            return self.c0.upload(Data(DATA, convergence=b""))
        d.addCallback(_start)
        def _do_check(ur):
            n = self.c0.create_node_from_uri(ur.get_uri())
            return n.check(Monitor(), verify=True)
        d.addCallback(_do_check)
        def _check(cr):
            # the verifier works on all 4 shares in parallel, but only
            # fetches one block from each share at a time, so we expect to
            # see 4 parallel fetches
            self.failUnlessEqual(counterholder._max_active_block_fetches, 4)
        d.addCallback(_check)
        def _clean_up(res):
            # restore the real class even if the test failed (addBoth)
            allmydata.immutable.checker.ValidatedReadBucketProxy = origVRBP
            return res
        d.addBoth(_clean_up)
        return d
---|