1 | diff -rN -u old-1.9.2/src/allmydata/mutable/common.py new-1.9.2/src/allmydata/mutable/common.py |
---|
2 | --- old-1.9.2/src/allmydata/mutable/common.py 2012-06-21 00:35:33.535864570 -0300 |
---|
3 | +++ new-1.9.2/src/allmydata/mutable/common.py 2012-06-21 00:35:33.709197226 -0300 |
---|
4 | @@ -1,6 +1,35 @@ |
---|
5 | |
---|
6 | from allmydata.util.spans import DataSpans |
---|
7 | |
---|
8 | +from allmydata.util import hashutil |
---|
9 | + |
---|
10 | +def is_salt(x): |
---|
11 | + return isinstance(x, str) and len(x) == 16 |
---|
12 | + |
---|
13 | +def check_is_verinfo(x): |
---|
14 | + if not isinstance(x, tuple): |
---|
15 | + raise TypeError("This isn't a verinfo because its type is %s instead of tuple. %r" % (type(x), x,)) |
---|
16 | + if len(x) != 9: |
---|
17 | + raise TypeError("This isn't a verinfo because its length is %s instead of 9. %r :: %s" % (len(x), x, type(x),)) |
---|
18 | + if not isinstance(x[0], (int, long)): |
---|
19 | + raise TypeError("This isn't a verinfo because the type of its 0 element is %s instead of int/long. %r :: %s" % (type(x[0]), x, type(x),)) |
---|
20 | + if not hashutil.is_hash(x[1]): |
---|
21 | + raise TypeError("This isn't a verinfo because its 1 element (%r :: %s) is not a hash. %r :: %s" % (x[1], type(x[1]), x, type(x),)) |
---|
22 | + if not hashutil.is_hash(x[2]) and not is_salt(x[2]) and x[2] is not None: |
---|
23 | + raise TypeError("This isn't a verinfo because its 2 element (%r :: %s) is neither a hash nor a salt nor None. %r :: %s" % (x[2], type(x[2]), x, type(x),)) |
---|
24 | + if not isinstance(x[3], (int, long)): |
---|
25 | + raise TypeError("This isn't a verinfo because the type of its 3 element is %s instead of int/long. %r :: %s" % (type(x[3]), x, type(x),)) |
---|
26 | + if not isinstance(x[4], (int, long)): |
---|
27 | + raise TypeError("This isn't a verinfo because the type of its 4 element is %s instead of int/long. %r :: %s" % (type(x[4]), x, type(x),)) |
---|
28 | + if not isinstance(x[5], (int, long)): |
---|
29 | + raise TypeError("This isn't a verinfo because the type of its 5 element is %s instead of int/long. %r :: %s" % (type(x[5]), x, type(x),)) |
---|
30 | + if not isinstance(x[6], (int, long)): |
---|
31 | + raise TypeError("This isn't a verinfo because the type of its 6 element is %s instead of int/long. %r :: %s" % (type(x[6]), x, type(x),)) |
---|
32 | + if not isinstance(x[7], str): |
---|
33 | + raise TypeError("This isn't a verinfo because the type of its 7 element is %s instead of str. %r :: %s" % (type(x[7]), x, type(x),)) |
---|
34 | + if not isinstance(x[8], tuple): |
---|
35 | + raise TypeError("This isn't a verinfo because the type of its 8 element is %s instead of tuple. %r :: %s" % (type(x[8]), x, type(x),)) |
---|
36 | + |
---|
37 | MODE_CHECK = "MODE_CHECK" # query all peers |
---|
38 | MODE_ANYTHING = "MODE_ANYTHING" # one recoverable version |
---|
39 | MODE_WRITE = "MODE_WRITE" # replace all shares, probably.. not for initial |
---|
40 | diff -rN -u old-1.9.2/src/allmydata/mutable/layout.py new-1.9.2/src/allmydata/mutable/layout.py |
---|
41 | --- old-1.9.2/src/allmydata/mutable/layout.py 2012-06-21 00:35:33.535864570 -0300 |
---|
42 | +++ new-1.9.2/src/allmydata/mutable/layout.py 2012-06-21 00:35:33.709197226 -0300 |
---|
43 | @@ -1,10 +1,11 @@ |
---|
44 | |
---|
45 | import struct |
---|
46 | from allmydata.mutable.common import NeedMoreDataError, UnknownVersionError, \ |
---|
47 | - BadShareError |
---|
48 | + BadShareError, check_is_verinfo |
---|
49 | from allmydata.interfaces import HASH_SIZE, SALT_SIZE, SDMF_VERSION, \ |
---|
50 | MDMF_VERSION, IMutableSlotWriter |
---|
51 | from allmydata.util import mathutil |
---|
52 | +from allmydata.util.assertutil import precondition |
---|
53 | from twisted.python import failure |
---|
54 | from twisted.internet import defer |
---|
55 | from zope.interface import implements |
---|
56 | @@ -432,13 +433,12 @@ |
---|
57 | |
---|
58 | def get_verinfo(self): |
---|
59 | """ |
---|
60 | - I return my verinfo tuple. This is used by the ServermapUpdater |
---|
61 | - to keep track of versions of mutable files. |
---|
62 | + I return a Deferred that eventually fires with my verinfo tuple. |
---|
63 | |
---|
64 | - The verinfo tuple for MDMF files contains: |
---|
65 | + The verinfo tuple contains: |
---|
66 | - seqnum |
---|
67 | - root hash |
---|
68 | - - a blank (nothing) |
---|
69 | + - a salt |
---|
70 | - segsize |
---|
71 | - datalen |
---|
72 | - k |
---|
73 | @@ -447,12 +447,9 @@ |
---|
74 | - a tuple of offsets |
---|
75 | |
---|
76 | We include the nonce in MDMF to simplify processing of version |
---|
77 | - information tuples. |
---|
78 | - |
---|
79 | - The verinfo tuple for SDMF files is the same, but contains a |
---|
80 | - 16-byte IV instead of a hash of salts. |
---|
81 | + information tuples. XXX what nonce? |
---|
82 | """ |
---|
83 | - return (self._seqnum, |
---|
84 | + x = (self._seqnum, |
---|
85 | self._share_pieces['root_hash'], |
---|
86 | self._share_pieces['salt'], |
---|
87 | self._segment_size, |
---|
88 | @@ -461,6 +458,8 @@ |
---|
89 | self._total_shares, |
---|
90 | self.get_signable(), |
---|
91 | self._get_offsets_tuple()) |
---|
92 | + check_is_verinfo(x) |
---|
93 | + return defer.succeed(x) |
---|
94 | |
---|
95 | def _get_offsets_dict(self): |
---|
96 | post_offset = HEADER_LENGTH |
---|
97 | @@ -604,10 +603,10 @@ |
---|
98 | # which prempetively read a big part of the share -- possible. |
---|
99 | # |
---|
100 | # The checkstring is the first three fields -- the version number, |
---|
101 | - # sequence number, root hash and root salt hash. This is consistent |
---|
102 | + # sequence number, root hash and root salt hash. This is consistent # XXX first *four* fields? And should root hash salt be inserted into the table of fields layout above? And should it be called "root hash of salts" or something instead of "root salt hash"? |
---|
103 | # in meaning to what we have with SDMF files, except now instead of |
---|
104 | # using the literal salt, we use a value derived from all of the |
---|
105 | - # salts -- the share hash root. |
---|
106 | + # salts -- the share hash root. # XXX the root hash of salts? |
---|
107 | # |
---|
108 | # The salt is stored before the block for each segment. The block |
---|
109 | # hash tree is computed over the combination of block and salt for |
---|
110 | @@ -1100,14 +1099,34 @@ |
---|
111 | |
---|
112 | |
---|
113 | def get_verinfo(self): |
---|
114 | - return (self._seqnum, |
---|
115 | + """ |
---|
116 | + I return a Deferred that eventually fires with my verinfo tuple. |
---|
117 | + |
---|
118 | + The verinfo tuple contains: |
---|
119 | + - seqnum |
---|
120 | + - root hash |
---|
121 | + - None |
---|
122 | + - segsize |
---|
123 | + - datalen |
---|
124 | + - k |
---|
125 | + - n |
---|
126 | + - prefix (the thing that you sign) |
---|
127 | + - a tuple of offsets |
---|
128 | + |
---|
129 | + We include the nonce in MDMF to simplify processing of version |
---|
130 | + information tuples. XXX what nonce? |
---|
131 | + """ |
---|
132 | + x = (self._seqnum, |
---|
133 | self._root_hash, |
---|
134 | - self._required_shares, |
---|
135 | - self._total_shares, |
---|
136 | + None, |
---|
137 | self._segment_size, |
---|
138 | self._data_length, |
---|
139 | + self._required_shares, |
---|
140 | + self._total_shares, |
---|
141 | self.get_signable(), |
---|
142 | self._get_offsets_tuple()) |
---|
143 | + check_is_verinfo(x) |
---|
144 | + return defer.succeed(x) |
---|
145 | |
---|
146 | |
---|
147 | def finish_publishing(self): |
---|
148 | @@ -1177,7 +1196,7 @@ |
---|
149 | f.trap(struct.error) |
---|
150 | raise BadShareError(f.value.args[0]) |
---|
151 | |
---|
152 | -class MDMFSlotReadProxy: |
---|
153 | +class MutableReadProxy: |
---|
154 | """ |
---|
155 | I read from a mutable slot filled with data written in the MDMF data |
---|
156 | format (which is described above). |
---|
157 | @@ -1242,7 +1261,7 @@ |
---|
158 | # MDMF, though we'll be left with 4 more bytes than we |
---|
159 | # need if this ends up being MDMF. This is probably less |
---|
160 | # expensive than the cost of a second roundtrip. |
---|
161 | - readvs = [(0, 123)] |
---|
162 | + readvs = [(0, 123)] # XXX 123 != 107 |
---|
163 | d = self._read(readvs, force_remote) |
---|
164 | d.addCallback(self._process_encoding_parameters) |
---|
165 | d.addCallback(self._process_offsets) |
---|
166 | @@ -1686,18 +1705,17 @@ |
---|
167 | # The offsets tuple is another component of the version |
---|
168 | # information tuple. It is basically our offsets dictionary, |
---|
169 | # itemized and in a tuple. |
---|
170 | - return self._offsets.copy() |
---|
171 | + return tuple([(key, value) for key, value in self._offsets.items()]) |
---|
172 | |
---|
173 | |
---|
174 | def get_verinfo(self): |
---|
175 | """ |
---|
176 | - I return my verinfo tuple. This is used by the ServermapUpdater |
---|
177 | - to keep track of versions of mutable files. |
---|
178 | + I return a Deferred that eventually fires with my verinfo tuple. |
---|
179 | |
---|
180 | The verinfo tuple for MDMF files contains: |
---|
181 | - seqnum |
---|
182 | - root hash |
---|
183 | - - a blank (nothing) |
---|
184 | + - salt (SDMF) or None (MDMF) |
---|
185 | - segsize |
---|
186 | - datalen |
---|
187 | - k |
---|
188 | @@ -1713,11 +1731,12 @@ |
---|
189 | """ |
---|
190 | d = self._maybe_fetch_offsets_and_header() |
---|
191 | def _build_verinfo(ignored): |
---|
192 | + precondition(self._version_number in (SDMF_VERSION, MDMF_VERSION), self._version_number) |
---|
193 | if self._version_number == SDMF_VERSION: |
---|
194 | salt_to_use = self._salt |
---|
195 | else: |
---|
196 | salt_to_use = None |
---|
197 | - return (self._sequence_number, |
---|
198 | + x = (self._sequence_number, |
---|
199 | self._root_hash, |
---|
200 | salt_to_use, |
---|
201 | self._segment_size, |
---|
202 | @@ -1726,6 +1745,8 @@ |
---|
203 | self._total_shares, |
---|
204 | self._build_prefix(), |
---|
205 | self._get_offsets_tuple()) |
---|
206 | + check_is_verinfo(x) |
---|
207 | + return x |
---|
208 | d.addCallback(_build_verinfo) |
---|
209 | return d |
---|
210 | |
---|
211 | @@ -1747,15 +1768,6 @@ |
---|
212 | readvs) |
---|
213 | |
---|
214 | |
---|
215 | - def is_sdmf(self): |
---|
216 | - """I tell my caller whether or not my remote file is SDMF or MDMF |
---|
217 | - """ |
---|
218 | - d = self._maybe_fetch_offsets_and_header() |
---|
219 | - d.addCallback(lambda ignored: |
---|
220 | - self._version_number == 0) |
---|
221 | - return d |
---|
222 | - |
---|
223 | - |
---|
224 | class LayoutInvalid(BadShareError): |
---|
225 | """ |
---|
226 | This isn't a valid MDMF mutable file |
---|
227 | diff -rN -u old-1.9.2/src/allmydata/mutable/publish.py new-1.9.2/src/allmydata/mutable/publish.py |
---|
228 | --- old-1.9.2/src/allmydata/mutable/publish.py 2012-06-21 00:35:33.539197890 -0300 |
---|
229 | +++ new-1.9.2/src/allmydata/mutable/publish.py 2012-06-21 00:35:33.709197226 -0300 |
---|
230 | @@ -16,7 +16,7 @@ |
---|
231 | from foolscap.api import eventually, fireEventually |
---|
232 | |
---|
233 | from allmydata.mutable.common import MODE_WRITE, MODE_CHECK, MODE_REPAIR, \ |
---|
234 | - UncoordinatedWriteError, NotEnoughServersError |
---|
235 | + UncoordinatedWriteError, NotEnoughServersError, check_is_verinfo |
---|
236 | from allmydata.mutable.servermap import ServerMap |
---|
237 | from allmydata.mutable.layout import get_version_from_checkstring,\ |
---|
238 | unpack_mdmf_checkstring, \ |
---|
239 | @@ -246,7 +246,7 @@ |
---|
240 | self.bad_share_checkstrings = {} |
---|
241 | |
---|
242 | # This is set at the last step of the publishing process. |
---|
243 | - self.versioninfo = "" |
---|
244 | + self.versioninfo = None |
---|
245 | |
---|
246 | # we use the servermap to populate the initial goal: this way we will |
---|
247 | # try to update each existing share in place. Since we're |
---|
248 | @@ -439,7 +439,7 @@ |
---|
249 | self.bad_share_checkstrings = {} |
---|
250 | |
---|
251 | # This is set at the last step of the publishing process. |
---|
252 | - self.versioninfo = "" |
---|
253 | + self.versioninfo = None |
---|
254 | |
---|
255 | # we use the servermap to populate the initial goal: this way we will |
---|
256 | # try to update each existing share in place. |
---|
257 | @@ -881,13 +881,21 @@ |
---|
258 | d.addErrback(self._connection_problem, writer) |
---|
259 | d.addCallback(self._got_write_answer, writer, started) |
---|
260 | ds.append(d) |
---|
261 | - self._record_verinfo() |
---|
262 | - self._status.timings['pack'] = time.time() - started |
---|
263 | - return defer.DeferredList(ds) |
---|
264 | - |
---|
265 | - |
---|
266 | - def _record_verinfo(self): |
---|
267 | - self.versioninfo = self._get_some_writer().get_verinfo() |
---|
268 | + vd = self._record_verinfo(None) |
---|
269 | + def _record_time(o): |
---|
270 | + self._status.timings['pack'] = time.time() - started |
---|
271 | + return o |
---|
272 | + vd.addCallback(_record_time) |
---|
273 | + vd.addCallback(lambda ign: defer.DeferredList(ds)) |
---|
274 | + return vd |
---|
275 | + |
---|
276 | + def _record_verinfo(self, ignored): |
---|
277 | + d = self._get_some_writer().get_verinfo() |
---|
278 | + def _record(versioninfo): |
---|
279 | + check_is_verinfo(versioninfo) |
---|
280 | + self.versioninfo = versioninfo |
---|
281 | + d.addCallback(_record) |
---|
282 | + return d |
---|
283 | |
---|
284 | |
---|
285 | def _connection_problem(self, f, writer): |
---|
286 | @@ -1146,7 +1154,7 @@ |
---|
287 | # self.versioninfo is set during the last phase of publishing. |
---|
288 | # If we get there, we know that responses correspond to placed |
---|
289 | # shares, and can safely execute these statements. |
---|
290 | - if self.versioninfo: |
---|
291 | + if self.versioninfo is not None: |
---|
292 | self.log("wrote successfully: adding new share to servermap") |
---|
293 | self._servermap.add_new_share(server, writer.shnum, |
---|
294 | self.versioninfo, started) |
---|
295 | diff -rN -u old-1.9.2/src/allmydata/mutable/retrieve.py new-1.9.2/src/allmydata/mutable/retrieve.py |
---|
296 | --- old-1.9.2/src/allmydata/mutable/retrieve.py 2012-06-21 00:35:33.542531210 -0300 |
---|
297 | +++ new-1.9.2/src/allmydata/mutable/retrieve.py 2012-06-21 00:35:33.709197226 -0300 |
---|
298 | @@ -18,7 +18,7 @@ |
---|
299 | |
---|
300 | from allmydata.mutable.common import CorruptShareError, BadShareError, \ |
---|
301 | UncoordinatedWriteError |
---|
302 | -from allmydata.mutable.layout import MDMFSlotReadProxy |
---|
303 | +from allmydata.mutable.layout import MutableReadProxy |
---|
304 | |
---|
305 | class RetrieveStatus: |
---|
306 | implements(IRetrieveStatus) |
---|
307 | @@ -145,7 +145,7 @@ |
---|
308 | self._status.set_helper(False) |
---|
309 | self._status.set_progress(0.0) |
---|
310 | self._status.set_active(True) |
---|
311 | - (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, |
---|
312 | + (seqnum, root_hash, salt_or_none, segsize, datalength, k, N, prefix, |
---|
313 | offsets_tuple) = self.verinfo |
---|
314 | self._status.set_size(datalength) |
---|
315 | self._status.set_encoding(k, N) |
---|
316 | @@ -271,7 +271,7 @@ |
---|
317 | # how many shares do we need? |
---|
318 | (seqnum, |
---|
319 | root_hash, |
---|
320 | - IV, |
---|
321 | + salt_or_none, |
---|
322 | segsize, |
---|
323 | datalength, |
---|
324 | k, |
---|
325 | @@ -292,7 +292,7 @@ |
---|
326 | # data that they have, then change this method to do that. |
---|
327 | any_cache = self._node._read_from_cache(self.verinfo, shnum, |
---|
328 | 0, 1000) |
---|
329 | - reader = MDMFSlotReadProxy(server.get_rref(), |
---|
330 | + reader = MutableReadProxy(server.get_rref(), |
---|
331 | self._storage_index, |
---|
332 | shnum, |
---|
333 | any_cache) |
---|
334 | @@ -339,7 +339,7 @@ |
---|
335 | """ |
---|
336 | (seqnum, |
---|
337 | root_hash, |
---|
338 | - IV, |
---|
339 | + salt_or_none, |
---|
340 | segsize, |
---|
341 | datalength, |
---|
342 | k, |
---|
343 | @@ -351,7 +351,7 @@ |
---|
344 | self._segment_size = segsize |
---|
345 | self._data_length = datalength |
---|
346 | |
---|
347 | - if not IV: |
---|
348 | + if salt_or_none is None: |
---|
349 | self._version = MDMF_VERSION |
---|
350 | else: |
---|
351 | self._version = SDMF_VERSION |
---|
352 | @@ -516,7 +516,7 @@ |
---|
353 | """ |
---|
354 | (seqnum, |
---|
355 | root_hash, |
---|
356 | - IV, |
---|
357 | + salt_or_none, |
---|
358 | segsize, |
---|
359 | datalength, |
---|
360 | k, |
---|
361 | @@ -862,7 +862,7 @@ |
---|
362 | blocks_and_salts.update(d) |
---|
363 | |
---|
364 | # All of these blocks should have the same salt; in SDMF, it is |
---|
365 | - # the file-wide IV, while in MDMF it is the per-segment salt. In |
---|
366 | + # the file-wide salt, while in MDMF it is the per-segment salt. In |
---|
367 | # either case, we just need to get one of them and use it. |
---|
368 | # |
---|
369 | # d.items()[0] is like (shnum, (block, salt)) |
---|
370 | @@ -975,7 +975,7 @@ |
---|
371 | self._status.set_progress(1.0) |
---|
372 | |
---|
373 | # remember the encoding parameters, use them again next time |
---|
374 | - (seqnum, root_hash, IV, segsize, datalength, k, N, prefix, |
---|
375 | + (seqnum, root_hash, salt_or_none, segsize, datalength, k, N, prefix, |
---|
376 | offsets_tuple) = self.verinfo |
---|
377 | self._node._populate_required_shares(k) |
---|
378 | self._node._populate_total_shares(N) |
---|
379 | diff -rN -u old-1.9.2/src/allmydata/mutable/servermap.py new-1.9.2/src/allmydata/mutable/servermap.py |
---|
380 | --- old-1.9.2/src/allmydata/mutable/servermap.py 2012-06-21 00:35:33.542531210 -0300 |
---|
381 | +++ new-1.9.2/src/allmydata/mutable/servermap.py 2012-06-21 00:35:33.709197226 -0300 |
---|
382 | @@ -13,8 +13,8 @@ |
---|
383 | from pycryptopp.publickey import rsa |
---|
384 | |
---|
385 | from allmydata.mutable.common import MODE_CHECK, MODE_ANYTHING, MODE_WRITE, \ |
---|
386 | - MODE_READ, MODE_REPAIR, CorruptShareError |
---|
387 | -from allmydata.mutable.layout import SIGNED_PREFIX_LENGTH, MDMFSlotReadProxy |
---|
388 | + MODE_READ, MODE_REPAIR, CorruptShareError, check_is_verinfo |
---|
389 | +from allmydata.mutable.layout import SIGNED_PREFIX_LENGTH, MutableReadProxy |
---|
390 | |
---|
391 | class UpdateStatus: |
---|
392 | implements(IServermapUpdaterStatus) |
---|
393 | @@ -121,7 +121,7 @@ |
---|
394 | self._last_update_time = 0 |
---|
395 | self.update_data = {} # shnum -> [(verinfo,(blockhashes,start,end)),..] |
---|
396 | # where blockhashes is a list of bytestrings (the result of |
---|
397 | - # layout.MDMFSlotReadProxy.get_blockhashes), and start/end are both |
---|
398 | + # layout.MutableReadProxy.get_blockhashes), and start/end are both |
---|
399 | # (block,salt) tuple-of-bytestrings from get_block_and_salt() |
---|
400 | |
---|
401 | def copy(self): |
---|
402 | @@ -640,9 +640,8 @@ |
---|
403 | to occur when the file is downloaded, or when the file is |
---|
404 | updated. |
---|
405 | """ |
---|
406 | - if verinfo: |
---|
407 | - self._node._add_to_cache(verinfo, shnum, 0, data) |
---|
408 | - |
---|
409 | + if verinfo is not None: # None when the updater was stopped mid-fetch |
---|
410 | + check_is_verinfo(verinfo); self._node._add_to_cache(verinfo, shnum, 0, data) |
---|
411 | |
---|
412 | def _got_results(self, datavs, server, readsize, storage_index, started): |
---|
413 | lp = self.log(format="got result from [%(name)s], %(numshares)d shares", |
---|
414 | @@ -668,11 +667,11 @@ |
---|
415 | else: |
---|
416 | self._empty_servers.add(server) |
---|
417 | |
---|
418 | - ds = [] |
---|
419 | - |
---|
420 | + # a list of deferreds, one for each share that we found on this server. |
---|
421 | + sds = [] |
---|
422 | for shnum,datav in datavs.items(): |
---|
423 | data = datav[0] |
---|
424 | - reader = MDMFSlotReadProxy(ss, |
---|
425 | + reader = MutableReadProxy(ss, |
---|
426 | storage_index, |
---|
427 | shnum, |
---|
428 | data) |
---|
429 | @@ -698,13 +697,6 @@ |
---|
430 | # consequence, so the first entry in our deferredlist will |
---|
431 | # be None. |
---|
432 | |
---|
433 | - # - Next, we need the version information. We almost |
---|
434 | - # certainly got this by reading the first thousand or so |
---|
435 | - # bytes of the share on the storage server, so we |
---|
436 | - # shouldn't need to fetch anything at this step. |
---|
437 | - d2 = reader.get_verinfo() |
---|
438 | - d2.addErrback(lambda error, shnum=shnum, data=data: |
---|
439 | - self._got_corrupt_share(error, shnum, server, data, lp)) |
---|
440 | # - Next, we need the signature. For an SDMF share, it is |
---|
441 | # likely that we fetched this when doing our initial fetch |
---|
442 | # to get the version information. In MDMF, this lives at |
---|
443 | @@ -727,39 +719,45 @@ |
---|
444 | else: |
---|
445 | d4 = defer.succeed(None) |
---|
446 | |
---|
447 | + # - Next, we need the version information. We almost |
---|
448 | + # certainly got this by reading the first thousand or so |
---|
449 | + # bytes of the share on the storage server, so we |
---|
450 | + # shouldn't need to fetch anything at this step. |
---|
451 | + d2 = reader.get_verinfo() |
---|
452 | + d2.addErrback(lambda error, shnum=shnum, data=data: |
---|
453 | + self._got_corrupt_share(error, shnum, server, data, lp)) |
---|
454 | |
---|
455 | if self.fetch_update_data: |
---|
456 | # fetch the block hash tree and first + last segment, as |
---|
457 | # configured earlier. |
---|
458 | # Then set them in wherever we happen to want to set |
---|
459 | # them. |
---|
460 | - ds = [] |
---|
461 | - # XXX: We do this above, too. Is there a good way to |
---|
462 | - # make the two routines share the value without |
---|
463 | - # introducing more roundtrips? |
---|
464 | - ds.append(reader.get_verinfo()) |
---|
465 | - ds.append(reader.get_blockhashes()) |
---|
466 | - ds.append(reader.get_block_and_salt(self.start_segment)) |
---|
467 | - ds.append(reader.get_block_and_salt(self.end_segment)) |
---|
468 | - d5 = deferredutil.gatherResults(ds) |
---|
469 | + # a list of deferreds, one for each of the four parts of this share that we want to process. |
---|
470 | + pds = [] |
---|
471 | + pds.append(d2) |
---|
472 | + pds.append(reader.get_blockhashes()) |
---|
473 | + pds.append(reader.get_block_and_salt(self.start_segment)) |
---|
474 | + pds.append(reader.get_block_and_salt(self.end_segment)) |
---|
475 | + d5 = deferredutil.gatherResults(pds) |
---|
476 | d5.addCallback(self._got_update_results_one_share, shnum) |
---|
477 | else: |
---|
478 | d5 = defer.succeed(None) |
---|
479 | |
---|
480 | - dl = defer.DeferredList([d, d2, d3, d4, d5]) |
---|
481 | - dl.addBoth(self._turn_barrier) |
---|
482 | - dl.addCallback(lambda results, shnum=shnum: |
---|
483 | + pdl = defer.DeferredList([d, d2, d3, d4, d5]) |
---|
484 | + pdl.addBoth(self._turn_barrier) |
---|
485 | + pdl.addCallback(lambda results, shnum=shnum: |
---|
486 | self._got_signature_one_share(results, shnum, server, lp)) |
---|
487 | - dl.addErrback(lambda error, shnum=shnum, data=data: |
---|
488 | - self._got_corrupt_share(error, shnum, server, data, lp)) |
---|
489 | - dl.addCallback(lambda verinfo, shnum=shnum, data=data: |
---|
490 | + pdl.addCallback(lambda verinfo, shnum=shnum, data=data: |
---|
491 | self._cache_good_sharedata(verinfo, shnum, now, data)) |
---|
492 | - ds.append(dl) |
---|
493 | - # dl is a deferred list that will fire when all of the shares |
---|
494 | - # that we found on this server are done processing. When dl fires, |
---|
495 | + pdl.addErrback(lambda error, shnum=shnum, data=data: |
---|
496 | + self._got_corrupt_share(error, shnum, server, data, lp)) |
---|
497 | + sds.append(pdl) |
---|
498 | + |
---|
499 | + # sdl is a deferred list that will fire when all of the shares |
---|
500 | + # that we found on this server are done processing. When sdl fires, |
---|
501 | # we know that processing is done, so we can decrement the |
---|
502 | # semaphore-like thing that we incremented earlier. |
---|
503 | - dl = defer.DeferredList(ds, fireOnOneErrback=True) |
---|
504 | + sdl = defer.DeferredList(sds, fireOnOneErrback=True) |
---|
505 | # Are we done? Done means that there are no more queries to |
---|
506 | # send, that there are no outstanding queries, and that we |
---|
507 | # haven't received any queries that are still processing. If we |
---|
508 | @@ -767,12 +765,12 @@ |
---|
509 | # that we returned to our caller to fire, which tells them that |
---|
510 | # they have a complete servermap, and that we won't be touching |
---|
511 | # the servermap anymore. |
---|
512 | - dl.addCallback(_done_processing) |
---|
513 | - dl.addCallback(self._check_for_done) |
---|
514 | - dl.addErrback(self._fatal_error) |
---|
515 | + sdl.addCallback(_done_processing) |
---|
516 | + sdl.addCallback(self._check_for_done) |
---|
517 | + sdl.addErrback(self._fatal_error) |
---|
518 | # all done! |
---|
519 | self.log("_got_results done", parent=lp, level=log.NOISY) |
---|
520 | - return dl |
---|
521 | + return sdl |
---|
522 | |
---|
523 | |
---|
524 | def _turn_barrier(self, result): |
---|
525 | @@ -816,7 +814,8 @@ |
---|
526 | self.log("but we're not running anymore.") |
---|
527 | return None |
---|
528 | |
---|
529 | - _, verinfo, signature, __, ___ = results |
---|
530 | + _, (__, verinfo), signature, ___, ____ = results |
---|
531 | + check_is_verinfo(verinfo) |
---|
532 | (seqnum, |
---|
533 | root_hash, |
---|
534 | saltish, |
---|
535 | @@ -825,8 +824,7 @@ |
---|
536 | k, |
---|
537 | n, |
---|
538 | prefix, |
---|
539 | - offsets) = verinfo[1] |
---|
540 | - offsets_tuple = tuple( [(key,value) for key,value in offsets.items()] ) |
---|
541 | + offsets) = verinfo |
---|
542 | |
---|
543 | # XXX: This should be done for us in the method, so |
---|
544 | # presumably you can go in there and fix it. |
---|
545 | @@ -838,7 +836,8 @@ |
---|
546 | k, |
---|
547 | n, |
---|
548 | prefix, |
---|
549 | - offsets_tuple) |
---|
550 | + offsets) |
---|
551 | + check_is_verinfo(verinfo) |
---|
552 | # This tuple uniquely identifies a share on the grid; we use it |
---|
553 | # to keep track of the ones that we've already seen. |
---|
554 | |
---|
555 | @@ -884,30 +883,9 @@ |
---|
556 | """ |
---|
557 | I record the update results in results. |
---|
558 | """ |
---|
559 | - assert len(results) == 4 |
---|
560 | + assert len(results) == 4, len(results) |
---|
561 | verinfo, blockhashes, start, end = results |
---|
562 | - (seqnum, |
---|
563 | - root_hash, |
---|
564 | - saltish, |
---|
565 | - segsize, |
---|
566 | - datalen, |
---|
567 | - k, |
---|
568 | - n, |
---|
569 | - prefix, |
---|
570 | - offsets) = verinfo |
---|
571 | - offsets_tuple = tuple( [(key,value) for key,value in offsets.items()] ) |
---|
572 | - |
---|
573 | - # XXX: This should be done for us in the method, so |
---|
574 | - # presumably you can go in there and fix it. |
---|
575 | - verinfo = (seqnum, |
---|
576 | - root_hash, |
---|
577 | - saltish, |
---|
578 | - segsize, |
---|
579 | - datalen, |
---|
580 | - k, |
---|
581 | - n, |
---|
582 | - prefix, |
---|
583 | - offsets_tuple) |
---|
584 | + check_is_verinfo(verinfo) |
---|
585 | |
---|
586 | update_data = (blockhashes, start, end) |
---|
587 | self._servermap.set_update_data_for_share_and_verinfo(share, |
---|
588 | diff -rN -u old-1.9.2/src/allmydata/scripts/debug.py new-1.9.2/src/allmydata/scripts/debug.py |
---|
589 | --- old-1.9.2/src/allmydata/scripts/debug.py 2012-06-21 00:35:33.549197850 -0300 |
---|
590 | +++ new-1.9.2/src/allmydata/scripts/debug.py 2012-06-21 00:35:33.712530547 -0300 |
---|
591 | @@ -306,7 +306,7 @@ |
---|
592 | print >>out |
---|
593 | |
---|
594 | def dump_MDMF_share(m, length, options): |
---|
595 | - from allmydata.mutable.layout import MDMFSlotReadProxy |
---|
596 | + from allmydata.mutable.layout import MutableReadProxy |
---|
597 | from allmydata.util import base32, hashutil |
---|
598 | from allmydata.uri import MDMFVerifierURI |
---|
599 | from allmydata.util.encodingutil import quote_output, to_str |
---|
600 | @@ -317,7 +317,7 @@ |
---|
601 | f = open(options['filename'], "rb") |
---|
602 | storage_index = None; shnum = 0 |
---|
603 | |
---|
604 | - class ShareDumper(MDMFSlotReadProxy): |
---|
605 | + class ShareDumper(MutableReadProxy): |
---|
606 | def _read(self, readvs, force_remote=False, queue=False): |
---|
607 | data = [] |
---|
608 | for (where,length) in readvs: |
---|
609 | @@ -755,10 +755,10 @@ |
---|
610 | seqnum, base32.b2a(root_hash), |
---|
611 | expiration, quote_output(abs_sharefile)) |
---|
612 | elif share_type == "MDMF": |
---|
613 | - from allmydata.mutable.layout import MDMFSlotReadProxy |
---|
614 | + from allmydata.mutable.layout import MutableReadProxy |
---|
615 | fake_shnum = 0 |
---|
616 | # TODO: factor this out with dump_MDMF_share() |
---|
617 | - class ShareDumper(MDMFSlotReadProxy): |
---|
618 | + class ShareDumper(MutableReadProxy): |
---|
619 | def _read(self, readvs, force_remote=False, queue=False): |
---|
620 | data = [] |
---|
621 | for (where,length) in readvs: |
---|
622 | diff -rN -u old-1.9.2/src/allmydata/test/test_checker.py new-1.9.2/src/allmydata/test/test_checker.py |
---|
623 | --- old-1.9.2/src/allmydata/test/test_checker.py 2012-06-21 00:35:33.562531132 -0300 |
---|
624 | +++ new-1.9.2/src/allmydata/test/test_checker.py 2012-06-21 00:35:33.715863868 -0300 |
---|
625 | @@ -405,7 +405,7 @@ |
---|
626 | d.addCallback(_stash_mutable) |
---|
627 | |
---|
628 | def _check_cr(cr, which): |
---|
629 | - self.failUnless(cr.is_healthy(), which) |
---|
630 | + self.failUnless(cr.is_healthy(), (cr, which)) |
---|
631 | |
---|
632 | # these two should work normally |
---|
633 | d.addCallback(lambda ign: self.imm.check(Monitor(), add_lease=True)) |
---|
634 | diff -rN -u old-1.9.2/src/allmydata/test/test_mutable.py new-1.9.2/src/allmydata/test/test_mutable.py |
---|
635 | --- old-1.9.2/src/allmydata/test/test_mutable.py 2012-06-21 00:35:33.582531053 -0300 |
---|
636 | +++ new-1.9.2/src/allmydata/test/test_mutable.py 2012-06-21 00:35:33.715863868 -0300 |
---|
637 | @@ -30,7 +30,7 @@ |
---|
638 | MutableData, \ |
---|
639 | DEFAULT_MAX_SEGMENT_SIZE |
---|
640 | from allmydata.mutable.servermap import ServerMap, ServermapUpdater |
---|
641 | -from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy |
---|
642 | +from allmydata.mutable.layout import unpack_header, MutableReadProxy |
---|
643 | from allmydata.mutable.repairer import MustForceRepairError |
---|
644 | |
---|
645 | import allmydata.test.common_util as testutil |
---|
646 | @@ -192,7 +192,7 @@ |
---|
647 | # won't need to use the rref that we didn't provide, nor the |
---|
648 | # storage index that we didn't provide. We do this because |
---|
649 | # the reader will work for both MDMF and SDMF. |
---|
650 | - reader = MDMFSlotReadProxy(None, None, shnum, data) |
---|
651 | + reader = MutableReadProxy(None, None, shnum, data) |
---|
652 | # We need to get the offsets for the next part. |
---|
653 | d = reader.get_verinfo() |
---|
654 | def _do_corruption(verinfo, data, shnum, shares): |
---|
655 | @@ -202,6 +202,7 @@ |
---|
656 | segsize, |
---|
657 | datalen, |
---|
658 | k, n, prefix, o) = verinfo |
---|
659 | + o = dict(o) |
---|
660 | if isinstance(offset, tuple): |
---|
661 | offset1, offset2 = offset |
---|
662 | else: |
---|
663 | @@ -1936,7 +1937,7 @@ |
---|
664 | initial_shares = self.old_shares[0] |
---|
665 | new_shares = self.old_shares[1] |
---|
666 | # TODO: this really shouldn't change anything. When we implement |
---|
667 | - # a "minimal-bandwidth" repairer", change this test to assert: |
---|
668 | + # a "minimal-bandwidth" repairer, change this test to assert: |
---|
669 | #self.failUnlessEqual(new_shares, initial_shares) |
---|
670 | |
---|
671 | # all shares should be in the same place as before |
---|
672 | @@ -2033,13 +2034,13 @@ |
---|
673 | |
---|
674 | def test_repairable_5shares(self): |
---|
675 | d = self.publish_mdmf() |
---|
676 | - def _delete_all_shares(ign): |
---|
677 | + def _delete_shares_after_first_5(ign): |
---|
678 | shares = self._storage._peers |
---|
679 | for peerid in shares: |
---|
680 | for shnum in list(shares[peerid]): |
---|
681 | if shnum > 4: |
---|
682 | del shares[peerid][shnum] |
---|
683 | - d.addCallback(_delete_all_shares) |
---|
684 | + d.addCallback(_delete_shares_after_first_5) |
---|
685 | d.addCallback(lambda ign: self._fn.check(Monitor())) |
---|
686 | d.addCallback(lambda check_results: self._fn.repair(check_results)) |
---|
687 | def _check(crr): |
---|
688 | diff -rN -u old-1.9.2/src/allmydata/test/test_storage.py new-1.9.2/src/allmydata/test/test_storage.py |
---|
689 | --- old-1.9.2/src/allmydata/test/test_storage.py 2012-06-21 00:35:33.589197694 -0300 |
---|
690 | +++ new-1.9.2/src/allmydata/test/test_storage.py 2012-06-21 00:35:33.719197188 -0300 |
---|
691 | @@ -20,7 +20,7 @@ |
---|
692 | from allmydata.storage.expirer import LeaseCheckingCrawler |
---|
693 | from allmydata.immutable.layout import WriteBucketProxy, WriteBucketProxy_v2, \ |
---|
694 | ReadBucketProxy |
---|
695 | -from allmydata.mutable.layout import MDMFSlotWriteProxy, MDMFSlotReadProxy, \ |
---|
696 | +from allmydata.mutable.layout import MDMFSlotWriteProxy, MutableReadProxy, \ |
---|
697 | LayoutInvalid, MDMFSIGNABLEHEADER, \ |
---|
698 | SIGNED_PREFIX, MDMFHEADER, \ |
---|
699 | MDMFOFFSETS, SDMFSlotWriteProxy, \ |
---|
700 | @@ -1352,11 +1352,11 @@ |
---|
701 | self.failIf(os.path.exists(bucketdir), bucketdir) |
---|
702 | |
---|
703 | |
---|
704 | -class MDMFProxies(unittest.TestCase, ShouldFailMixin): |
---|
705 | +class MutableProxies(unittest.TestCase, ShouldFailMixin): |
---|
706 | def setUp(self): |
---|
707 | self.sparent = LoggingServiceParent() |
---|
708 | self._lease_secret = itertools.count() |
---|
709 | - self.ss = self.create("MDMFProxies storage test server") |
---|
710 | + self.ss = self.create("MutableProxies storage test server") |
---|
711 | self.rref = RemoteBucket() |
---|
712 | self.rref.target = self.ss |
---|
713 | self.secrets = (self.write_enabler("we_secret"), |
---|
714 | @@ -1385,7 +1385,7 @@ |
---|
715 | |
---|
716 | def tearDown(self): |
---|
717 | self.sparent.stopService() |
---|
718 | - shutil.rmtree(self.workdir("MDMFProxies storage test server")) |
---|
719 | + shutil.rmtree(self.workdir("MutableProxies storage test server")) |
---|
720 | |
---|
721 | |
---|
722 | def write_enabler(self, we_tag): |
---|
723 | @@ -1601,7 +1601,7 @@ |
---|
724 | |
---|
725 | def test_read(self): |
---|
726 | self.write_test_share_to_server("si1") |
---|
727 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
728 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
729 | # Check that every method equals what we expect it to. |
---|
730 | d = defer.succeed(None) |
---|
731 | def _check_block_and_salt((block, salt)): |
---|
732 | @@ -1671,7 +1671,7 @@ |
---|
733 | |
---|
734 | def test_read_with_different_tail_segment_size(self): |
---|
735 | self.write_test_share_to_server("si1", tail_segment=True) |
---|
736 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
737 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
738 | d = mr.get_block_and_salt(5) |
---|
739 | def _check_tail_segment(results): |
---|
740 | block, salt = results |
---|
741 | @@ -1683,7 +1683,7 @@ |
---|
742 | |
---|
743 | def test_get_block_with_invalid_segnum(self): |
---|
744 | self.write_test_share_to_server("si1") |
---|
745 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
746 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
747 | d = defer.succeed(None) |
---|
748 | d.addCallback(lambda ignored: |
---|
749 | self.shouldFail(LayoutInvalid, "test invalid segnum", |
---|
750 | @@ -1694,7 +1694,7 @@ |
---|
751 | |
---|
752 | def test_get_encoding_parameters_first(self): |
---|
753 | self.write_test_share_to_server("si1") |
---|
754 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
755 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
756 | d = mr.get_encoding_parameters() |
---|
757 | def _check_encoding_parameters((k, n, segment_size, datalen)): |
---|
758 | self.failUnlessEqual(k, 3) |
---|
759 | @@ -1707,7 +1707,7 @@ |
---|
760 | |
---|
761 | def test_get_seqnum_first(self): |
---|
762 | self.write_test_share_to_server("si1") |
---|
763 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
764 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
765 | d = mr.get_seqnum() |
---|
766 | d.addCallback(lambda seqnum: |
---|
767 | self.failUnlessEqual(seqnum, 0)) |
---|
768 | @@ -1716,7 +1716,7 @@ |
---|
769 | |
---|
770 | def test_get_root_hash_first(self): |
---|
771 | self.write_test_share_to_server("si1") |
---|
772 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
773 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
774 | d = mr.get_root_hash() |
---|
775 | d.addCallback(lambda root_hash: |
---|
776 | self.failUnlessEqual(root_hash, self.root_hash)) |
---|
777 | @@ -1725,7 +1725,7 @@ |
---|
778 | |
---|
779 | def test_get_checkstring_first(self): |
---|
780 | self.write_test_share_to_server("si1") |
---|
781 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
782 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
783 | d = mr.get_checkstring() |
---|
784 | d.addCallback(lambda checkstring: |
---|
785 | self.failUnlessEqual(checkstring, self.checkstring)) |
---|
786 | @@ -2243,7 +2243,7 @@ |
---|
787 | d.addCallback(lambda ignored: |
---|
788 | mw.finish_publishing()) |
---|
789 | |
---|
790 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
791 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
792 | def _check_block_and_salt((block, salt)): |
---|
793 | self.failUnlessEqual(block, self.block) |
---|
794 | self.failUnlessEqual(salt, self.salt) |
---|
795 | @@ -2304,28 +2304,12 @@ |
---|
796 | return d |
---|
797 | |
---|
798 | |
---|
799 | - def test_is_sdmf(self): |
---|
800 | - # The MDMFSlotReadProxy should also know how to read SDMF files, |
---|
801 | - # since it will encounter them on the grid. Callers use the |
---|
802 | - # is_sdmf method to test this. |
---|
803 | - self.write_sdmf_share_to_server("si1") |
---|
804 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
805 | - d = mr.is_sdmf() |
---|
806 | - d.addCallback(lambda issdmf: |
---|
807 | - self.failUnless(issdmf)) |
---|
808 | - return d |
---|
809 | - |
---|
810 | - |
---|
811 | def test_reads_sdmf(self): |
---|
812 | # The slot read proxy should, naturally, know how to tell us |
---|
813 | # about data in the SDMF format |
---|
814 | self.write_sdmf_share_to_server("si1") |
---|
815 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
816 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
817 | d = defer.succeed(None) |
---|
818 | - d.addCallback(lambda ignored: |
---|
819 | - mr.is_sdmf()) |
---|
820 | - d.addCallback(lambda issdmf: |
---|
821 | - self.failUnless(issdmf)) |
---|
822 | |
---|
823 | # What do we need to read? |
---|
824 | # - The sharedata |
---|
825 | @@ -2391,13 +2375,9 @@ |
---|
826 | # read more segments than that. The reader should know this and |
---|
827 | # complain if we try to do that. |
---|
828 | self.write_sdmf_share_to_server("si1") |
---|
829 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
830 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
831 | d = defer.succeed(None) |
---|
832 | d.addCallback(lambda ignored: |
---|
833 | - mr.is_sdmf()) |
---|
834 | - d.addCallback(lambda issdmf: |
---|
835 | - self.failUnless(issdmf)) |
---|
836 | - d.addCallback(lambda ignored: |
---|
837 | self.shouldFail(LayoutInvalid, "test bad segment", |
---|
838 | None, |
---|
839 | mr.get_block_and_salt, 1)) |
---|
840 | @@ -2405,7 +2385,7 @@ |
---|
841 | |
---|
842 | |
---|
843 | def test_read_with_prefetched_mdmf_data(self): |
---|
844 | - # The MDMFSlotReadProxy will prefill certain fields if you pass |
---|
845 | + # The MutableReadProxy will prefill certain fields if you pass |
---|
846 | # it data that you have already fetched. This is useful for |
---|
847 | # cases like the Servermap, which prefetches ~2kb of data while |
---|
848 | # finding out which shares are on the remote peer so that it |
---|
849 | @@ -2413,7 +2393,7 @@ |
---|
850 | mdmf_data = self.build_test_mdmf_share() |
---|
851 | self.write_test_share_to_server("si1") |
---|
852 | def _make_mr(ignored, length): |
---|
853 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0, mdmf_data[:length]) |
---|
854 | + mr = MutableReadProxy(self.rref, "si1", 0, mdmf_data[:length]) |
---|
855 | return mr |
---|
856 | |
---|
857 | d = defer.succeed(None) |
---|
858 | @@ -2473,7 +2453,7 @@ |
---|
859 | sdmf_data = self.build_test_sdmf_share() |
---|
860 | self.write_sdmf_share_to_server("si1") |
---|
861 | def _make_mr(ignored, length): |
---|
862 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0, sdmf_data[:length]) |
---|
863 | + mr = MutableReadProxy(self.rref, "si1", 0, sdmf_data[:length]) |
---|
864 | return mr |
---|
865 | |
---|
866 | d = defer.succeed(None) |
---|
867 | @@ -2538,7 +2518,7 @@ |
---|
868 | # unrelated to the actual handling of the content of the file. |
---|
869 | # The reader should behave intelligently in these cases. |
---|
870 | self.write_test_share_to_server("si1", empty=True) |
---|
871 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
872 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
873 | # We should be able to get the encoding parameters, and they |
---|
874 | # should be correct. |
---|
875 | d = defer.succeed(None) |
---|
876 | @@ -2564,7 +2544,7 @@ |
---|
877 | |
---|
878 | def test_read_with_empty_sdmf_file(self): |
---|
879 | self.write_sdmf_share_to_server("si1", empty=True) |
---|
880 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
881 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
882 | # We should be able to get the encoding parameters, and they |
---|
883 | # should be correct |
---|
884 | d = defer.succeed(None) |
---|
885 | @@ -2590,7 +2570,7 @@ |
---|
886 | |
---|
887 | def test_verinfo_with_sdmf_file(self): |
---|
888 | self.write_sdmf_share_to_server("si1") |
---|
889 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
890 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
891 | # We should be able to get the version information. |
---|
892 | d = defer.succeed(None) |
---|
893 | d.addCallback(lambda ignored: |
---|
894 | @@ -2600,7 +2580,7 @@ |
---|
895 | self.failUnlessEqual(len(verinfo), 9) |
---|
896 | (seqnum, |
---|
897 | root_hash, |
---|
898 | - salt, |
---|
899 | + unused, |
---|
900 | segsize, |
---|
901 | datalen, |
---|
902 | k, |
---|
903 | @@ -2609,7 +2589,6 @@ |
---|
904 | offsets) = verinfo |
---|
905 | self.failUnlessEqual(seqnum, 0) |
---|
906 | self.failUnlessEqual(root_hash, self.root_hash) |
---|
907 | - self.failUnlessEqual(salt, self.salt) |
---|
908 | self.failUnlessEqual(segsize, 36) |
---|
909 | self.failUnlessEqual(datalen, 36) |
---|
910 | self.failUnlessEqual(k, 3) |
---|
911 | @@ -2618,20 +2597,20 @@ |
---|
912 | 0, |
---|
913 | seqnum, |
---|
914 | root_hash, |
---|
915 | - salt, |
---|
916 | + self.salt, |
---|
917 | k, |
---|
918 | n, |
---|
919 | segsize, |
---|
920 | datalen) |
---|
921 | self.failUnlessEqual(prefix, expected_prefix) |
---|
922 | - self.failUnlessEqual(offsets, self.offsets) |
---|
923 | + self.failUnlessEqual(offsets, tuple([(key, value) for key, value in self.offsets.items()])) |
---|
924 | d.addCallback(_check_verinfo) |
---|
925 | return d |
---|
926 | |
---|
927 | |
---|
928 | def test_verinfo_with_mdmf_file(self): |
---|
929 | self.write_test_share_to_server("si1") |
---|
930 | - mr = MDMFSlotReadProxy(self.rref, "si1", 0) |
---|
931 | + mr = MutableReadProxy(self.rref, "si1", 0) |
---|
932 | d = defer.succeed(None) |
---|
933 | d.addCallback(lambda ignored: |
---|
934 | mr.get_verinfo()) |
---|
935 | @@ -2663,7 +2642,7 @@ |
---|
936 | segsize, |
---|
937 | datalen) |
---|
938 | self.failUnlessEqual(prefix, expected_prefix) |
---|
939 | - self.failUnlessEqual(offsets, self.offsets) |
---|
940 | + self.failUnlessEqual(offsets, tuple([(key, value) for key, value in self.offsets.items()])) |
---|
941 | d.addCallback(_check_verinfo) |
---|
942 | return d |
---|
943 | |
---|
944 | diff -rN -u old-1.9.2/src/allmydata/util/hashutil.py new-1.9.2/src/allmydata/util/hashutil.py |
---|
945 | --- old-1.9.2/src/allmydata/util/hashutil.py 2012-06-21 00:35:33.605864296 -0300 |
---|
946 | +++ new-1.9.2/src/allmydata/util/hashutil.py 2012-06-21 00:35:33.722530508 -0300 |
---|
947 | @@ -209,3 +209,7 @@ |
---|
948 | BACKUPDB_DIRHASH_TAG = "allmydata_backupdb_dirhash_v1" |
---|
949 | def backupdb_dirhash(contents): |
---|
950 | return tagged_hash(BACKUPDB_DIRHASH_TAG, contents) |
---|
951 | + |
---|
952 | +def is_hash(x): |
---|
953 | + return isinstance(x, str) and len(x) == CRYPTO_VAL_SIZE |
---|
954 | + |
---|