1 | """ |
---|
2 | Ported to Python 3. |
---|
3 | """ |
---|
4 | |
---|
5 | import struct, time, os, sys |
---|
6 | |
---|
7 | from twisted.python import usage, failure |
---|
8 | from twisted.internet import defer |
---|
9 | from foolscap.logging import cli as foolscap_cli |
---|
10 | |
---|
11 | from allmydata.scripts.common import BaseOptions |
---|
12 | from allmydata import uri |
---|
13 | from allmydata.storage.mutable import MutableShareFile |
---|
14 | from allmydata.storage.immutable import ShareFile |
---|
15 | from allmydata.mutable.layout import unpack_share |
---|
16 | from allmydata.mutable.layout import MDMFSlotReadProxy |
---|
17 | from allmydata.mutable.common import NeedMoreDataError |
---|
18 | from allmydata.immutable.layout import ReadBucketProxy |
---|
19 | from allmydata.util import base32 |
---|
20 | from allmydata.util.encodingutil import quote_output |
---|
21 | from allmydata.scripts.types_ import SubCommands |
---|
22 | |
---|
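# bchr() converts a single integer byte value (0..255) into a one-byte bytes
# object (the Python 3 replacement for chr() in the byte-flipping code below).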
def bchr(s):
    return bytes([s])

class DumpOptions(BaseOptions):
    def getSynopsis(self):
        return "Usage: tahoe [global-options] debug dump-share SHARE_FILENAME"

    optFlags = [
        ["offsets", None, "Display a table of section offsets."],
        ["leases-only", None, "Dump leases but not CHK contents."],
        ]

    description = """
Print lots of information about the given share, by parsing the share's
contents. This includes share type, lease information, encoding parameters,
hash-tree roots, public keys, and segment sizes. This command also emits a
verify-cap for the file that uses the share.

 tahoe debug dump-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0
"""

    def parseArgs(self, filename):
        from allmydata.util.encodingutil import argv_to_abspath
        self['filename'] = argv_to_abspath(filename)

def dump_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util.encodingutil import quote_output

    out = options.stdout

    # check the version, to see if we have a mutable or immutable share
    print("share filename: %s" % quote_output(options['filename']), file=out)

    with open(options['filename'], "rb") as f:
        if MutableShareFile.is_valid_header(f.read(32)):
            return dump_mutable_share(options)
        # otherwise assume it's immutable
        return dump_immutable_share(options)

def dump_immutable_share(options):
    from allmydata.storage.immutable import ShareFile

    out = options.stdout
    f = ShareFile(options['filename'])
    if not options["leases-only"]:
        dump_immutable_chk_share(f, out, options)
    dump_immutable_lease_info(f, out)
    print(file=out)
    return 0

def dump_immutable_chk_share(f, out, options):
    from allmydata import uri
    from allmydata.util import base32
    from allmydata.immutable.layout import ReadBucketProxy
    from allmydata.util.encodingutil import quote_output, to_bytes

    # use a ReadBucketProxy to parse the bucket and find the uri extension
    bp = ReadBucketProxy(None, None, '')
    offsets = bp._parse_offsets(f.read_share_data(0, 0x44))
    print("%20s: %d" % ("version", bp._version), file=out)
    seek = offsets['uri_extension']
    length = struct.unpack(bp._fieldstruct,
                           f.read_share_data(seek, bp._fieldsize))[0]
    seek += bp._fieldsize
    UEB_data = f.read_share_data(seek, length)

    unpacked = uri.unpack_extension_readable(UEB_data)
    keys1 = ("size", "num_segments", "segment_size",
             "needed_shares", "total_shares")
    keys2 = ("codec_name", "codec_params", "tail_codec_params")
    keys3 = ("plaintext_hash", "plaintext_root_hash",
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}

    def to_string(v):
        if isinstance(v, bytes):
            return str(v, "utf-8")
        else:
            return str(v)

    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print("%20s: %s" % (dk, to_string(unpacked[k])), file=out)
    print(file=out)
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print("%20s: %s" % (dk, to_string(unpacked[k])), file=out)
    print(file=out)
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
            print("%20s: %s" % (dk, to_string(unpacked[k])), file=out)

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print(file=out)
        print("LEFTOVER:", file=out)
        for k in sorted(leftover):
            print("%20s: %s" % (k, to_string(unpacked[k])), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            uri_extension_hash = base32.a2b(unpacked["UEB_hash"])
            u = uri.CHKFileVerifierURI(storage_index, uri_extension_hash,
                                       unpacked["needed_shares"],
                                       unpacked["total_shares"], unpacked["size"])
            verify_cap = u.to_string()
            print("%20s: %s" % ("verify-cap", quote_output(verify_cap, quotemarks=False)), file=out)

    sizes = {}
    sizes['data'] = (offsets['plaintext_hash_tree'] -
                     offsets['data'])
    sizes['validation'] = (offsets['uri_extension'] -
                           offsets['plaintext_hash_tree'])
    sizes['uri-extension'] = len(UEB_data)
    print(file=out)
    print(" Size of data within the share:", file=out)
    for k in sorted(sizes):
        print("%20s: %s" % (k, sizes[k]), file=out)

    if options['offsets']:
        print(file=out)
        print(" Section Offsets:", file=out)
        print("%20s: %s" % ("share data", f._data_offset), file=out)
        for k in ["data", "plaintext_hash_tree", "crypttext_hash_tree",
                  "block_hashes", "share_hashes", "uri_extension"]:
            name = {"data": "block data"}.get(k, k)
            offset = f._data_offset + offsets[k]
            print(" %20s: %s (0x%x)" % (name, offset, offset), file=out)
        print("%20s: %s" % ("leases", f._lease_offset), file=out)

def dump_immutable_lease_info(f, out):
    # display lease information too
    print(file=out)
    leases = list(f.get_leases())
    if leases:
        for i,lease in enumerate(leases):
            when = format_expiration_time(lease.get_expiration_time())
            print(" Lease #%d: owner=%d, expire in %s" \
                  % (i, lease.owner_num, when), file=out)
    else:
        print(" No leases.", file=out)

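# Render a lease expiration timestamp as the time remaining until it expires,
# in seconds, with a rough "(N days)" or "(N hours)" hint for longer periods.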
def format_expiration_time(expiration_time):
    now = time.time()
    remains = expiration_time - now
    when = "%ds" % remains
    if remains > 24*3600:
        when += " (%d days)" % (remains // (24*3600))
    elif remains > 3600:
        when += " (%d hours)" % (remains // 3600)
    return when


def dump_mutable_share(options):
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.util import base32, idlib
    out = options.stdout
    m = MutableShareFile(options['filename'])
    f = open(options['filename'], "rb")
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    num_extra_leases = m._read_num_extra_leases(f)
    data_length = m._read_data_length(f)
    extra_lease_offset = m._read_extra_lease_offset(f)
    container_size = extra_lease_offset - m.DATA_OFFSET
    leases = list(m._enumerate_leases(f))

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    version = f.read(1)
    if version == b"\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    elif version == b"\x01":
        share_type = "MDMF"
    f.close()

    print(file=out)
    print("Mutable slot found:", file=out)
    print(" share_type: %s" % share_type, file=out)
    print(" write_enabler: %s" % str(base32.b2a(WE), "utf-8"), file=out)
    print(" WE for nodeid: %s" % idlib.nodeid_b2a(nodeid), file=out)
    print(" num_extra_leases: %d" % num_extra_leases, file=out)
    print(" container_size: %d" % container_size, file=out)
    print(" data_length: %d" % data_length, file=out)
    if leases:
        for (leasenum, lease) in leases:
            print(file=out)
            print(" Lease #%d:" % leasenum, file=out)
            print(" ownerid: %d" % lease.owner_num, file=out)
            when = format_expiration_time(lease.get_expiration_time())
            print(" expires in %s" % when, file=out)
            print(" renew_secret: %s" % lease.present_renew_secret(), file=out)
            print(" cancel_secret: %s" % lease.present_cancel_secret(), file=out)
            print(" secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid), file=out)
    else:
        print("No leases.", file=out)
    print(file=out)

    if share_type == "SDMF":
        dump_SDMF_share(m, data_length, options)
    elif share_type == "MDMF":
        dump_MDMF_share(m, data_length, options)

    return 0

def dump_SDMF_share(m, length, options):
    from allmydata.mutable.layout import unpack_share, unpack_header
    from allmydata.mutable.common import NeedMoreDataError
    from allmydata.util import base32, hashutil
    from allmydata.uri import SSKVerifierURI
    from allmydata.util.encodingutil import quote_output, to_bytes

    offset = m.DATA_OFFSET

    out = options.stdout

    f = open(options['filename'], "rb")
    f.seek(offset)
    data = f.read(min(length, 2000))
    f.close()

    try:
        pieces = unpack_share(data)
    except NeedMoreDataError as e:
        # retry once with the larger size
        size = e.needed_bytes
        f = open(options['filename'], "rb")
        f.seek(offset)
        data = f.read(min(length, size))
        f.close()
        pieces = unpack_share(data)

    (seqnum, root_hash, IV, k, N, segsize, datalen,
     pubkey, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    (ig_version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data)

    print(" SDMF contents:", file=out)
    print(" seqnum: %d" % seqnum, file=out)
    print(" root_hash: %s" % str(base32.b2a(root_hash), "utf-8"), file=out)
    print(" IV: %s" % str(base32.b2a(IV), "utf-8"), file=out)
    print(" required_shares: %d" % k, file=out)
    print(" total_shares: %d" % N, file=out)
    print(" segsize: %d" % segsize, file=out)
    print(" datalen: %d" % datalen, file=out)
    print(" enc_privkey: %d bytes" % len(enc_privkey), file=out)
    print(" pubkey: %d bytes" % len(pubkey), file=out)
    print(" signature: %d bytes" % len(signature), file=out)
    share_hash_ids = ",".join(sorted([str(hid)
                                      for hid in share_hash_chain.keys()]))
    print(" share_hash_chain: %s" % share_hash_ids, file=out)
    print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = SSKVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.
        print(file=out)
        print(" Section Offsets:", file=out)
        def printoffset(name, value, shift=0):
            print("%s%20s: %s (0x%x)" % (" "*shift, name, value, value), file=out)
        printoffset("first lease", m.HEADER_SIZE)
        printoffset("share data", m.DATA_OFFSET)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 2)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 2)
        for k in ["signature", "share_hash_chain", "block_hash_tree",
                  "share_data",
                  "enc_privkey", "EOF"]:
            name = {"share_data": "block data",
                    "EOF": "end of share data"}.get(k,k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 2)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4)
        f.close()

    print(file=out)


def dump_MDMF_share(m, length, options):
    from allmydata.mutable.layout import MDMFSlotReadProxy
    from allmydata.util import base32, hashutil
    from allmydata.uri import MDMFVerifierURI
    from allmydata.util.encodingutil import quote_output, to_bytes

    offset = m.DATA_OFFSET
    out = options.stdout

    f = open(options['filename'], "rb")
    storage_index = None; shnum = 0

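    # MDMFSlotReadProxy normally reads from a remote storage server; this
    # local subclass overrides _read() to satisfy each read vector directly
    # from the share file that is already open.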
    class ShareDumper(MDMFSlotReadProxy):
        def _read(self, readvs, force_remote=False, queue=False):
            data = []
            for (where,length) in readvs:
                f.seek(offset+where)
                data.append(f.read(length))
            return defer.succeed({shnum: data})

    p = ShareDumper(None, storage_index, shnum)
    def extract(func):
        stash = []
        # these methods return Deferreds, but we happen to know that they run
        # synchronously when not actually talking to a remote server
        d = func()
        d.addCallback(stash.append)
        return stash[0]

    verinfo = extract(p.get_verinfo)
    encprivkey = extract(p.get_encprivkey)
    signature = extract(p.get_signature)
    pubkey = extract(p.get_verification_key)
    block_hash_tree = extract(p.get_blockhashes)
    share_hash_chain = extract(p.get_sharehashes)
    f.close()

    (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
     offsets) = verinfo

    print(" MDMF contents:", file=out)
    print(" seqnum: %d" % seqnum, file=out)
    print(" root_hash: %s" % str(base32.b2a(root_hash), "utf-8"), file=out)
    #print(" IV: %s" % base32.b2a(IV), file=out)
    print(" required_shares: %d" % k, file=out)
    print(" total_shares: %d" % N, file=out)
    print(" segsize: %d" % segsize, file=out)
    print(" datalen: %d" % datalen, file=out)
    print(" enc_privkey: %d bytes" % len(encprivkey), file=out)
    print(" pubkey: %d bytes" % len(pubkey), file=out)
    print(" signature: %d bytes" % len(signature), file=out)
    share_hash_ids = ",".join([str(hid)
                               for hid in sorted(share_hash_chain.keys())])
    print(" share_hash_chain: %s" % share_hash_ids, file=out)
    print(" block_hash_tree: %d nodes" % len(block_hash_tree), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
    pieces = options['filename'].split(os.sep)
    if len(pieces) >= 2:
        piece = to_bytes(pieces[-2])
        if base32.could_be_base32_encoded(piece):
            storage_index = base32.a2b(piece)
            fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey)
            u = MDMFVerifierURI(storage_index, fingerprint)
            verify_cap = u.to_string()
            print(" verify-cap:", quote_output(verify_cap, quotemarks=False), file=out)

    if options['offsets']:
        # NOTE: this offset-calculation code is fragile, and needs to be
        # merged with MutableShareFile's internals.

        print(file=out)
        print(" Section Offsets:", file=out)
        def printoffset(name, value, shift=0):
            print("%s%.20s: %s (0x%x)" % (" "*shift, name, value, value), file=out)
        printoffset("first lease", m.HEADER_SIZE, 2)
        printoffset("share data", m.DATA_OFFSET, 2)
        o_seqnum = m.DATA_OFFSET + struct.calcsize(">B")
        printoffset("seqnum", o_seqnum, 4)
        o_root_hash = m.DATA_OFFSET + struct.calcsize(">BQ")
        printoffset("root_hash", o_root_hash, 4)
        for k in ["enc_privkey", "share_hash_chain", "signature",
                  "verification_key", "verification_key_end",
                  "share_data", "block_hash_tree", "EOF"]:
            name = {"share_data": "block data",
                    "verification_key": "pubkey",
                    "verification_key_end": "end of pubkey",
                    "EOF": "end of share data"}.get(k,k)
            offset = m.DATA_OFFSET + offsets[k]
            printoffset(name, offset, 4)
        f = open(options['filename'], "rb")
        printoffset("extra leases", m._read_extra_lease_offset(f) + 4, 2)
        f.close()

    print(file=out)


class DumpCapOptions(BaseOptions):
    def getSynopsis(self):
        return "Usage: tahoe [global-options] debug dump-cap [options] FILECAP"
    optParameters = [
        ["nodeid", "n",
         None, "Specify the storage server nodeid (ASCII), to construct WE and secrets."],
        ["client-secret", "c", None,
         "Specify the client's base secret (ASCII), to construct secrets."],
        ["client-dir", "d", None,
         "Specify the client's base directory, from which a -c secret will be read."],
        ]
    def parseArgs(self, cap):
        self.cap = cap

    description = """
Print information about the given cap-string (aka: URI, file-cap, dir-cap,
read-cap, write-cap). The URI string is parsed and unpacked. This prints the
type of the cap, its storage index, and any derived keys.

 tahoe debug dump-cap URI:SSK-Verifier:4vozh77tsrw7mdhnj7qvp5ky74:q7f3dwz76sjys4kqfdt3ocur2pay3a6rftnkqmi2uxu3vqsdsofq

This may be useful to determine if a read-cap and a write-cap refer to the
same file, or to extract the storage-index from a file-cap (to then use with
find-shares).

If additional information is provided (storage server nodeid and/or client
base secret), this command will compute the shared secrets used for the
write-enabler and for lease-renewal.
"""


def dump_cap(options):
    from allmydata import uri
    from allmydata.util import base32
    from base64 import b32decode
    from urllib.parse import unquote, urlparse

    out = options.stdout
    cap = options.cap
    nodeid = None
    if options['nodeid']:
        nodeid = b32decode(options['nodeid'].upper())
    secret = None
    if options['client-secret']:
        secret = base32.a2b(options['client-secret'].encode("ascii"))
    elif options['client-dir']:
        secretfile = os.path.join(options['client-dir'], "private", "secret")
        try:
            secret = base32.a2b(open(secretfile, "rb").read().strip())
        except EnvironmentError:
            pass

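    # also accept a web-API URL of the form http://.../uri/<cap>, and pull the
    # cap string back out of the path component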
    if cap.startswith("http"):
        scheme, netloc, path, params, query, fragment = urlparse(cap)
        assert path.startswith("/uri/")
        cap = unquote(path[len("/uri/"):])

    u = uri.from_string(cap)

    print(file=out)
    dump_uri_instance(u, nodeid, secret, out)

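# Derive and print the renewal/cancel secrets that a client with this base
# secret would use for the given storage index (and, when a server nodeid is
# also supplied, the per-server lease secrets).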
def _dump_secrets(storage_index, secret, nodeid, out):
    from allmydata.util import hashutil
    from allmydata.util import base32

    if secret:
        crs = hashutil.my_renewal_secret_hash(secret)
        print(" client renewal secret:", str(base32.b2a(crs), "ascii"), file=out)
        frs = hashutil.file_renewal_secret_hash(crs, storage_index)
        print(" file renewal secret:", str(base32.b2a(frs), "ascii"), file=out)
        if nodeid:
            renew = hashutil.bucket_renewal_secret_hash(frs, nodeid)
            print(" lease renewal secret:", str(base32.b2a(renew), "ascii"), file=out)
        ccs = hashutil.my_cancel_secret_hash(secret)
        print(" client cancel secret:", str(base32.b2a(ccs), "ascii"), file=out)
        fcs = hashutil.file_cancel_secret_hash(ccs, storage_index)
        print(" file cancel secret:", str(base32.b2a(fcs), "ascii"), file=out)
        if nodeid:
            cancel = hashutil.bucket_cancel_secret_hash(fcs, nodeid)
            print(" lease cancel secret:", str(base32.b2a(cancel), "ascii"), file=out)

def dump_uri_instance(u, nodeid, secret, out, show_header=True):
    from allmydata import uri
    from allmydata.storage.server import si_b2a
    from allmydata.util import base32, hashutil
    from allmydata.util.encodingutil import quote_output

    if isinstance(u, uri.CHKFileURI):
        if show_header:
            print("CHK File:", file=out)
        print(" key:", str(base32.b2a(u.key), "ascii"), file=out)
        print(" UEB hash:", str(base32.b2a(u.uri_extension_hash), "ascii"), file=out)
        print(" size:", u.size, file=out)
        print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
        print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out)
        _dump_secrets(u.get_storage_index(), secret, nodeid, out)
    elif isinstance(u, uri.CHKFileVerifierURI):
        if show_header:
            print("CHK Verifier URI:", file=out)
        print(" UEB hash:", str(base32.b2a(u.uri_extension_hash), "ascii"), file=out)
        print(" size:", u.size, file=out)
        print(" k/N: %d/%d" % (u.needed_shares, u.total_shares), file=out)
        print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out)

    elif isinstance(u, uri.LiteralFileURI):
        if show_header:
            print("Literal File URI:", file=out)
        print(" data:", quote_output(u.data), file=out)

    elif isinstance(u, uri.WriteableSSKFileURI): # SDMF
        if show_header:
            print("SDMF Writeable URI:", file=out)
        print(" writekey:", str(base32.b2a(u.writekey), "ascii"), file=out)
        print(" readkey:", str(base32.b2a(u.readkey), "ascii"), file=out)
        print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out)
        print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out)
        print(file=out)
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print(" write_enabler:", str(base32.b2a(we), "ascii"), file=out)
            print(file=out)
        _dump_secrets(u.get_storage_index(), secret, nodeid, out)
    elif isinstance(u, uri.ReadonlySSKFileURI):
        if show_header:
            print("SDMF Read-only URI:", file=out)
        print(" readkey:", str(base32.b2a(u.readkey), "ascii"), file=out)
        print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out)
        print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out)
    elif isinstance(u, uri.SSKVerifierURI):
        if show_header:
            print("SDMF Verifier URI:", file=out)
        print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out)
        print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out)

    elif isinstance(u, uri.WriteableMDMFFileURI): # MDMF
        if show_header:
            print("MDMF Writeable URI:", file=out)
        print(" writekey:", str(base32.b2a(u.writekey), "ascii"), file=out)
        print(" readkey:", str(base32.b2a(u.readkey), "ascii"), file=out)
        print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out)
        print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out)
        print(file=out)
        if nodeid:
            we = hashutil.ssk_write_enabler_hash(u.writekey, nodeid)
            print(" write_enabler:", str(base32.b2a(we), "ascii"), file=out)
            print(file=out)
        _dump_secrets(u.get_storage_index(), secret, nodeid, out)
    elif isinstance(u, uri.ReadonlyMDMFFileURI):
        if show_header:
            print("MDMF Read-only URI:", file=out)
        print(" readkey:", str(base32.b2a(u.readkey), "ascii"), file=out)
        print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out)
        print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out)
    elif isinstance(u, uri.MDMFVerifierURI):
        if show_header:
            print("MDMF Verifier URI:", file=out)
        print(" storage index:", str(si_b2a(u.get_storage_index()), "ascii"), file=out)
        print(" fingerprint:", str(base32.b2a(u.fingerprint), "ascii"), file=out)


    elif isinstance(u, uri.ImmutableDirectoryURI): # CHK-based directory
        if show_header:
            print("CHK Directory URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ImmutableDirectoryURIVerifier):
        if show_header:
            print("CHK Directory Verifier URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)

    elif isinstance(u, uri.DirectoryURI): # SDMF-based directory
        if show_header:
            print("Directory Writeable URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyDirectoryURI):
        if show_header:
            print("Directory Read-only URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.DirectoryURIVerifier):
        if show_header:
            print("Directory Verifier URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)

    elif isinstance(u, uri.MDMFDirectoryURI): # MDMF-based directory
        if show_header:
            print("Directory Writeable URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.ReadonlyMDMFDirectoryURI):
        if show_header:
            print("Directory Read-only URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)
    elif isinstance(u, uri.MDMFDirectoryURIVerifier):
        if show_header:
            print("Directory Verifier URI:", file=out)
        dump_uri_instance(u._filenode_uri, nodeid, secret, out, False)

    else:
        print("unknown cap type", file=out)

class FindSharesOptions(BaseOptions):
    def getSynopsis(self):
        return "Usage: tahoe [global-options] debug find-shares STORAGE_INDEX NODEDIRS.."

    def parseArgs(self, storage_index_s, *nodedirs):
        from allmydata.util.encodingutil import argv_to_abspath
        self.si_s = storage_index_s
        self.nodedirs = list(map(argv_to_abspath, nodedirs))

    description = """
Locate all shares for the given storage index. This command looks through one
or more node directories to find the shares. It returns a list of filenames,
one per line, for each share file found.

 tahoe debug find-shares 4vozh77tsrw7mdhnj7qvp5ky74 testgrid/node-*

It may be useful during testing, when running a test grid in which all the
nodes are on a local disk. The share files thus located can be counted,
examined (with dump-share), or corrupted/deleted to test checker/repairer.
"""

def find_shares(options):
    """Given a storage index and a list of node directories, emit a list of
    all matching shares to stdout, one per line. For example:

     find-shares.py 44kai1tui348689nrw8fjegc8c ~/testnet/node-*

    gives:

    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/5
    /home/warner/testnet/node-1/storage/shares/44k/44kai1tui348689nrw8fjegc8c/9
    /home/warner/testnet/node-2/storage/shares/44k/44kai1tui348689nrw8fjegc8c/2
    """
    from allmydata.storage.server import si_a2b, storage_index_to_dir
    from allmydata.util.encodingutil import listdir_unicode, quote_local_unicode_path

    out = options.stdout
    sharedir = storage_index_to_dir(si_a2b(options.si_s.encode("utf-8")))
    for d in options.nodedirs:
        d = os.path.join(d, "storage", "shares", sharedir)
        if os.path.exists(d):
            for shnum in listdir_unicode(d):
                print(quote_local_unicode_path(os.path.join(d, shnum), quotemarks=False), file=out)

    return 0


class CatalogSharesOptions(BaseOptions):
    def parseArgs(self, *nodedirs):
        from allmydata.util.encodingutil import argv_to_abspath
        self.nodedirs = list(map(argv_to_abspath, nodedirs))
        if not nodedirs:
            raise usage.UsageError("must specify at least one node directory")

    def getSynopsis(self):
        return "Usage: tahoe [global-options] debug catalog-shares NODEDIRS.."

    description = """
Locate all shares in the given node directories, and emit a one-line summary
of each share. Run it like this:

 tahoe debug catalog-shares testgrid/node-* >allshares.txt

The lines it emits will look like the following:

 CHK $SI $k/$N $filesize $UEB_hash $expiration $abspath_sharefile
 SDMF $SI $k/$N $filesize $seqnum/$roothash $expiration $abspath_sharefile
 UNKNOWN $abspath_sharefile

This command can be used to build up a catalog of shares from many storage
servers and then sort the results to compare all shares for the same file. If
you see shares with the same SI but different parameters/filesize/UEB_hash,
then something is wrong. The misc/find-share/anomalies.py script may be
useful for this purpose.
"""

def call(c, *args, **kwargs):
    # take advantage of the fact that ImmediateReadBucketProxy returns
    # Deferreds that are already fired
    results = []
    failures = []
    d = defer.maybeDeferred(c, *args, **kwargs)
    d.addCallbacks(results.append, failures.append)
    if failures:
        failures[0].raiseException()
    return results[0]

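# Emit a one-line catalog entry for a single share file, dispatching on the
# container header to the mutable or immutable describer.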
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
    with open(abs_sharefile, "rb") as f:
        prefix = f.read(32)
        if MutableShareFile.is_valid_header(prefix):
            _describe_mutable_share(abs_sharefile, f, now, si_s, out)
        elif ShareFile.is_valid_header(prefix):
            _describe_immutable_share(abs_sharefile, now, si_s, out)
        else:
            print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out)

def _describe_mutable_share(abs_sharefile, f, now, si_s, out):
    # mutable share
    m = MutableShareFile(abs_sharefile)
    WE, nodeid = m._read_write_enabler_and_nodeid(f)
    data_length = m._read_data_length(f)
    expiration_time = min( [lease.get_expiration_time()
                            for (i,lease) in m._enumerate_leases(f)] )
    expiration = max(0, expiration_time - now)

    share_type = "unknown"
    f.seek(m.DATA_OFFSET)
    version = f.read(1)
    if version == b"\x00":
        # this slot contains an SDMF share
        share_type = "SDMF"
    elif version == b"\x01":
        share_type = "MDMF"

    if share_type == "SDMF":
        f.seek(m.DATA_OFFSET)

        # Read at least the mutable header length, if possible. If there's
        # less data than that in the share, don't try to read more (we won't
        # be able to unpack the header in this case but we surely don't want
        # to try to unpack bytes *following* the data section as if they were
        # header data). Rather than 2000 we could use HEADER_LENGTH from
        # allmydata/mutable/layout.py, probably.
        data = f.read(min(data_length, 2000))

        try:
            pieces = unpack_share(data)
        except NeedMoreDataError as e:
            # retry once with the larger size
            size = e.needed_bytes
            f.seek(m.DATA_OFFSET)
            data = f.read(min(data_length, size))
            pieces = unpack_share(data)
        (seqnum, root_hash, IV, k, N, segsize, datalen,
         pubkey, signature, share_hash_chain, block_hash_tree,
         share_data, enc_privkey) = pieces

        print("SDMF %s %d/%d %d #%d:%s %d %s" % \
              (si_s, k, N, datalen,
               seqnum, str(base32.b2a(root_hash), "utf-8"),
               expiration, quote_output(abs_sharefile)), file=out)
    elif share_type == "MDMF":
        fake_shnum = 0
        # TODO: factor this out with dump_MDMF_share()
        class ShareDumper(MDMFSlotReadProxy):
            def _read(self, readvs, force_remote=False, queue=False):
                data = []
                for (where,length) in readvs:
                    f.seek(m.DATA_OFFSET+where)
                    data.append(f.read(length))
                return defer.succeed({fake_shnum: data})

        p = ShareDumper(None, "fake-si", fake_shnum)
        def extract(func):
            stash = []
            # these methods return Deferreds, but we happen to know that
            # they run synchronously when not actually talking to a
            # remote server
            d = func()
            d.addCallback(stash.append)
            return stash[0]

        verinfo = extract(p.get_verinfo)
        (seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
         offsets) = verinfo
        print("MDMF %s %d/%d %d #%d:%s %d %s" % \
              (si_s, k, N, datalen,
               seqnum, str(base32.b2a(root_hash), "utf-8"),
               expiration, quote_output(abs_sharefile)), file=out)
    else:
        print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)


def _describe_immutable_share(abs_sharefile, now, si_s, out):
    class ImmediateReadBucketProxy(ReadBucketProxy):
        def __init__(self, sf):
            self.sf = sf
            ReadBucketProxy.__init__(self, None, None, "")
        def __repr__(self):
            return "<ImmediateReadBucketProxy>"
        def _read(self, offset, size):
            return defer.succeed(sf.read_share_data(offset, size))

    # use a ReadBucketProxy to parse the bucket and find the uri extension
    sf = ShareFile(abs_sharefile)
    bp = ImmediateReadBucketProxy(sf)

    expiration_time = min(lease.get_expiration_time()
                          for lease in sf.get_leases())
    expiration = max(0, expiration_time - now)

    UEB_data = call(bp.get_uri_extension)
    unpacked = uri.unpack_extension_readable(UEB_data)

    k = unpacked["needed_shares"]
    N = unpacked["total_shares"]
    filesize = unpacked["size"]
    ueb_hash = unpacked["UEB_hash"]

    print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
                                        str(ueb_hash, "utf-8"), expiration,
                                        quote_output(abs_sharefile)), file=out)


def catalog_shares(options):
    from allmydata.util.encodingutil import listdir_unicode, quote_output

    out = options.stdout
    err = options.stderr
    now = time.time()
    for d in options.nodedirs:
        d = os.path.join(d, "storage", "shares")
        try:
            abbrevs = listdir_unicode(d)
        except EnvironmentError:
            # ignore nodes that have storage turned off altogether
            pass
        else:
            for abbrevdir in sorted(abbrevs):
                if abbrevdir == "incoming":
                    continue
                abbrevdir = os.path.join(d, abbrevdir)
                # this tool may get run against bad disks, so we can't assume
                # that listdir_unicode will always succeed. Try to catalog as much
                # as possible.
                try:
                    sharedirs = listdir_unicode(abbrevdir)
                    for si_s in sorted(sharedirs):
                        si_dir = os.path.join(abbrevdir, si_s)
                        catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err)
                except:
                    print("Error processing %s" % quote_output(abbrevdir), file=err)
                    failure.Failure().printTraceback(err)

    return 0

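# Sort key for share filenames: they are normally small integers, so sort
# numerically whenever the name parses as an int.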
def _as_number(s):
    try:
        return int(s)
    except ValueError:
        return "not int"

def catalog_shares_one_abbrevdir(si_s, si_dir, now, out, err):
    from allmydata.util.encodingutil import listdir_unicode, quote_output

    try:
        for shnum_s in sorted(listdir_unicode(si_dir), key=_as_number):
            abs_sharefile = os.path.join(si_dir, shnum_s)
            assert os.path.isfile(abs_sharefile)
            try:
                describe_share(abs_sharefile, si_s, shnum_s, now,
                               out)
            except:
                print("Error processing %s" % quote_output(abs_sharefile), file=err)
                failure.Failure().printTraceback(err)
    except:
        print("Error processing %s" % quote_output(si_dir), file=err)
        failure.Failure().printTraceback(err)

class CorruptShareOptions(BaseOptions):
    def getSynopsis(self):
        return "Usage: tahoe [global-options] debug corrupt-share SHARE_FILENAME"

    optParameters = [
        ["offset", "o", "block-random", "Specify which bit to flip."],
        ]

    description = """
Corrupt the given share by flipping a bit. This will cause a
verifying/downloading client to log an integrity-check failure incident, and
downloads will proceed with a different share.

The --offset parameter controls which bit should be flipped. The default is
to flip a single random bit of the block data.

 tahoe debug corrupt-share testgrid/node-3/storage/shares/4v/4vozh77tsrw7mdhnj7qvp5ky74/0

Obviously, this command should not be used in normal operation.
"""
    def parseArgs(self, filename):
        self['filename'] = filename

def corrupt_share(options):
    import random
    from allmydata.storage.mutable import MutableShareFile
    from allmydata.storage.immutable import ShareFile
    from allmydata.mutable.layout import unpack_header
    from allmydata.immutable.layout import ReadBucketProxy
    out = options.stdout
    fn = options['filename']
    assert options["offset"] == "block-random", "other offsets not implemented"
    # first, what kind of share is it?

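    # choose a random byte in the half-open range [start, end), flip one of
    # its bits in place, and report the location as "offset.b<bit>"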
    def flip_bit(start, end):
        offset = random.randrange(start, end)
        bit = random.randrange(0, 8)
        print("[%d..%d): %d.b%d" % (start, end, offset, bit), file=out)
        f = open(fn, "rb+")
        f.seek(offset)
        d = f.read(1)
        d = bchr(ord(d) ^ (0x01 << bit))
        f.seek(offset)
        f.write(d)
        f.close()

    with open(fn, "rb") as f:
        prefix = f.read(32)

    if MutableShareFile.is_valid_header(prefix):
        # mutable
        m = MutableShareFile(fn)
        with open(fn, "rb") as f:
            f.seek(m.DATA_OFFSET)
            # Read enough data to get a mutable header to unpack.
            data = f.read(2000)
        # make sure this slot contains an SDMF share
        assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported"
        f.close()

        (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
         ig_datalen, offsets) = unpack_header(data)

        assert version == 0, "we only handle v0 SDMF files"
        start = m.DATA_OFFSET + offsets["share_data"]
        end = m.DATA_OFFSET + offsets["enc_privkey"]
        flip_bit(start, end)
    else:
        # otherwise assume it's immutable
        f = ShareFile(fn)
        bp = ReadBucketProxy(None, None, '')
        offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
        start = f._data_offset + offsets["data"]
        end = f._data_offset + offsets["plaintext_hash_tree"]
        flip_bit(start, end)


class ReplOptions(BaseOptions):
    def getSynopsis(self):
        return "Usage: tahoe debug repl (OBSOLETE)"

def repl(options):
    print("'tahoe debug repl' is obsolete. Please run 'python' in a virtualenv.", file=options.stderr)
    return 1


DEFAULT_TESTSUITE = 'allmydata'

class TrialOptions(BaseOptions):
    def getSynopsis(self):
        return "Usage: tahoe debug trial (OBSOLETE)"

def trial(config):
    print("'tahoe debug trial' is obsolete. Please run 'tox', or use 'trial' in a virtualenv.", file=config.stderr)
    return 1

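# Wrap each flogtool subcommand's Options class so that its --help synopsis
# reads "tahoe [global-options] debug flogtool ..." instead of "flogtool ...".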
def fixOptionsClass(args):
    (subcmd, shortcut, OptionsClass, desc) = args
    class FixedOptionsClass(OptionsClass):
        def getSynopsis(self):
            t = OptionsClass.getSynopsis(self)
            i = t.find("Usage: flogtool ")
            if i >= 0:
                return "Usage: tahoe [global-options] debug flogtool " + t[i+len("Usage: flogtool "):]
            else:
                return "Usage: tahoe [global-options] debug flogtool %s [options]" % (subcmd,)
    return (subcmd, shortcut, FixedOptionsClass, desc)

class FlogtoolOptions(foolscap_cli.Options):
    def __init__(self):
        super(FlogtoolOptions, self).__init__()
        self.subCommands = list(map(fixOptionsClass, self.subCommands))

    def getSynopsis(self):
        return "Usage: tahoe [global-options] debug flogtool COMMAND [flogtool-options]"

    def parseOptions(self, all_subargs, *a, **kw):
        self.flogtool_args = list(all_subargs)
        return super(FlogtoolOptions, self).parseOptions(self.flogtool_args, *a, **kw)

    def getUsage(self, width=None):
        t = super(FlogtoolOptions, self).getUsage(width)
        t += """
The 'tahoe debug flogtool' command uses the correct imports for this instance
of Tahoe-LAFS.

Please run 'tahoe debug flogtool COMMAND --help' for more details on each
subcommand.
"""
        return t

    def opt_help(self):
        print(str(self))
        sys.exit(0)

def flogtool(config):
    sys.argv = ['flogtool'] + config.flogtool_args
    return foolscap_cli.run_flogtool()


class DebugCommand(BaseOptions):
    subCommands = [
        ["dump-share", None, DumpOptions,
         "Unpack and display the contents of a share (uri_extension and leases)."],
        ["dump-cap", None, DumpCapOptions, "Unpack a read-cap or write-cap."],
        ["find-shares", None, FindSharesOptions, "Locate sharefiles in node dirs."],
        ["catalog-shares", None, CatalogSharesOptions, "Describe all shares in node dirs."],
        ["corrupt-share", None, CorruptShareOptions, "Corrupt a share by flipping a bit."],
        ["repl", None, ReplOptions, "OBSOLETE"],
        ["trial", None, TrialOptions, "OBSOLETE"],
        ["flogtool", None, FlogtoolOptions, "Utilities to access log files."],
        ]
    def postOptions(self):
        if not hasattr(self, 'subOptions'):
            raise usage.UsageError("must specify a subcommand")
    synopsis = "COMMAND"

    def getUsage(self, width=None):
        t = BaseOptions.getUsage(self, width)
        t += """\

Please run e.g. 'tahoe debug dump-share --help' for more details on each
subcommand.
"""
        return t

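# map each 'tahoe debug' subcommand name to the function that implements it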
subDispatch = {
    "dump-share": dump_share,
    "dump-cap": dump_cap,
    "find-shares": find_shares,
    "catalog-shares": catalog_shares,
    "corrupt-share": corrupt_share,
    "repl": repl,
    "trial": trial,
    "flogtool": flogtool,
    }


def do_debug(options):
    so = options.subOptions
    so.stdout = options.stdout
    so.stderr = options.stderr
    f = subDispatch[options.subCommand]
    return f(so)


subCommands : SubCommands = [
    ("debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list."),
    ]

dispatch = {
    "debug": do_debug,
    }