1 patch for repository /home/davidsarah/tahoe/1.9alpha:

Sat Sep 17 03:00:04 BST 2011  david-sarah@jacaranda.org
  * Work-in-progress patch for pluggable backends. Still fails many tests. refs #999

New patches:

[Work-in-progress patch for pluggable backends. Still fails many tests. refs #999
david-sarah@jacaranda.org**20110917020004
 Ignore-this: b2a0d7c8e20037c690e0be02e81d37fe
] {
adddir ./src/allmydata/storage/backends
adddir ./src/allmydata/storage/backends/disk
move ./src/allmydata/storage/immutable.py ./src/allmydata/storage/backends/disk/immutable.py
move ./src/allmydata/storage/mutable.py ./src/allmydata/storage/backends/disk/mutable.py
adddir ./src/allmydata/storage/backends/null
hunk ./docs/garbage-collection.rst 177
     use this parameter to implement it.

     This key is only valid when age-based expiration is in use (i.e. when
-    ``expire.mode = age`` is used). It will be rejected if cutoff-date
+    ``expire.mode = age`` is used). It will be ignored if cutoff-date
     expiration is in use.

 ``expire.cutoff_date = (date string, required if mode=cutoff-date)``
hunk ./docs/garbage-collection.rst 196
     the last renewal time and the cutoff date.

     This key is only valid when cutoff-based expiration is in use (i.e. when
-    "expire.mode = cutoff-date"). It will be rejected if age-based expiration
+    "expire.mode = cutoff-date"). It will be ignored if age-based expiration
     is in use.

 expire.immutable = (boolean, optional)
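For reference, a tahoe.cfg [storage] section consistent with the two expiration
modes described above might look like this (example values only; the key names
come from docs/garbage-collection.rst and the client.py hunks below):

  [storage]
  enabled = true
  expire.enabled = true
  expire.mode = cutoff-date
  expire.cutoff_date = 2011-09-01
  expire.immutable = true
  expire.mutable = true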
hunk ./src/allmydata/client.py 245
             sharetypes.append("immutable")
         if self.get_config("storage", "expire.mutable", True, boolean=True):
             sharetypes.append("mutable")
-        expiration_sharetypes = tuple(sharetypes)

hunk ./src/allmydata/client.py 246
+        expiration_policy = {
+            'enabled': expire,
+            'mode': mode,
+            'override_lease_duration': o_l_d,
+            'cutoff_date': cutoff_date,
+            'sharetypes': tuple(sharetypes),
+        }
         ss = StorageServer(storedir, self.nodeid,
                            reserved_space=reserved,
                            discard_storage=discard,
hunk ./src/allmydata/client.py 258
                            readonly_storage=readonly,
                            stats_provider=self.stats_provider,
-                           expiration_enabled=expire,
-                           expiration_mode=mode,
-                           expiration_override_lease_duration=o_l_d,
-                           expiration_cutoff_date=cutoff_date,
-                           expiration_sharetypes=expiration_sharetypes)
+                           expiration_policy=expiration_policy)
         self.add_service(ss)

         d = self.when_tub_ready()
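The client.py hunks above collapse the five expiration_* keyword arguments into
a single expiration_policy dict. A minimal sketch of the consuming side (the
class and attribute names are illustrative, not taken from this patch; only
the dict keys come from the hunk above):

  class PolicyConsumer:
      def __init__(self, expiration_policy):
          # The five keys written by client.py above.
          self._expiration_enabled = expiration_policy['enabled']
          self._mode = expiration_policy['mode']                  # "age" or "cutoff-date"
          self._override_lease_duration = expiration_policy['override_lease_duration']
          self._cutoff_date = expiration_policy['cutoff_date']
          self._sharetypes = expiration_policy['sharetypes']      # subset of ("immutable", "mutable")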
hunk ./src/allmydata/immutable/offloaded.py 306
         if os.path.exists(self._encoding_file):
             self.log("ciphertext already present, bypassing fetch",
                      level=log.UNUSUAL)
+            # XXX the following comment is probably stale, since
+            # LocalCiphertextReader.get_plaintext_hashtree_leaves does not exist.
+            #
             # we'll still need the plaintext hashes (when
             # LocalCiphertextReader.get_plaintext_hashtree_leaves() is
             # called), and currently the easiest way to get them is to ask
hunk ./src/allmydata/immutable/upload.py 765
         self._status.set_progress(1, progress)
         return cryptdata

-
     def get_plaintext_hashtree_leaves(self, first, last, num_segments):
hunk ./src/allmydata/immutable/upload.py 766
+        """OBSOLETE; Get the leaf nodes of a merkle hash tree over the
+        plaintext segments, i.e. get the tagged hashes of the given segments.
+        The segment size is expected to be generated by the
+        IEncryptedUploadable before any plaintext is read or ciphertext
+        produced, so that the segment hashes can be generated with only a
+        single pass.
+
+        This returns a Deferred that fires with a sequence of hashes, using:
+
+         tuple(segment_hashes[first:last])
+
+        'num_segments' is used to assert that the number of segments that the
+        IEncryptedUploadable handled matches the number of segments that the
+        encoder was expecting.
+
+        This method must not be called until the final byte has been read
+        from read_encrypted(). Once this method is called, read_encrypted()
+        can never be called again.
+        """
         # this is currently unused, but will live again when we fix #453
         if len(self._plaintext_segment_hashes) < num_segments:
             # close out the last one
hunk ./src/allmydata/immutable/upload.py 803
         return defer.succeed(tuple(self._plaintext_segment_hashes[first:last]))

     def get_plaintext_hash(self):
+        """OBSOLETE; Get the hash of the whole plaintext.
+
+        This returns a Deferred that fires with a tagged SHA-256 hash of the
+        whole plaintext, obtained from hashutil.plaintext_hash(data).
+        """
+        # this is currently unused, but will live again when we fix #453
         h = self._plaintext_hasher.digest()
         return defer.succeed(h)

hunk ./src/allmydata/interfaces.py 29
 Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
 Offset = Number
 ReadSize = int # the 'int' constraint is 2**31 == 2Gib -- large files are processed in not-so-large increments
-WriteEnablerSecret = Hash # used to protect mutable bucket modifications
-LeaseRenewSecret = Hash # used to protect bucket lease renewal requests
-LeaseCancelSecret = Hash # used to protect bucket lease cancellation requests
+WriteEnablerSecret = Hash # used to protect mutable share modifications
+LeaseRenewSecret = Hash # used to protect lease renewal requests
+LeaseCancelSecret = Hash # used to protect lease cancellation requests

 class RIStubClient(RemoteInterface):
     """Each client publishes a service announcement for a dummy object called
hunk ./src/allmydata/interfaces.py 106
                          sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                          allocated_size=Offset, canary=Referenceable):
         """
-        @param storage_index: the index of the bucket to be created or
+        @param storage_index: the index of the shareset to be created or
                               increfed.
         @param sharenums: these are the share numbers (probably between 0 and
                           99) that the sender is proposing to store on this
hunk ./src/allmydata/interfaces.py 111
                           server.
-        @param renew_secret: This is the secret used to protect bucket refresh
+        @param renew_secret: This is the secret used to protect lease renewal.
                              This secret is generated by the client and
                              stored for later comparison by the server. Each
                              server is given a different secret.
hunk ./src/allmydata/interfaces.py 115
-        @param cancel_secret: Like renew_secret, but protects bucket decref.
-        @param canary: If the canary is lost before close(), the bucket is
+        @param cancel_secret: ignored
+        @param canary: If the canary is lost before close(), the allocation is
                        deleted.
         @return: tuple of (alreadygot, allocated), where alreadygot is what we
                  already have and allocated is what we hereby agree to accept.
hunk ./src/allmydata/interfaces.py 129
                   renew_secret=LeaseRenewSecret,
                   cancel_secret=LeaseCancelSecret):
         """
-        Add a new lease on the given bucket. If the renew_secret matches an
+        Add a new lease on the given shareset. If the renew_secret matches an
         existing lease, that lease will be renewed instead. If there is no
hunk ./src/allmydata/interfaces.py 131
-        bucket for the given storage_index, return silently. (note that in
+        shareset for the given storage_index, return silently. (Note that in
         tahoe-1.3.0 and earlier, IndexError was raised if there was no
hunk ./src/allmydata/interfaces.py 133
-        bucket)
+        shareset.)
         """
         return Any() # returns None now, but future versions might change

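To illustrate the add_lease semantics just described, a hypothetical
client-side call (rref is a foolscap RemoteReference to the storage server;
the secret values are placeholders):

  # A matching renew_secret renews the existing lease instead of adding a
  # second one; if there is no shareset for storage_index, the call returns
  # silently. cancel_secret remains in the signature but is now ignored.
  d = rref.callRemote("add_lease", storage_index, renew_secret, cancel_secret)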
hunk ./src/allmydata/interfaces.py 139
     def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
         """
-        Renew the lease on a given bucket, resetting the timer to 31 days.
-        Some networks will use this, some will not. If there is no bucket for
+        Renew the lease on a given shareset, resetting the timer to 31 days.
+        Some networks will use this, some will not. If there is no shareset for
         the given storage_index, IndexError will be raised.

         For mutable shares, if the given renew_secret does not match an
hunk ./src/allmydata/interfaces.py 146
         existing lease, IndexError will be raised with a note listing the
         server-nodeids on the existing leases, so leases on migrated shares
-        can be renewed or cancelled. For immutable shares, IndexError
-        (without the note) will be raised.
+        can be renewed. For immutable shares, IndexError (without the note)
+        will be raised.
         """
         return Any()

hunk ./src/allmydata/interfaces.py 154
     def get_buckets(storage_index=StorageIndex):
         return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

-
-
     def slot_readv(storage_index=StorageIndex,
                    shares=ListOf(int), readv=ReadVector):
         """Read a vector from the numbered shares associated with the given
hunk ./src/allmydata/interfaces.py 163

     def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                         secrets=TupleOf(WriteEnablerSecret,
-                                                        LeaseRenewSecret,
-                                                        LeaseCancelSecret),
+                                                        LeaseRenewSecret),
                                         tw_vectors=TestAndWriteVectorsForShares,
                                         r_vector=ReadVector,
                                         ):
hunk ./src/allmydata/interfaces.py 167
-        """General-purpose test-and-set operation for mutable slots. Perform
-        a bunch of comparisons against the existing shares. If they all pass,
-        then apply a bunch of write vectors to those shares. Then use the
-        read vectors to extract data from all the shares and return the data.
+        """
+        General-purpose atomic test-read-and-set operation for mutable slots.
+        Perform a bunch of comparisons against the existing shares. If they
+        all pass: use the read vectors to extract data from all the shares,
+        then apply a bunch of write vectors to those shares. Return the read
+        data, which does not include any modifications made by the writes.

         This method is, um, large. The goal is to allow clients to update all
         the shares associated with a mutable file in a single round trip.
hunk ./src/allmydata/interfaces.py 177

-        @param storage_index: the index of the bucket to be created or
+        @param storage_index: the index of the shareset to be created or
                               increfed.
         @param write_enabler: a secret that is stored along with the slot.
                               Writes are accepted from any caller who can
hunk ./src/allmydata/interfaces.py 183
                               present the matching secret. A different secret
                               should be used for each slot*server pair.
-        @param renew_secret: This is the secret used to protect bucket refresh
+        @param renew_secret: This is the secret used to protect lease renewal.
                              This secret is generated by the client and
                              stored for later comparison by the server. Each
                              server is given a different secret.
hunk ./src/allmydata/interfaces.py 187
-        @param cancel_secret: Like renew_secret, but protects bucket decref.
+        @param cancel_secret: ignored

hunk ./src/allmydata/interfaces.py 189
-        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
-        cancel_secret). The first is required to perform any write. The
-        latter two are used when allocating new shares. To simply acquire a
-        new lease on existing shares, use an empty testv and an empty writev.
+        The 'secrets' argument is a tuple with (write_enabler, renew_secret).
+        The write_enabler is required to perform any write. The renew_secret
+        is used when allocating new shares.

         Each share can have a separate test vector (i.e. a list of
         comparisons to perform). If all vectors for all shares pass, then all
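A sketch of a call under the new two-element secrets convention (all values
hypothetical):

  secrets = (write_enabler, renew_secret)   # cancel_secret no longer required
  tw_vectors = {
      0: ([(0, 4, "eq", "abcd")],   # test: bytes 0..3 of share 0 must equal "abcd"
          [(0, "efgh")],            # write: put "efgh" at offset 0
          None),                    # new_length: leave the share size alone
  }
  read_vector = [(0, 4)]            # also read back bytes 0..3
  d = rref.callRemote("slot_testv_and_readv_and_writev",
                      storage_index, secrets, tw_vectors, read_vector)
  # fires with (wrote, read_data); read_data shows the pre-write contents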
hunk ./src/allmydata/interfaces.py 280
         store that on disk.
         """

-class IStorageBucketWriter(Interface):
+
+class IStorageBackend(Interface):
     """
hunk ./src/allmydata/interfaces.py 283
-    Objects of this kind live on the client side.
+    Objects of this kind live on the server side and are used by the
+    storage server object.
     """
hunk ./src/allmydata/interfaces.py 286
-    def put_block(segmentnum=int, data=ShareData):
-        """@param data: For most segments, this data will be 'blocksize'
-        bytes in length. The last segment might be shorter.
-        @return: a Deferred that fires (with None) when the operation completes
+    def get_available_space():
+        """
+        Returns available space for share storage in bytes, or
+        None if this information is not available or if the available
+        space is unlimited.
+
+        If the backend is configured for read-only mode then this will
+        return 0.
+        """
+
+    def get_sharesets_for_prefix(prefix):
+        """
+        Generates IShareSet objects for all storage indices matching the
+        given prefix for which this backend holds shares.
+        """
+
+    def get_shareset(storageindex):
+        """
+        Get an IShareSet object for the given storage index.
+        """
+
+    def advise_corrupt_share(storageindex, sharetype, shnum, reason):
+        """
+        Clients who discover hash failures in shares that they have
+        downloaded from me will use this method to inform me about the
+        failures. I will record their concern so that my operator can
+        manually inspect the shares in question.
+
+        'sharetype' is either 'mutable' or 'immutable'. 'shnum' is the integer
+        share number. 'reason' is a human-readable explanation of the problem,
+        probably including some expected hash values and the computed ones
+        that did not match. Corruption advisories for mutable shares should
+        include a hash of the public key (the same value that appears in the
+        mutable-file verify-cap), since the current share format does not
+        store that on disk.
+
+        @param storageindex=str
+        @param sharetype=str
+        @param shnum=int
+        @param reason=str
+        """
+
+
+class IShareSet(Interface):
+    def get_storage_index():
+        """
+        Returns the storage index for this shareset.
+        """
+
+    def get_storage_index_string():
+        """
+        Returns the base32-encoded storage index for this shareset.
+        """
+
+    def get_overhead():
+        """
+        Returns the storage overhead, in bytes, of this shareset (exclusive
+        of the space used by its shares).
+        """
+
+    def get_shares():
+        """
+        Generates the IStoredShare objects held in this shareset.
+        """
+
+    def get_incoming_shnums():
+        """
+        Return a frozenset of the shnums (as ints) of incoming shares.
+        """
+
+    def make_bucket_writer(storageserver, shnum, max_space_per_bucket, lease_info, canary):
+        """
+        Create a bucket writer that can be used to write data to a given share.
+
+        @param storageserver=RIStorageServer
+        @param shnum=int: A share number in this shareset
+        @param max_space_per_bucket=int: The maximum space allocated for the
+            share, in bytes
+        @param lease_info=LeaseInfo: The initial lease information
+        @param canary=Referenceable: If the canary is lost before close(), the
+            bucket is deleted.
+        @return an IStorageBucketWriter for the given share
+        """
+
+    def make_bucket_reader(storageserver, share):
+        """
+        Create a bucket reader that can be used to read data from a given share.
+
+        @param storageserver=RIStorageServer
+        @param share=IStoredShare
+        @return an IStorageBucketReader for the given share
         """

hunk ./src/allmydata/interfaces.py 379
-    def put_plaintext_hashes(hashes=ListOf(Hash)):
+    def readv(wanted_shnums, read_vector):
         """
hunk ./src/allmydata/interfaces.py 381
+        Read a vector from the numbered shares in this shareset. An empty
+        wanted_shnums list means to return data from all known shares.
+
+        @param wanted_shnums=ListOf(int)
+        @param read_vector=ReadVector
+        @return DictOf(int, ReadData): shnum -> results, with one key per share
+        """
+
+    def testv_and_readv_and_writev(storageserver, secrets, test_and_write_vectors, read_vector, expiration_time):
+        """
+        General-purpose atomic test-read-and-set operation for mutable slots.
+        Perform a bunch of comparisons against the existing shares in this
+        shareset. If they all pass: use the read vectors to extract data from
+        all the shares, then apply a bunch of write vectors to those shares.
+        Return the read data, which does not include any modifications made by
+        the writes.
+
+        See the similar method in RIStorageServer for more detail.
+
+        @param storageserver=RIStorageServer
+        @param secrets=TupleOf(WriteEnablerSecret, LeaseRenewSecret[, ...])
+        @param test_and_write_vectors=TestAndWriteVectorsForShares
+        @param read_vector=ReadVector
+        @param expiration_time=int
+        @return TupleOf(bool, DictOf(int, ReadData))
+        """
+
+    def add_or_renew_lease(lease_info):
+        """
+        Add a new lease on the shares in this shareset. If the renew_secret
+        matches an existing lease, that lease will be renewed instead. If
+        there are no shares in this shareset, return silently. (Note that
+        in Tahoe-LAFS v1.3.0 and earlier, IndexError was raised if there were
+        no shares with this shareset's storage index.)
+
+        @param lease_info=LeaseInfo
+        """
+
+    def renew_lease(renew_secret, new_expiration_time):
+        """
+        Renew a lease on the shares in this shareset, resetting the timer
+        to 31 days. Some grids will use this, some will not. If there are no
+        shares in this shareset, IndexError will be raised.
+
+        For mutable shares, if the given renew_secret does not match an
+        existing lease, IndexError will be raised with a note listing the
+        server-nodeids on the existing leases, so leases on migrated shares
+        can be renewed. For immutable shares, IndexError (without the note)
+        will be raised.
+
+        @param renew_secret=LeaseRenewSecret
+        """
+
+
+class IStoredShare(Interface):
+    """
+    This object contains as much as all of the share data. It is intended
+    for lazy evaluation, such that in many use cases substantially less than
+    all of the share data will be accessed.
+    """
+    def close():
+        """
+        Complete writing to this share.
+        """
+
+    def get_storage_index():
+        """
+        Returns the storage index.
+        """
+
+    def get_shnum():
+        """
+        Returns the share number.
+        """
+
+    def get_data_length():
+        """
+        Returns the data length in bytes.
+        """
+
+    def get_size():
+        """
+        Returns the size of the share in bytes.
+        """
+
+    def get_used_space():
+        """
+        Returns the amount of backend storage including overhead, in bytes, used
+        by this share.
+        """
+
+    def unlink():
+        """
+        Signal that this share can be removed from the backend storage. This does
+        not guarantee that the share data will be immediately inaccessible, or
+        that it will be securely erased.
+        """
+
+    def readv(read_vector):
+        """
+        XXX
+        """
+
+
+class IStoredMutableShare(IStoredShare):
+    def check_write_enabler(write_enabler, si_s):
+        """
+        XXX
+        """
+
+    def check_testv(test_vector):
+        """
+        XXX
+        """
+
+    def writev(datav, new_length):
+        """
+        XXX
+        """
+
+
+class IStorageBucketWriter(Interface):
+    """
+    Objects of this kind live on the client side.
+    """
+    def put_block(segmentnum, data):
+        """
+        @param segmentnum=int
+        @param data=ShareData: For most segments, this data will be 'blocksize'
+            bytes in length. The last segment might be shorter.
         @return: a Deferred that fires (with None) when the operation completes
         """

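The patch adds a storage/backends/null directory whose contents are not
included in this excerpt. A minimal sketch of what a do-nothing backend
implementing IStorageBackend could look like (everything below is a guess
except the interface and its method names, which come from the hunks above):

  from zope.interface import implements
  from allmydata.interfaces import IStorageBackend

  class NullBackend:
      implements(IStorageBackend)

      def get_available_space(self):
          return None   # unlimited (or unknown) space

      def get_sharesets_for_prefix(self, prefix):
          return iter([])   # never holds any shares

      def get_shareset(self, storageindex):
          raise NotImplementedError("sketch only")

      def advise_corrupt_share(self, storageindex, sharetype, shnum, reason):
          pass   # discard corruption advisories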
hunk ./src/allmydata/interfaces.py 514
-    def put_crypttext_hashes(hashes=ListOf(Hash)):
+    def put_crypttext_hashes(hashes):
         """
hunk ./src/allmydata/interfaces.py 516
+        @param hashes=ListOf(Hash)
         @return: a Deferred that fires (with None) when the operation completes
         """

hunk ./src/allmydata/interfaces.py 520
-    def put_block_hashes(blockhashes=ListOf(Hash)):
+    def put_block_hashes(blockhashes):
         """
hunk ./src/allmydata/interfaces.py 522
+        @param blockhashes=ListOf(Hash)
         @return: a Deferred that fires (with None) when the operation completes
         """

hunk ./src/allmydata/interfaces.py 526
-    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash))):
+    def put_share_hashes(sharehashes):
         """
hunk ./src/allmydata/interfaces.py 528
+        @param sharehashes=ListOf(TupleOf(int, Hash))
         @return: a Deferred that fires (with None) when the operation completes
         """

hunk ./src/allmydata/interfaces.py 532
-    def put_uri_extension(data=URIExtensionData):
+    def put_uri_extension(data):
         """This block of data contains integrity-checking information (hashes
         of plaintext, crypttext, and shares), as well as encoding parameters
         that are necessary to recover the data. This is a serialized dict
hunk ./src/allmydata/interfaces.py 537
         mapping strings to other strings. The hash of this data is kept in
-        the URI and verified before any of the data is used. All buckets for
-        a given file contain identical copies of this data.
+        the URI and verified before any of the data is used. All share
+        containers for a given file contain identical copies of this data.

         The serialization format is specified with the following pseudocode:
         for k in sorted(dict.keys()):
hunk ./src/allmydata/interfaces.py 545
             assert re.match(r'^[a-zA-Z_\-]+$', k)
             write(k + ':' + netstring(dict[k]))

+        @param data=URIExtensionData
         @return: a Deferred that fires (with None) when the operation completes
         """

hunk ./src/allmydata/interfaces.py 560

 class IStorageBucketReader(Interface):

-    def get_block_data(blocknum=int, blocksize=int, size=int):
+    def get_block_data(blocknum, blocksize, size):
         """Most blocks will be the same size. The last block might be shorter
         than the others.

hunk ./src/allmydata/interfaces.py 564
+        @param blocknum=int
+        @param blocksize=int
+        @param size=int
         @return: ShareData
         """

hunk ./src/allmydata/interfaces.py 575
         @return: ListOf(Hash)
         """

-    def get_block_hashes(at_least_these=SetOf(int)):
+    def get_block_hashes(at_least_these=()):
         """
hunk ./src/allmydata/interfaces.py 577
+        @param at_least_these=SetOf(int)
         @return: ListOf(Hash)
         """

hunk ./src/allmydata/interfaces.py 581
-    def get_share_hashes(at_least_these=SetOf(int)):
+    def get_share_hashes():
         """
         @return: ListOf(TupleOf(int, Hash))
         """
hunk ./src/allmydata/interfaces.py 613
         @return: unicode nickname, or None
         """

-    # methods moved from IntroducerClient, need review
-    def get_all_connections():
-        """Return a frozenset of (nodeid, service_name, rref) tuples, one for
-        each active connection we've established to a remote service. This is
-        mostly useful for unit tests that need to wait until a certain number
-        of connections have been made."""
-
-    def get_all_connectors():
-        """Return a dict that maps from (nodeid, service_name) to a
-        RemoteServiceConnector instance for all services that we are actively
-        trying to connect to. Each RemoteServiceConnector has the following
-        public attributes::
-
-          service_name: the type of service provided, like 'storage'
-          announcement_time: when we first heard about this service
-          last_connect_time: when we last established a connection
-          last_loss_time: when we last lost a connection
-
-          version: the peer's version, from the most recent connection
-          oldest_supported: the peer's oldest supported version, same
-
-          rref: the RemoteReference, if connected, otherwise None
-          remote_host: the IAddress, if connected, otherwise None
-
-        This method is intended for monitoring interfaces, such as a web page
-        that describes connecting and connected peers.
-        """
-
-    def get_all_peerids():
-        """Return a frozenset of all peerids to whom we have a connection (to
-        one or more services) established. Mostly useful for unit tests."""
-
-    def get_all_connections_for(service_name):
-        """Return a frozenset of (nodeid, service_name, rref) tuples, one
-        for each active connection that provides the given SERVICE_NAME."""
-
-    def get_permuted_peers(service_name, key):
-        """Returns an ordered list of (peerid, rref) tuples, selecting from
-        the connections that provide SERVICE_NAME, using a hash-based
-        permutation keyed by KEY. This randomizes the service list in a
-        repeatable way, to distribute load over many peers.
-        """
-

 class IMutableSlotWriter(Interface):
     """
hunk ./src/allmydata/interfaces.py 618
     The interface for a writer around a mutable slot on a remote server.
     """
-    def set_checkstring(checkstring, checkstring_or_seqnum, root_hash=None, salt=None):
+    def set_checkstring(seqnum_or_checkstring, root_hash=None, salt=None):
         """
         Set the checkstring that I will pass to the remote server when
         writing.
hunk ./src/allmydata/interfaces.py 642
         Add a block and salt to the share.
         """

-    def put_encprivey(encprivkey):
+    def put_encprivkey(encprivkey):
         """
         Add the encrypted private key to the share.
         """
hunk ./src/allmydata/interfaces.py 881
         writer-visible data using this writekey.
         """

-    # TODO: Can this be overwrite instead of replace?
-    def replace(new_contents):
-        """Replace the contents of the mutable file, provided that no other
+    def overwrite(new_contents):
+        """Overwrite the contents of the mutable file, provided that no other
         node has published (or is attempting to publish, concurrently) a
         newer version of the file than this one.

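A hypothetical caller of the renamed method (MutableData is the existing
uploadable wrapper in allmydata.mutable.publish; the contents string is a
placeholder):

  from allmydata.mutable.publish import MutableData

  # Fails if another writer has concurrently published a newer version.
  d = version.overwrite(MutableData("new contents\n"))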
hunk ./src/allmydata/interfaces.py 1348
         is empty, the metadata will be an empty dictionary.
         """

-    def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
+    def set_uri(name, writecap, readcap, metadata=None, overwrite=True):
         """I add a child (by writecap+readcap) at the specific name. I return
         a Deferred that fires when the operation finishes. If overwrite= is
         True, I will replace any existing child of the same name, otherwise
hunk ./src/allmydata/interfaces.py 1747
         Block Hash, and the encoding parameters, both of which must be included
         in the URI.

-        I do not choose shareholders, that is left to the IUploader. I must be
-        given a dict of RemoteReferences to storage buckets that are ready and
-        willing to receive data.
+        I do not choose shareholders, that is left to the IUploader.
         """

     def set_size(size):
hunk ./src/allmydata/interfaces.py 1754
         """Specify the number of bytes that will be encoded. This must be
         performed before get_serialized_params() can be called.
         """
+
     def set_params(params):
         """Override the default encoding parameters. 'params' is a tuple of
         (k,d,n), where 'k' is the number of required shares, 'd' is the
hunk ./src/allmydata/interfaces.py 1850
         download, validate, decode, and decrypt data from them, writing the
         results to an output file.

-        I do not locate the shareholders, that is left to the IDownloader. I must
-        be given a dict of RemoteReferences to storage buckets that are ready to
-        send data.
+        I do not locate the shareholders, that is left to the IDownloader.
         """

     def setup(outfile):
hunk ./src/allmydata/interfaces.py 1952
         resuming an interrupted upload (where we need to compute the
         plaintext hashes, but don't need the redundant encrypted data)."""

-    def get_plaintext_hashtree_leaves(first, last, num_segments):
-        """OBSOLETE; Get the leaf nodes of a merkle hash tree over the
-        plaintext segments, i.e. get the tagged hashes of the given segments.
-        The segment size is expected to be generated by the
-        IEncryptedUploadable before any plaintext is read or ciphertext
-        produced, so that the segment hashes can be generated with only a
-        single pass.
-
-        This returns a Deferred that fires with a sequence of hashes, using:
-
-         tuple(segment_hashes[first:last])
-
-        'num_segments' is used to assert that the number of segments that the
-        IEncryptedUploadable handled matches the number of segments that the
-        encoder was expecting.
-
-        This method must not be called until the final byte has been read
-        from read_encrypted(). Once this method is called, read_encrypted()
-        can never be called again.
-        """
-
-    def get_plaintext_hash():
-        """OBSOLETE; Get the hash of the whole plaintext.
-
-        This returns a Deferred that fires with a tagged SHA-256 hash of the
-        whole plaintext, obtained from hashutil.plaintext_hash(data).
-        """
-
     def close():
         """Just like IUploadable.close()."""

hunk ./src/allmydata/interfaces.py 2579
     Tahoe process will typically have a single NodeMaker, but unit tests may
     create simplified/mocked forms for testing purposes.
     """
-    def create_from_cap(writecap, readcap=None, **kwargs):
+    def create_from_cap(writecap, readcap=None, deep_immutable=False, name=u"<unknown name>"):
         """I create an IFilesystemNode from the given writecap/readcap. I can
         only provide nodes for existing file/directory objects: use my other
         methods to create new objects. I return synchronously."""
hunk ./src/allmydata/mutable/filenode.py 753
         self._writekey = writekey
         self._serializer = defer.succeed(None)

-
     def get_sequence_number(self):
         """
         Get the sequence number of the mutable version that I represent.
hunk ./src/allmydata/mutable/filenode.py 759
         """
         return self._version[0] # verinfo[0] == the sequence number

+    def get_servermap(self):
+        return self._servermap

hunk ./src/allmydata/mutable/filenode.py 762
-    # TODO: Terminology?
     def get_writekey(self):
         """
         I return a writekey or None if I don't have a writekey.
hunk ./src/allmydata/mutable/filenode.py 768
         """
         return self._writekey

-
     def set_downloader_hints(self, hints):
         """
         I set the downloader hints.
hunk ./src/allmydata/mutable/filenode.py 776

         self._downloader_hints = hints

-
     def get_downloader_hints(self):
         """
         I return the downloader hints.
hunk ./src/allmydata/mutable/filenode.py 782
         """
         return self._downloader_hints

-
     def overwrite(self, new_contents):
         """
         I overwrite the contents of this mutable file version with the
hunk ./src/allmydata/mutable/filenode.py 791

         return self._do_serialized(self._overwrite, new_contents)

-
     def _overwrite(self, new_contents):
         assert IMutableUploadable.providedBy(new_contents)
         assert self._servermap.last_update_mode == MODE_WRITE
hunk ./src/allmydata/mutable/filenode.py 797

         return self._upload(new_contents)

-
     def modify(self, modifier, backoffer=None):
         """I use a modifier callback to apply a change to the mutable file.
         I implement the following pseudocode::
hunk ./src/allmydata/mutable/filenode.py 841

         return self._do_serialized(self._modify, modifier, backoffer)

-
     def _modify(self, modifier, backoffer):
         if backoffer is None:
             backoffer = BackoffAgent().delay
hunk ./src/allmydata/mutable/filenode.py 846
         return self._modify_and_retry(modifier, backoffer, True)

-
     def _modify_and_retry(self, modifier, backoffer, first_time):
         """
         I try to apply modifier to the contents of this version of the
hunk ./src/allmydata/mutable/filenode.py 878
         d.addErrback(_retry)
         return d

-
     def _modify_once(self, modifier, first_time):
         """
         I attempt to apply a modifier to the contents of the mutable
hunk ./src/allmydata/mutable/filenode.py 913
         d.addCallback(_apply)
         return d

-
     def is_readonly(self):
         """
         I return True if this MutableFileVersion provides no write
hunk ./src/allmydata/mutable/filenode.py 921
         """
         return self._writekey is None

-
     def is_mutable(self):
         """
         I return True, since mutable files are always mutable by
hunk ./src/allmydata/mutable/filenode.py 928
         """
         return True

-
     def get_storage_index(self):
         """
         I return the storage index of the reference that I encapsulate.
hunk ./src/allmydata/mutable/filenode.py 934
         """
         return self._storage_index

-
     def get_size(self):
         """
         I return the length, in bytes, of this readable object.
hunk ./src/allmydata/mutable/filenode.py 940
         """
         return self._servermap.size_of_version(self._version)

-
     def download_to_data(self, fetch_privkey=False):
         """
         I return a Deferred that fires with the contents of this
hunk ./src/allmydata/mutable/filenode.py 951
         d.addCallback(lambda mc: "".join(mc.chunks))
         return d

-
     def _try_to_download_data(self):
         """
         I am an unserialized cousin of download_to_data; I am called
hunk ./src/allmydata/mutable/filenode.py 963
         d.addCallback(lambda mc: "".join(mc.chunks))
         return d

-
     def read(self, consumer, offset=0, size=None, fetch_privkey=False):
         """
         I read a portion (possibly all) of the mutable file that I
hunk ./src/allmydata/mutable/filenode.py 971
         return self._do_serialized(self._read, consumer, offset, size,
                                    fetch_privkey)

-
     def _read(self, consumer, offset=0, size=None, fetch_privkey=False):
         """
         I am the serialized companion of read.
hunk ./src/allmydata/mutable/filenode.py 981
         d = r.download(consumer, offset, size)
         return d

-
     def _do_serialized(self, cb, *args, **kwargs):
         # note: to avoid deadlock, this callable is *not* allowed to invoke
         # other serialized methods within this (or any other)
hunk ./src/allmydata/mutable/filenode.py 999
         self._serializer.addErrback(log.err)
         return d

-
     def _upload(self, new_contents):
         #assert self._pubkey, "update_servermap must be called before publish"
         p = Publish(self._node, self._storage_broker, self._servermap)
hunk ./src/allmydata/mutable/filenode.py 1009
         d.addCallback(self._did_upload, new_contents.get_size())
         return d

-
     def _did_upload(self, res, size):
         self._most_recent_size = size
         return res
hunk ./src/allmydata/mutable/filenode.py 1029
         """
         return self._do_serialized(self._update, data, offset)

-
     def _update(self, data, offset):
         """
         I update the mutable file version represented by this particular
hunk ./src/allmydata/mutable/filenode.py 1058
         d.addCallback(self._build_uploadable_and_finish, data, offset)
         return d

-
     def _do_modify_update(self, data, offset):
         """
         I perform a file update by modifying the contents of the file
hunk ./src/allmydata/mutable/filenode.py 1073
             return new
         return self._modify(m, None)

-
     def _do_update_update(self, data, offset):
         """
         I start the Servermap update that gets us the data we need to
hunk ./src/allmydata/mutable/filenode.py 1108
         return self._update_servermap(update_range=(start_segment,
                                                     end_segment))

-
     def _decode_and_decrypt_segments(self, ignored, data, offset):
         """
         After the servermap update, I take the encrypted and encoded
hunk ./src/allmydata/mutable/filenode.py 1148
         d3 = defer.succeed(blockhashes)
         return deferredutil.gatherResults([d1, d2, d3])

-
     def _build_uploadable_and_finish(self, segments_and_bht, data, offset):
         """
         After the process has the plaintext segments, I build the
hunk ./src/allmydata/mutable/filenode.py 1163
         p = Publish(self._node, self._storage_broker, self._servermap)
         return p.update(u, offset, segments_and_bht[2], self._version)

-
     def _update_servermap(self, mode=MODE_WRITE, update_range=None):
         """
         I update the servermap. I return a Deferred that fires when the
addfile ./src/allmydata/storage/backends/__init__.py
addfile ./src/allmydata/storage/backends/base.py
hunk ./src/allmydata/storage/backends/base.py 1
+
+from twisted.application import service
+
+from allmydata.storage.common import si_b2a
+from allmydata.storage.lease import LeaseInfo
+from allmydata.storage.bucket import BucketReader
+
+
+class Backend(service.MultiService):
+    def __init__(self):
+        service.MultiService.__init__(self)
+
+
+class ShareSet(object):
+    """
+    This class implements shareset logic that could work for all backends, but
+    might be useful to override for efficiency.
+    """
+
+    def __init__(self, storageindex):
+        self.storageindex = storageindex
+
+    def get_storage_index(self):
+        return self.storageindex
+
+    def get_storage_index_string(self):
+        return si_b2a(self.storageindex)
+
+    def renew_lease(self, renew_secret, new_expiration_time):
+        found_buckets = False
+        for share in self.get_shares():
+            found_buckets = True
+            share.renew_lease(renew_secret, new_expiration_time)
+
+        if not found_buckets:
+            raise IndexError("no such lease to renew")
+
+    def get_leases(self):
+        # Since all shares get the same lease data, we just grab the leases
+        # from the first share.
+        try:
+            sf = self.get_shares().next()
+            return sf.get_leases()
+        except StopIteration:
+            return iter([])
+
+    def add_or_renew_lease(self, lease_info):
+        # This implementation assumes that lease data is duplicated in
+        # all shares of a shareset, which might not be true for all backends.
+        for share in self.get_shares():
+            share.add_or_renew_lease(lease_info)
+
+    def make_bucket_reader(self, storageserver, share):
+        return BucketReader(storageserver, share)
+
+    def testv_and_readv_and_writev(self, storageserver, secrets,
+                                   test_and_write_vectors, read_vector,
+                                   expiration_time):
+        # The implementation here depends on the following helper methods,
+        # which must be provided by subclasses:
+        #
+        # def _clean_up_after_unlink(self):
+        #     """clean up resources associated with the shareset after some
+        #     shares might have been deleted"""
+        #
+        # def _create_mutable_share(self, storageserver, shnum, write_enabler):
+        #     """create a mutable share with the given shnum and write_enabler"""

+        # This previously had to be a triple with cancel_secret in secrets[2],
+        # but we now allow the cancel_secret to be omitted.
+        write_enabler = secrets[0]
+        renew_secret = secrets[1]
+
+        si_s = self.get_storage_index_string()
+        shares = {}
+        for share in self.get_shares():
+            # XXX is ignoring immutable shares correct? Maybe get_shares should
+            # have a parameter saying what type it's expecting.
+            if share.sharetype == "mutable":
+                share.check_write_enabler(write_enabler, si_s)
+                shares[share.get_shnum()] = share
+
+        # write_enabler is good for all existing shares.
+
+        # Now evaluate test vectors.
+        testv_is_good = True
+        for sharenum in test_and_write_vectors:
+            (testv, datav, new_length) = test_and_write_vectors[sharenum]
+            if sharenum in shares:
+                if not shares[sharenum].check_testv(testv):
+                    self.log("testv failed: [%d]: %r" % (sharenum, testv))
+                    testv_is_good = False
+                    break
+            else:
+                # compare the vectors against an empty share, in which all
+                # reads return empty strings.
+                if not EmptyShare().check_testv(testv):
+                    self.log("testv failed (empty): [%d] %r" % (sharenum,
+                                                                testv))
+                    testv_is_good = False
+                    break
+
+        # now gather the read vectors, before we do any writes
+        read_data = {}
+        for shnum, share in shares.items():
+            read_data[shnum] = share.readv(read_vector)
+
+        ownerid = 1 # TODO
+        lease_info = LeaseInfo(ownerid, renew_secret,
+                               expiration_time, storageserver.get_serverid())
+
+        if testv_is_good:
+            # now apply the write vectors
+            for shnum in test_and_write_vectors:
+                (testv, datav, new_length) = test_and_write_vectors[shnum]
+                if new_length == 0:
+                    if shnum in shares:
+                        shares[shnum].unlink()
+                else:
+                    if shnum not in shares:
+                        # allocate a new share
+                        share = self._create_mutable_share(storageserver, shnum, write_enabler)
+                        shares[shnum] = share
+                    shares[shnum].writev(datav, new_length)
+                    # and update the lease
+                    shares[shnum].add_or_renew_lease(lease_info)
+
+            if new_length == 0:
+                self._clean_up_after_unlink()
+
+        return (testv_is_good, read_data)
+
+    def readv(self, wanted_shnums, read_vector):
+        """
+        Read a vector from the numbered shares in this shareset. An empty
+        shares list means to return data from all known shares.
+
+        @param wanted_shnums=ListOf(int)
+        @param read_vector=ReadVector
+        @return DictOf(int, ReadData): shnum -> results, with one key per share
+        """
+        datavs = {}
+        for share in self.get_shares():
+            # XXX is ignoring immutable shares correct? Maybe get_shares should
+            # have a parameter saying what type it's expecting.
+            shnum = share.get_shnum()
+            if share.sharetype == "mutable" and (not wanted_shnums or shnum in wanted_shnums):
+                datavs[shnum] = share.readv(read_vector)
+
+        return datavs
+
+
+def testv_compare(a, op, b):
+    assert op in ("lt", "le", "eq", "ne", "ge", "gt")
+    if op == "lt":
+        return a < b
+    if op == "le":
+        return a <= b
+    if op == "eq":
+        return a == b
+    if op == "ne":
+        return a != b
+    if op == "ge":
+        return a >= b
+    if op == "gt":
+        return a > b
+    # never reached
+
+
+class EmptyShare:
+    def check_testv(self, testv):
+        test_good = True
+        for (offset, length, operator, specimen) in testv:
+            data = ""
+            if not testv_compare(data, operator, specimen):
+                test_good = False
+                break
+        return test_good
+
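To make the test-vector semantics above concrete (illustrative values): a
share that does not exist is treated as an empty share whose every read
returns the empty string, so a test against a missing share passes only if
it accepts "".

  assert testv_compare("", "eq", "")
  assert not testv_compare("", "eq", "expected")
  assert EmptyShare().check_testv([(0, 8, "eq", "")])
  assert not EmptyShare().check_testv([(0, 8, "eq", "expected")])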
addfile ./src/allmydata/storage/backends/disk/__init__.py
addfile ./src/allmydata/storage/backends/disk/disk_backend.py
hunk ./src/allmydata/storage/backends/disk/disk_backend.py 1
+
+import re
+
+from twisted.python.filepath import FilePath, UnlistableError
+
+from zope.interface import implements
+from allmydata.interfaces import IStorageBackend, IShareSet
+from allmydata.util import fileutil, log, time_format
+from allmydata.util.assertutil import precondition
+from allmydata.storage.common import si_b2a, si_a2b
+from allmydata.storage.bucket import BucketWriter
+from allmydata.storage.backends.base import Backend, ShareSet
+from allmydata.storage.backends.disk.immutable import ImmutableDiskShare
+from allmydata.storage.backends.disk.mutable import MutableDiskShare, create_mutable_disk_share
+
+# storage/
+# storage/shares/incoming
+#   incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will
+#   be moved to storage/shares/$START/$STORAGEINDEX/$SHARENUM upon success
+# storage/shares/$START/$STORAGEINDEX
+# storage/shares/$START/$STORAGEINDEX/$SHARENUM
+
+# Where "$START" denotes the first 10 bits worth of $STORAGEINDEX (that's 2
+# base-32 chars).
+# $SHARENUM matches this regex:
+NUM_RE=re.compile("^[0-9]+$")
+
+
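+# Map a storage index to its share directory:
+# <startfp>/<first 2 characters of the base-32 SI>/<full base-32 SI>.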
1180 | +def si_si2dir(startfp, storageindex): |
---|
1181 | + sia = si_b2a(storageindex) |
---|
1182 | + newfp = startfp.child(sia[:2]) |
---|
1183 | + return newfp.child(sia) |
---|
1184 | + |
---|
1185 | + |
---|
1186 | +def get_share(fp): |
---|
1187 | + f = fp.open('rb') |
---|
1188 | + try: |
---|
1189 | + prefix = f.read(32) |
---|
1190 | + finally: |
---|
1191 | + f.close() |
---|
1192 | + |
---|
1193 | + if prefix == MutableDiskShare.MAGIC: |
---|
1194 | + return MutableDiskShare(fp) |
---|
1195 | + else: |
---|
1196 | + # assume it's immutable |
---|
1197 | + return ImmutableDiskShare(fp) |
---|
1198 | + |
---|
1199 | + |
---|
1200 | +class DiskBackend(Backend): |
---|
1201 | + implements(IStorageBackend) |
---|
1202 | + |
---|
1203 | + def __init__(self, storedir, expiration_policy, readonly=False, reserved_space=0): |
---|
1204 | + Backend.__init__(self) |
---|
1205 | + self._setup_storage(storedir, readonly, reserved_space) |
---|
1206 | + self._setup_corruption_advisory() |
---|
1207 | + |
---|
1208 | + def _setup_storage(self, storedir, readonly, reserved_space): |
---|
1209 | + precondition(isinstance(storedir, FilePath), storedir, FilePath) |
---|
1210 | + self.storedir = storedir |
---|
1211 | + self.readonly = readonly |
---|
1212 | + self.reserved_space = int(reserved_space) |
---|
1213 | + self.sharedir = self.storedir.child("shares") |
---|
1214 | + fileutil.fp_make_dirs(self.sharedir) |
---|
1215 | + self.incomingdir = self.sharedir.child('incoming') |
---|
1216 | + self._clean_incomplete() |
---|
1217 | + if self.reserved_space and (self.get_available_space() is None): |
---|
1218 | + log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored", |
---|
1219 | + umid="0wZ27w", level=log.UNUSUAL) |
---|
1220 | + |
---|
1221 | + def _clean_incomplete(self): |
---|
1222 | + fileutil.fp_remove(self.incomingdir) |
---|
1223 | + fileutil.fp_make_dirs(self.incomingdir) |
---|
1224 | + |
---|
1225 | + def _setup_corruption_advisory(self): |
---|
1226 | + # we don't actually create the corruption-advisory dir until necessary |
---|
1227 | + self.corruption_advisory_dir = self.storedir.child("corruption-advisories") |
---|
1228 | + |
---|
1229 | + def _make_shareset(self, sharehomedir): |
---|
1230 | + return self.get_shareset(si_a2b(sharehomedir.basename())) |
---|
1231 | + |
---|
1232 | + def get_sharesets_for_prefix(self, prefix): |
---|
1233 | + prefixfp = self.sharedir.child(prefix) |
---|
1234 | + try: |
---|
1235 | + sharesets = map(self._make_shareset, prefixfp.children()) |
---|
1236 | + def _by_base32si(b): |
---|
1237 | + return b.get_storage_index_string() |
---|
1238 | + sharesets.sort(key=_by_base32si) |
---|
1239 | + except EnvironmentError: |
---|
1240 | + sharesets = [] |
---|
1241 | + return sharesets |
---|
1242 | + |
---|
1243 | + def get_shareset(self, storageindex): |
---|
1244 | + sharehomedir = si_si2dir(self.sharedir, storageindex) |
---|
1245 | + incominghomedir = si_si2dir(self.incomingdir, storageindex) |
---|
1246 | + return DiskShareSet(storageindex, sharehomedir, incominghomedir) |
---|
1247 | + |
---|
1248 | + def fill_in_space_stats(self, stats): |
---|
1249 | + try: |
---|
1250 | + disk = fileutil.get_disk_stats(self.sharedir, self.reserved_space) |
---|
1251 | + writeable = disk['avail'] > 0 |
---|
1252 | + |
---|
1253 | + # spacetime predictors should use disk_avail / (d(disk_used)/dt) |
---|
1254 | + stats['storage_server.disk_total'] = disk['total'] |
---|
1255 | + stats['storage_server.disk_used'] = disk['used'] |
---|
1256 | + stats['storage_server.disk_free_for_root'] = disk['free_for_root'] |
---|
1257 | + stats['storage_server.disk_free_for_nonroot'] = disk['free_for_nonroot'] |
---|
1258 | + stats['storage_server.disk_avail'] = disk['avail'] |
---|
1259 | + except AttributeError: |
---|
1260 | + writeable = True |
---|
1261 | + except EnvironmentError: |
---|
1262 | + log.msg("OS call to get disk statistics failed", level=log.UNUSUAL) |
---|
1263 | + writeable = False |
---|
1264 | + |
---|
1265 | + if self.readonly:
---|
1266 | + stats['storage_server.disk_avail'] = 0 |
---|
1267 | + writeable = False |
---|
1268 | + |
---|
1269 | + stats['storage_server.accepting_immutable_shares'] = int(writeable) |
---|
1270 | + |
---|
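
Aside: the "spacetime predictor" comment above amounts to simple division.
A worked sketch with hypothetical numbers:

    # if 100 GiB are available and usage has grown by 5 GiB/day,
    # the server has roughly 20 days of headroom left
    disk_avail = 100 * 1024**3            # bytes clients may still use
    d_disk_used_dt = 5 * 1024**3          # observed growth in bytes/day
    days_left = disk_avail / float(d_disk_used_dt)   # == 20.0
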
1271 | + def get_available_space(self): |
---|
1272 | + if self.readonly: |
---|
1273 | + return 0 |
---|
1274 | + return fileutil.get_available_space(self.sharedir, self.reserved_space) |
---|
1275 | + |
---|
1276 | + #def set_storage_server(self, ss): |
---|
1277 | + # self.ss = ss |
---|
1278 | + |
---|
1279 | + def advise_corrupt_share(self, sharetype, storageindex, shnum, reason): |
---|
1280 | + fileutil.fp_make_dirs(self.corruption_advisory_dir) |
---|
1281 | + now = time_format.iso_utc(sep="T") |
---|
1282 | + si_s = si_b2a(storageindex) |
---|
1283 | + |
---|
1284 | + # Windows can't handle colons in the filename. |
---|
1285 | + name = ("%s--%s-%d" % (now, si_s, shnum)).replace(":", "") |
---|
1286 | + f = self.corruption_advisory_dir.child(name).open("w") |
---|
1287 | + try: |
---|
1288 | + f.write("report: Share Corruption\n") |
---|
1289 | + f.write("type: %s\n" % sharetype) |
---|
1290 | + f.write("storage_index: %s\n" % si_s) |
---|
1291 | + f.write("share_number: %d\n" % shnum) |
---|
1292 | + f.write("\n") |
---|
1293 | + f.write(reason) |
---|
1294 | + f.write("\n") |
---|
1295 | + finally: |
---|
1296 | + f.close() |
---|
1297 | + |
---|
1298 | + log.msg(format=("client claims corruption in (%(share_type)s) " + |
---|
1299 | + "%(si)s-%(shnum)d: %(reason)s"), |
---|
1300 | + share_type=sharetype, si=si_s, shnum=shnum, reason=reason, |
---|
1301 | + level=log.SCARY, umid="SGx2fA") |
---|
1302 | + |
---|
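
Aside: for reference, an advisory file written by the method above would
contain (storage index, share number, timestamp, and reason are
hypothetical):

    report: Share Corruption
    type: immutable
    storage_index: aaaaaaaaaaaaaaaaaaaaaaaaaa
    share_number: 4

    <client-supplied reason>

under a name like 2011-09-17T030004--aaaaaaaaaaaaaaaaaaaaaaaaaa-4 (the
colons of the ISO timestamp are stripped for Windows compatibility).
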
1303 | + |
---|
1304 | +class DiskShareSet(ShareSet): |
---|
1305 | + implements(IShareSet) |
---|
1306 | + |
---|
1307 | + def __init__(self, storageindex, sharehomedir, incominghomedir=None): |
---|
1308 | + ShareSet.__init__(self, storageindex)
---|
1309 | + self._sharehomedir = sharehomedir |
---|
1310 | + self._incominghomedir = incominghomedir |
---|
1311 | + |
---|
1312 | + def get_overhead(self): |
---|
1313 | + return (fileutil.get_disk_usage(self._sharehomedir) + |
---|
1314 | + fileutil.get_disk_usage(self._incominghomedir)) |
---|
1315 | + |
---|
1316 | + def get_shares(self): |
---|
1317 | + """ |
---|
1318 | + Generate IStorageBackendShare objects for shares we have for this storage index. |
---|
1319 | + ("Shares we have" means completed ones, excluding incoming ones.) |
---|
1320 | + """ |
---|
1321 | + try: |
---|
1322 | + for fp in self._sharehomedir.children(): |
---|
1323 | + shnumstr = fp.basename() |
---|
1324 | + if not NUM_RE.match(shnumstr): |
---|
1325 | + continue |
---|
1326 | + sharehome = self._sharehomedir.child(shnumstr) |
---|
1327 | + yield get_share(self.get_storage_index(), int(shnumstr), sharehome)
---|
1328 | + except UnlistableError: |
---|
1329 | + # There is no shares directory at all. |
---|
1330 | + pass |
---|
1331 | + |
---|
1332 | + def get_incoming_shnums(self): |
---|
1333 | + """ |
---|
1334 | + Return a frozenset of the shnums (as ints) of incoming shares.
---|
1335 | + """ |
---|
1336 | + if self._incominghomedir is None: |
---|
1337 | + return frozenset() |
---|
1338 | + try: |
---|
1339 | + childfps = [ fp for fp in self._incominghomedir.children() if NUM_RE.match(fp.basename()) ] |
---|
1340 | + shnums = [ int(fp.basename()) for fp in childfps] |
---|
1341 | + return frozenset(shnums) |
---|
1342 | + except UnlistableError: |
---|
1343 | + # There is no incoming directory at all. |
---|
1344 | + return frozenset() |
---|
1345 | + |
---|
1346 | + def make_bucket_writer(self, storageserver, shnum, max_space_per_bucket, lease_info, canary): |
---|
1347 | + sharehome = self._sharehomedir.child(str(shnum)) |
---|
1348 | + incominghome = self._incominghomedir.child(str(shnum)) |
---|
1349 | + immsh = ImmutableDiskShare(self.get_storage_index(), shnum, sharehome, incominghome, |
---|
1350 | + max_size=max_space_per_bucket, create=True) |
---|
1351 | + bw = BucketWriter(storageserver, immsh, max_space_per_bucket, lease_info, canary) |
---|
1352 | + return bw |
---|
1353 | + |
---|
1354 | + def _create_mutable_share(self, storageserver, shnum, write_enabler): |
---|
1355 | + fileutil.fp_make_dirs(self._sharehomedir)
---|
1356 | + sharehome = self._sharehomedir.child(str(shnum))
---|
1357 | + nodeid = storageserver.get_nodeid()
---|
1358 | + return create_mutable_disk_share(self.get_storage_index(), shnum, sharehome, nodeid, write_enabler, storageserver)
---|
1359 | + |
---|
1360 | + def _clean_up_after_unlink(self): |
---|
1361 | + fileutil.fp_rmdir_if_empty(self._sharehomedir) |
---|
1362 | + |
---|
1363 | hunk ./src/allmydata/storage/backends/disk/immutable.py 1 |
---|
1364 | -import os, stat, struct, time |
---|
1365 | |
---|
1366 | hunk ./src/allmydata/storage/backends/disk/immutable.py 2 |
---|
1367 | -from foolscap.api import Referenceable |
---|
1368 | +import struct |
---|
1369 | |
---|
1370 | from zope.interface import implements |
---|
1371 | hunk ./src/allmydata/storage/backends/disk/immutable.py 5 |
---|
1372 | -from allmydata.interfaces import RIBucketWriter, RIBucketReader |
---|
1373 | -from allmydata.util import base32, fileutil, log |
---|
1374 | + |
---|
1375 | +from allmydata.interfaces import IStoredShare |
---|
1376 | +from allmydata.util import fileutil |
---|
1377 | from allmydata.util.assertutil import precondition |
---|
1378 | hunk ./src/allmydata/storage/backends/disk/immutable.py 9 |
---|
1379 | +from allmydata.util.fileutil import fp_make_dirs |
---|
1380 | from allmydata.util.hashutil import constant_time_compare |
---|
1381 | hunk ./src/allmydata/storage/backends/disk/immutable.py 11 |
---|
1382 | +from allmydata.util.encodingutil import quote_filepath |
---|
1383 | +from allmydata.storage.common import si_b2a, UnknownImmutableContainerVersionError, DataTooLargeError |
---|
1384 | from allmydata.storage.lease import LeaseInfo |
---|
1385 | hunk ./src/allmydata/storage/backends/disk/immutable.py 14 |
---|
1386 | -from allmydata.storage.common import UnknownImmutableContainerVersionError, \ |
---|
1387 | - DataTooLargeError |
---|
1388 | + |
---|
1389 | |
---|
1390 | # each share file (in storage/shares/$SI/$SHNUM) contains lease information |
---|
1391 | # and share data. The share data is accessed by RIBucketWriter.write and |
---|
1392 | hunk ./src/allmydata/storage/backends/disk/immutable.py 41 |
---|
1393 | # then the value stored in this field will be the actual share data length |
---|
1394 | # modulo 2**32. |
---|
1395 | |
---|
1396 | -class ShareFile: |
---|
1397 | - LEASE_SIZE = struct.calcsize(">L32s32sL") |
---|
1398 | +class ImmutableDiskShare(object): |
---|
1399 | + implements(IStoredShare) |
---|
1400 | + |
---|
1401 | sharetype = "immutable" |
---|
1402 | hunk ./src/allmydata/storage/backends/disk/immutable.py 45 |
---|
1403 | + LEASE_SIZE = struct.calcsize(">L32s32sL") |
---|
1404 | + |
---|
1405 | |
---|
1406 | hunk ./src/allmydata/storage/backends/disk/immutable.py 48 |
---|
1407 | - def __init__(self, filename, max_size=None, create=False): |
---|
1408 | - """ If max_size is not None then I won't allow more than max_size to be written to me. If create=True and max_size must not be None. """ |
---|
1409 | + def __init__(self, storageindex, shnum, finalhome=None, incominghome=None, max_size=None, create=False): |
---|
1410 | + """ If max_size is not None then I won't allow more than |
---|
1411 | + max_size to be written to me. If create=True then max_size |
---|
1412 | + must not be None. """ |
---|
1413 | precondition((max_size is not None) or (not create), max_size, create) |
---|
1414 | hunk ./src/allmydata/storage/backends/disk/immutable.py 53 |
---|
1415 | - self.home = filename |
---|
1416 | + self._storageindex = storageindex |
---|
1417 | self._max_size = max_size |
---|
1418 | hunk ./src/allmydata/storage/backends/disk/immutable.py 55 |
---|
1419 | + self._incominghome = incominghome |
---|
1420 | + self._home = finalhome |
---|
1421 | + self._shnum = shnum |
---|
1422 | if create: |
---|
1423 | # touch the file, so later callers will see that we're working on |
---|
1424 | # it. Also construct the metadata. |
---|
1425 | hunk ./src/allmydata/storage/backends/disk/immutable.py 61 |
---|
1426 | - assert not os.path.exists(self.home) |
---|
1427 | - fileutil.make_dirs(os.path.dirname(self.home)) |
---|
1428 | - f = open(self.home, 'wb') |
---|
1429 | + assert not finalhome.exists() |
---|
1430 | + fp_make_dirs(self._incominghome.parent()) |
---|
1431 | # The second field -- the four-byte share data length -- is no |
---|
1432 | # longer used as of Tahoe v1.3.0, but we continue to write it in |
---|
1433 | # there in case someone downgrades a storage server from >= |
---|
1434 | hunk ./src/allmydata/storage/backends/disk/immutable.py 72 |
---|
1435 | # the largest length that can fit into the field. That way, even |
---|
1436 | # if this does happen, the old < v1.3.0 server will still allow |
---|
1437 | # clients to read the first part of the share. |
---|
1438 | - f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0)) |
---|
1439 | - f.close() |
---|
1440 | + self._incominghome.setContent(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
---|
1441 | self._lease_offset = max_size + 0x0c |
---|
1442 | self._num_leases = 0 |
---|
1443 | else: |
---|
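
Aside: a minimal sketch of the 12-byte container header written above
(the max_size value is hypothetical):

    import struct

    # version 1, legacy share-data length (capped at 2**32-1), zero leases
    header = struct.pack(">LLL", 1, min(2**32-1, 1000), 0)
    assert len(header) == 0xc   # so share data starts at offset 0x0c
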
1444 | hunk ./src/allmydata/storage/backends/disk/immutable.py 76 |
---|
1445 | - f = open(self.home, 'rb') |
---|
1446 | - filesize = os.path.getsize(self.home) |
---|
1447 | - (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc)) |
---|
1448 | - f.close() |
---|
1449 | + f = self._home.open(mode='rb') |
---|
1450 | + try: |
---|
1451 | + (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc)) |
---|
1452 | + finally: |
---|
1453 | + f.close() |
---|
1454 | + filesize = self._home.getsize() |
---|
1455 | if version != 1: |
---|
1456 | msg = "sharefile %s had version %d but we wanted 1" % \ |
---|
1457 | hunk ./src/allmydata/storage/backends/disk/immutable.py 84 |
---|
1458 | - (filename, version) |
---|
1459 | + (self._home, version) |
---|
1460 | raise UnknownImmutableContainerVersionError(msg) |
---|
1461 | self._num_leases = num_leases |
---|
1462 | self._lease_offset = filesize - (num_leases * self.LEASE_SIZE) |
---|
1463 | hunk ./src/allmydata/storage/backends/disk/immutable.py 90 |
---|
1464 | self._data_offset = 0xc |
---|
1465 | |
---|
1466 | + def __repr__(self): |
---|
1467 | + return ("<ImmutableDiskShare %s:%r at %s>" |
---|
1468 | + % (si_b2a(self._storageindex), self._shnum, quote_filepath(self._home))) |
---|
1469 | + |
---|
1470 | + def close(self): |
---|
1471 | + fileutil.fp_make_dirs(self._home.parent()) |
---|
1472 | + self._incominghome.moveTo(self._home) |
---|
1473 | + try: |
---|
1474 | + # self._incominghome is like storage/shares/incoming/ab/abcde/4 . |
---|
1475 | + # We try to delete the parent (.../ab/abcde) to avoid leaving |
---|
1476 | + # these directories lying around forever, but the delete might |
---|
1477 | + # fail if we're working on another share for the same storage |
---|
1478 | + # index (like ab/abcde/5). The alternative approach would be to |
---|
1479 | + # use a hierarchy of objects (PrefixHolder, BucketHolder, |
---|
1480 | + # ShareWriter), each of which is responsible for a single |
---|
1481 | + # directory on disk, and have them use reference counting of |
---|
1482 | + # their children to know when they should do the rmdir. This |
---|
1483 | + # approach is simpler, but relies on os.rmdir refusing to delete |
---|
1484 | + # a non-empty directory. Do *not* use fileutil.fp_remove() here! |
---|
1485 | + fileutil.fp_rmdir_if_empty(self._incominghome.parent()) |
---|
1486 | + # we also delete the grandparent (prefix) directory, .../ab , |
---|
1487 | + # again to avoid leaving directories lying around. This might |
---|
1488 | + # fail if there is another bucket open that shares a prefix (like |
---|
1489 | + # ab/abfff). |
---|
1490 | + fileutil.fp_rmdir_if_empty(self._incominghome.parent().parent()) |
---|
1491 | + # we leave the great-grandparent (incoming/) directory in place. |
---|
1492 | + except EnvironmentError: |
---|
1493 | + # ignore the "can't rmdir because the directory is not empty" |
---|
1494 | + # exceptions, those are normal consequences of the |
---|
1495 | + # above-mentioned conditions. |
---|
1496 | + pass |
---|
1497 | +
---|
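
Aside: the cleanup above leans on rmdir refusing to delete a non-empty
directory. A minimal sketch of that idiom, assuming fp_rmdir_if_empty
(in allmydata.util.fileutil) behaves like this:

    import os, errno

    def rmdir_if_empty(path):
        # os.rmdir fails on a non-empty directory, which is exactly the
        # guard this cleanup relies on.
        try:
            os.rmdir(path)
        except OSError as e:
            if e.errno not in (errno.ENOTEMPTY, errno.EEXIST):
                raise
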
1498 | + |
---|
1499 | + def get_used_space(self): |
---|
1500 | + return (fileutil.get_used_space(self._home) + |
---|
1501 | + fileutil.get_used_space(self._incominghome)) |
---|
1502 | + |
---|
1503 | + def get_storage_index(self): |
---|
1504 | + return self._storageindex |
---|
1505 | + |
---|
1506 | + def get_shnum(self): |
---|
1507 | + return self._shnum |
---|
1508 | + |
---|
1509 | def unlink(self): |
---|
1510 | hunk ./src/allmydata/storage/backends/disk/immutable.py 134 |
---|
1511 | - os.unlink(self.home) |
---|
1512 | + self._home.remove() |
---|
1513 | + |
---|
1514 | + def get_size(self): |
---|
1515 | + return self._home.getsize() |
---|
1516 | + |
---|
1517 | + def get_data_length(self): |
---|
1518 | + return self._lease_offset - self._data_offset |
---|
1519 | + |
---|
1520 | + #def readv(self, read_vector): |
---|
1521 | + # ... |
---|
1522 | |
---|
1523 | def read_share_data(self, offset, length): |
---|
1524 | precondition(offset >= 0) |
---|
1525 | hunk ./src/allmydata/storage/backends/disk/immutable.py 147 |
---|
1526 | - # reads beyond the end of the data are truncated. Reads that start |
---|
1527 | + |
---|
1528 | + # Reads beyond the end of the data are truncated. Reads that start |
---|
1529 | # beyond the end of the data return an empty string. |
---|
1530 | seekpos = self._data_offset+offset |
---|
1531 | actuallength = max(0, min(length, self._lease_offset-seekpos)) |
---|
1532 | hunk ./src/allmydata/storage/backends/disk/immutable.py 154 |
---|
1533 | if actuallength == 0: |
---|
1534 | return "" |
---|
1535 | - f = open(self.home, 'rb') |
---|
1536 | - f.seek(seekpos) |
---|
1537 | - return f.read(actuallength) |
---|
1538 | + f = self._home.open(mode='rb') |
---|
1539 | + try: |
---|
1540 | + f.seek(seekpos) |
---|
1541 | + sharedata = f.read(actuallength) |
---|
1542 | + finally: |
---|
1543 | + f.close() |
---|
1544 | + return sharedata |
---|
1545 | |
---|
1546 | def write_share_data(self, offset, data): |
---|
1547 | length = len(data) |
---|
1548 | hunk ./src/allmydata/storage/backends/disk/immutable.py 167 |
---|
1549 | precondition(offset >= 0, offset) |
---|
1550 | if self._max_size is not None and offset+length > self._max_size: |
---|
1551 | raise DataTooLargeError(self._max_size, offset, length) |
---|
1552 | - f = open(self.home, 'rb+') |
---|
1553 | - real_offset = self._data_offset+offset |
---|
1554 | - f.seek(real_offset) |
---|
1555 | - assert f.tell() == real_offset |
---|
1556 | - f.write(data) |
---|
1557 | - f.close() |
---|
1558 | + f = self._incominghome.open(mode='rb+') |
---|
1559 | + try: |
---|
1560 | + real_offset = self._data_offset+offset |
---|
1561 | + f.seek(real_offset) |
---|
1562 | + assert f.tell() == real_offset |
---|
1563 | + f.write(data) |
---|
1564 | + finally: |
---|
1565 | + f.close() |
---|
1566 | |
---|
1567 | def _write_lease_record(self, f, lease_number, lease_info): |
---|
1568 | offset = self._lease_offset + lease_number * self.LEASE_SIZE |
---|
1569 | hunk ./src/allmydata/storage/backends/disk/immutable.py 184 |
---|
1570 | |
---|
1571 | def _read_num_leases(self, f): |
---|
1572 | f.seek(0x08) |
---|
1573 | - (num_leases,) = struct.unpack(">L", f.read(4)) |
---|
1574 | + ro = f.read(4) |
---|
1575 | + (num_leases,) = struct.unpack(">L", ro) |
---|
1576 | return num_leases |
---|
1577 | |
---|
1578 | def _write_num_leases(self, f, num_leases): |
---|
1579 | hunk ./src/allmydata/storage/backends/disk/immutable.py 195 |
---|
1580 | def _truncate_leases(self, f, num_leases): |
---|
1581 | f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE) |
---|
1582 | |
---|
1583 | + # These lease operations are intended for use by disk_backend.py. |
---|
1584 | + # Other clients should not depend on the fact that the disk backend |
---|
1585 | + # stores leases in share files. |
---|
1586 | + |
---|
1587 | def get_leases(self): |
---|
1588 | """Yields a LeaseInfo instance for all leases.""" |
---|
1589 | hunk ./src/allmydata/storage/backends/disk/immutable.py 201 |
---|
1590 | - f = open(self.home, 'rb') |
---|
1591 | - (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc)) |
---|
1592 | - f.seek(self._lease_offset) |
---|
1593 | - for i in range(num_leases): |
---|
1594 | - data = f.read(self.LEASE_SIZE) |
---|
1595 | - if data: |
---|
1596 | - yield LeaseInfo().from_immutable_data(data) |
---|
1597 | + f = self._home.open(mode='rb') |
---|
1598 | + try: |
---|
1599 | + (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc)) |
---|
1600 | + f.seek(self._lease_offset) |
---|
1601 | + for i in range(num_leases): |
---|
1602 | + data = f.read(self.LEASE_SIZE) |
---|
1603 | + if data: |
---|
1604 | + yield LeaseInfo().from_immutable_data(data) |
---|
1605 | + finally: |
---|
1606 | + f.close() |
---|
1607 | |
---|
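
Aside: each lease record occupies LEASE_SIZE = struct.calcsize(">L32s32sL")
= 72 bytes. A minimal parsing sketch (field order assumed from LeaseInfo's
immutable serialization):

    import struct

    def parse_immutable_lease(data):
        (owner_num, renew_secret, cancel_secret,
         expiration_time) = struct.unpack(">L32s32sL", data)
        return owner_num, renew_secret, cancel_secret, expiration_time
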
1608 | def add_lease(self, lease_info): |
---|
1609 | hunk ./src/allmydata/storage/backends/disk/immutable.py 213 |
---|
1610 | - f = open(self.home, 'rb+') |
---|
1611 | - num_leases = self._read_num_leases(f) |
---|
1612 | - self._write_lease_record(f, num_leases, lease_info) |
---|
1613 | - self._write_num_leases(f, num_leases+1) |
---|
1614 | - f.close() |
---|
1615 | + f = self._incominghome.open(mode='rb+')
---|
1616 | + try:
---|
1617 | + num_leases = self._read_num_leases(f)
---|
1618 | + self._write_lease_record(f, num_leases, lease_info)
---|
1619 | + self._write_num_leases(f, num_leases+1)
---|
1620 | + finally:
---|
1621 | + f.close()
---|
1622 | |
---|
1623 | def renew_lease(self, renew_secret, new_expire_time): |
---|
1624 | hunk ./src/allmydata/storage/backends/disk/immutable.py 222 |
---|
1625 | - for i,lease in enumerate(self.get_leases()): |
---|
1626 | + for i, lease in enumerate(self.get_leases()): |
---|
1627 | if constant_time_compare(lease.renew_secret, renew_secret): |
---|
1628 | # yup. See if we need to update the owner time. |
---|
1629 | if new_expire_time > lease.expiration_time: |
---|
1630 | hunk ./src/allmydata/storage/backends/disk/immutable.py 228 |
---|
1631 | # yes |
---|
1632 | lease.expiration_time = new_expire_time |
---|
1633 | - f = open(self.home, 'rb+') |
---|
1634 | - self._write_lease_record(f, i, lease) |
---|
1635 | - f.close() |
---|
1636 | + f = self._home.open('rb+') |
---|
1637 | + try: |
---|
1638 | + self._write_lease_record(f, i, lease) |
---|
1639 | + finally: |
---|
1640 | + f.close() |
---|
1641 | return |
---|
1642 | raise IndexError("unable to renew non-existent lease") |
---|
1643 | |
---|
1644 | hunk ./src/allmydata/storage/backends/disk/immutable.py 242 |
---|
1645 | lease_info.expiration_time) |
---|
1646 | except IndexError: |
---|
1647 | self.add_lease(lease_info) |
---|
1648 | - |
---|
1649 | - |
---|
1650 | - def cancel_lease(self, cancel_secret): |
---|
1651 | - """Remove a lease with the given cancel_secret. If the last lease is |
---|
1652 | - cancelled, the file will be removed. Return the number of bytes that |
---|
1653 | - were freed (by truncating the list of leases, and possibly by |
---|
1654 | - deleting the file. Raise IndexError if there was no lease with the |
---|
1655 | - given cancel_secret. |
---|
1656 | - """ |
---|
1657 | - |
---|
1658 | - leases = list(self.get_leases()) |
---|
1659 | - num_leases_removed = 0 |
---|
1660 | - for i,lease in enumerate(leases): |
---|
1661 | - if constant_time_compare(lease.cancel_secret, cancel_secret): |
---|
1662 | - leases[i] = None |
---|
1663 | - num_leases_removed += 1 |
---|
1664 | - if not num_leases_removed: |
---|
1665 | - raise IndexError("unable to find matching lease to cancel") |
---|
1666 | - if num_leases_removed: |
---|
1667 | - # pack and write out the remaining leases. We write these out in |
---|
1668 | - # the same order as they were added, so that if we crash while |
---|
1669 | - # doing this, we won't lose any non-cancelled leases. |
---|
1670 | - leases = [l for l in leases if l] # remove the cancelled leases |
---|
1671 | - f = open(self.home, 'rb+') |
---|
1672 | - for i,lease in enumerate(leases): |
---|
1673 | - self._write_lease_record(f, i, lease) |
---|
1674 | - self._write_num_leases(f, len(leases)) |
---|
1675 | - self._truncate_leases(f, len(leases)) |
---|
1676 | - f.close() |
---|
1677 | - space_freed = self.LEASE_SIZE * num_leases_removed |
---|
1678 | - if not len(leases): |
---|
1679 | - space_freed += os.stat(self.home)[stat.ST_SIZE] |
---|
1680 | - self.unlink() |
---|
1681 | - return space_freed |
---|
1682 | - |
---|
1683 | - |
---|
1684 | -class BucketWriter(Referenceable): |
---|
1685 | - implements(RIBucketWriter) |
---|
1686 | - |
---|
1687 | - def __init__(self, ss, incominghome, finalhome, max_size, lease_info, canary): |
---|
1688 | - self.ss = ss |
---|
1689 | - self.incominghome = incominghome |
---|
1690 | - self.finalhome = finalhome |
---|
1691 | - self._max_size = max_size # don't allow the client to write more than this |
---|
1692 | - self._canary = canary |
---|
1693 | - self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected) |
---|
1694 | - self.closed = False |
---|
1695 | - self.throw_out_all_data = False |
---|
1696 | - self._sharefile = ShareFile(incominghome, create=True, max_size=max_size) |
---|
1697 | - # also, add our lease to the file now, so that other ones can be |
---|
1698 | - # added by simultaneous uploaders |
---|
1699 | - self._sharefile.add_lease(lease_info) |
---|
1700 | - |
---|
1701 | - def allocated_size(self): |
---|
1702 | - return self._max_size |
---|
1703 | - |
---|
1704 | - def remote_write(self, offset, data): |
---|
1705 | - start = time.time() |
---|
1706 | - precondition(not self.closed) |
---|
1707 | - if self.throw_out_all_data: |
---|
1708 | - return |
---|
1709 | - self._sharefile.write_share_data(offset, data) |
---|
1710 | - self.ss.add_latency("write", time.time() - start) |
---|
1711 | - self.ss.count("write") |
---|
1712 | - |
---|
1713 | - def remote_close(self): |
---|
1714 | - precondition(not self.closed) |
---|
1715 | - start = time.time() |
---|
1716 | - |
---|
1717 | - fileutil.make_dirs(os.path.dirname(self.finalhome)) |
---|
1718 | - fileutil.rename(self.incominghome, self.finalhome) |
---|
1719 | - try: |
---|
1720 | - # self.incominghome is like storage/shares/incoming/ab/abcde/4 . |
---|
1721 | - # We try to delete the parent (.../ab/abcde) to avoid leaving |
---|
1722 | - # these directories lying around forever, but the delete might |
---|
1723 | - # fail if we're working on another share for the same storage |
---|
1724 | - # index (like ab/abcde/5). The alternative approach would be to |
---|
1725 | - # use a hierarchy of objects (PrefixHolder, BucketHolder, |
---|
1726 | - # ShareWriter), each of which is responsible for a single |
---|
1727 | - # directory on disk, and have them use reference counting of |
---|
1728 | - # their children to know when they should do the rmdir. This |
---|
1729 | - # approach is simpler, but relies on os.rmdir refusing to delete |
---|
1730 | - # a non-empty directory. Do *not* use fileutil.rm_dir() here! |
---|
1731 | - os.rmdir(os.path.dirname(self.incominghome)) |
---|
1732 | - # we also delete the grandparent (prefix) directory, .../ab , |
---|
1733 | - # again to avoid leaving directories lying around. This might |
---|
1734 | - # fail if there is another bucket open that shares a prefix (like |
---|
1735 | - # ab/abfff). |
---|
1736 | - os.rmdir(os.path.dirname(os.path.dirname(self.incominghome))) |
---|
1737 | - # we leave the great-grandparent (incoming/) directory in place. |
---|
1738 | - except EnvironmentError: |
---|
1739 | - # ignore the "can't rmdir because the directory is not empty" |
---|
1740 | - # exceptions, those are normal consequences of the |
---|
1741 | - # above-mentioned conditions. |
---|
1742 | - pass |
---|
1743 | - self._sharefile = None |
---|
1744 | - self.closed = True |
---|
1745 | - self._canary.dontNotifyOnDisconnect(self._disconnect_marker) |
---|
1746 | - |
---|
1747 | - filelen = os.stat(self.finalhome)[stat.ST_SIZE] |
---|
1748 | - self.ss.bucket_writer_closed(self, filelen) |
---|
1749 | - self.ss.add_latency("close", time.time() - start) |
---|
1750 | - self.ss.count("close") |
---|
1751 | - |
---|
1752 | - def _disconnected(self): |
---|
1753 | - if not self.closed: |
---|
1754 | - self._abort() |
---|
1755 | - |
---|
1756 | - def remote_abort(self): |
---|
1757 | - log.msg("storage: aborting sharefile %s" % self.incominghome, |
---|
1758 | - facility="tahoe.storage", level=log.UNUSUAL) |
---|
1759 | - if not self.closed: |
---|
1760 | - self._canary.dontNotifyOnDisconnect(self._disconnect_marker) |
---|
1761 | - self._abort() |
---|
1762 | - self.ss.count("abort") |
---|
1763 | - |
---|
1764 | - def _abort(self): |
---|
1765 | - if self.closed: |
---|
1766 | - return |
---|
1767 | - |
---|
1768 | - os.remove(self.incominghome) |
---|
1769 | - # if we were the last share to be moved, remove the incoming/ |
---|
1770 | - # directory that was our parent |
---|
1771 | - parentdir = os.path.split(self.incominghome)[0] |
---|
1772 | - if not os.listdir(parentdir): |
---|
1773 | - os.rmdir(parentdir) |
---|
1774 | - self._sharefile = None |
---|
1775 | - |
---|
1776 | - # We are now considered closed for further writing. We must tell |
---|
1777 | - # the storage server about this so that it stops expecting us to |
---|
1778 | - # use the space it allocated for us earlier. |
---|
1779 | - self.closed = True |
---|
1780 | - self.ss.bucket_writer_closed(self, 0) |
---|
1781 | - |
---|
1782 | - |
---|
1783 | -class BucketReader(Referenceable): |
---|
1784 | - implements(RIBucketReader) |
---|
1785 | - |
---|
1786 | - def __init__(self, ss, sharefname, storage_index=None, shnum=None): |
---|
1787 | - self.ss = ss |
---|
1788 | - self._share_file = ShareFile(sharefname) |
---|
1789 | - self.storage_index = storage_index |
---|
1790 | - self.shnum = shnum |
---|
1791 | - |
---|
1792 | - def __repr__(self): |
---|
1793 | - return "<%s %s %s>" % (self.__class__.__name__, |
---|
1794 | - base32.b2a_l(self.storage_index[:8], 60), |
---|
1795 | - self.shnum) |
---|
1796 | - |
---|
1797 | - def remote_read(self, offset, length): |
---|
1798 | - start = time.time() |
---|
1799 | - data = self._share_file.read_share_data(offset, length) |
---|
1800 | - self.ss.add_latency("read", time.time() - start) |
---|
1801 | - self.ss.count("read") |
---|
1802 | - return data |
---|
1803 | - |
---|
1804 | - def remote_advise_corrupt_share(self, reason): |
---|
1805 | - return self.ss.remote_advise_corrupt_share("immutable", |
---|
1806 | - self.storage_index, |
---|
1807 | - self.shnum, |
---|
1808 | - reason) |
---|
1809 | hunk ./src/allmydata/storage/backends/disk/mutable.py 1 |
---|
1810 | -import os, stat, struct |
---|
1811 | |
---|
1812 | hunk ./src/allmydata/storage/backends/disk/mutable.py 2 |
---|
1813 | -from allmydata.interfaces import BadWriteEnablerError |
---|
1814 | -from allmydata.util import idlib, log |
---|
1815 | +import struct |
---|
1816 | + |
---|
1817 | +from zope.interface import implements |
---|
1818 | + |
---|
1819 | +from allmydata.interfaces import IStoredMutableShare, BadWriteEnablerError |
---|
1820 | +from allmydata.util import fileutil, idlib, log |
---|
1821 | from allmydata.util.assertutil import precondition |
---|
1822 | from allmydata.util.hashutil import constant_time_compare |
---|
1823 | hunk ./src/allmydata/storage/backends/disk/mutable.py 10 |
---|
1824 | -from allmydata.storage.lease import LeaseInfo |
---|
1825 | -from allmydata.storage.common import UnknownMutableContainerVersionError, \ |
---|
1826 | +from allmydata.util.encodingutil import quote_filepath |
---|
1827 | +from allmydata.storage.common import si_b2a, UnknownMutableContainerVersionError, \ |
---|
1828 | DataTooLargeError |
---|
1829 | hunk ./src/allmydata/storage/backends/disk/mutable.py 13 |
---|
1830 | +from allmydata.storage.lease import LeaseInfo |
---|
1831 | +from allmydata.storage.backends.base import testv_compare |
---|
1832 | |
---|
1833 | hunk ./src/allmydata/storage/backends/disk/mutable.py 16 |
---|
1834 | -# the MutableShareFile is like the ShareFile, but used for mutable data. It |
---|
1835 | -# has a different layout. See docs/mutable.txt for more details. |
---|
1836 | + |
---|
1837 | +# The MutableDiskShare is like the ImmutableDiskShare, but used for mutable data. |
---|
1838 | +# It has a different layout. See docs/mutable.rst for more details. |
---|
1839 | |
---|
1840 | # # offset size name |
---|
1841 | # 1 0 32 magic verstr "tahoe mutable container v1" plus binary |
---|
1842 | hunk ./src/allmydata/storage/backends/disk/mutable.py 31 |
---|
1843 | # 4 4 expiration timestamp |
---|
1844 | # 8 32 renewal token |
---|
1845 | # 40 32 cancel token |
---|
1846 | -# 72 20 nodeid which accepted the tokens |
---|
1847 | +# 72 20 nodeid that accepted the tokens |
---|
1848 | # 7 468 (a) data |
---|
1849 | # 8 ?? 4 count of extra leases |
---|
1850 | # 9 ?? n*92 extra leases |
---|
1851 | hunk ./src/allmydata/storage/backends/disk/mutable.py 37 |
---|
1852 | |
---|
1853 | |
---|
1854 | -# The struct module doc says that L's are 4 bytes in size., and that Q's are |
---|
1855 | +# The struct module doc says that L's are 4 bytes in size, and that Q's are |
---|
1856 | # 8 bytes in size. Since compatibility depends upon this, double-check it. |
---|
1857 | assert struct.calcsize(">L") == 4, struct.calcsize(">L") |
---|
1858 | assert struct.calcsize(">Q") == 8, struct.calcsize(">Q") |
---|
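
Aside: a minimal sketch of packing and unpacking the fixed mutable-container
header laid out above (field values hypothetical):

    import struct

    HEADER = ">32s20s32sQQ"   # magic, write-enabler nodeid, write enabler,
                              # data length, extra-lease offset
    assert struct.calcsize(HEADER) == 100        # HEADER_SIZE
    assert 100 + 4*92 == 468                     # DATA_OFFSET, as asserted above

    header = struct.pack(HEADER, "m"*32, "n"*20, "w"*32, 0, 468)
    (magic, nodeid, write_enabler,
     data_length, extra_lease_offset) = struct.unpack(HEADER, header)
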
1859 | hunk ./src/allmydata/storage/backends/disk/mutable.py 42 |
---|
1860 | |
---|
1861 | -class MutableShareFile: |
---|
1862 | + |
---|
1863 | +class MutableDiskShare(object): |
---|
1864 | + implements(IStoredMutableShare) |
---|
1865 | |
---|
1866 | sharetype = "mutable" |
---|
1867 | DATA_LENGTH_OFFSET = struct.calcsize(">32s20s32s") |
---|
1868 | hunk ./src/allmydata/storage/backends/disk/mutable.py 54 |
---|
1869 | assert LEASE_SIZE == 92 |
---|
1870 | DATA_OFFSET = HEADER_SIZE + 4*LEASE_SIZE |
---|
1871 | assert DATA_OFFSET == 468, DATA_OFFSET |
---|
1872 | + |
---|
1873 | # our sharefiles start with a recognizable string, plus some random
---|
1874 | # binary data to reduce the chance that a regular text file will look |
---|
1875 | # like a sharefile. |
---|
1876 | hunk ./src/allmydata/storage/backends/disk/mutable.py 63 |
---|
1877 | MAX_SIZE = 2*1000*1000*1000 # 2GB, kind of arbitrary |
---|
1878 | # TODO: decide upon a policy for max share size |
---|
1879 | |
---|
1880 | - def __init__(self, filename, parent=None): |
---|
1881 | - self.home = filename |
---|
1882 | - if os.path.exists(self.home): |
---|
1883 | + def __init__(self, storageindex, shnum, home, parent=None): |
---|
1884 | + self._storageindex = storageindex |
---|
1885 | + self._shnum = shnum |
---|
1886 | + self._home = home |
---|
1887 | + if self._home.exists(): |
---|
1888 | # we don't cache anything, just check the magic |
---|
1889 | hunk ./src/allmydata/storage/backends/disk/mutable.py 69 |
---|
1890 | - f = open(self.home, 'rb') |
---|
1891 | - data = f.read(self.HEADER_SIZE) |
---|
1892 | - (magic, |
---|
1893 | - write_enabler_nodeid, write_enabler, |
---|
1894 | - data_length, extra_least_offset) = \ |
---|
1895 | - struct.unpack(">32s20s32sQQ", data) |
---|
1896 | - if magic != self.MAGIC: |
---|
1897 | - msg = "sharefile %s had magic '%r' but we wanted '%r'" % \ |
---|
1898 | - (filename, magic, self.MAGIC) |
---|
1899 | - raise UnknownMutableContainerVersionError(msg) |
---|
1900 | + f = self._home.open('rb') |
---|
1901 | + try: |
---|
1902 | + data = f.read(self.HEADER_SIZE) |
---|
1903 | + (magic, |
---|
1904 | + write_enabler_nodeid, write_enabler, |
---|
1905 | + data_length, extra_lease_offset) = \
---|
1906 | + struct.unpack(">32s20s32sQQ", data) |
---|
1907 | + if magic != self.MAGIC: |
---|
1908 | + msg = "sharefile %s had magic '%r' but we wanted '%r'" % \ |
---|
1909 | + (quote_filepath(self._home), magic, self.MAGIC) |
---|
1910 | + raise UnknownMutableContainerVersionError(msg) |
---|
1911 | + finally: |
---|
1912 | + f.close() |
---|
1913 | self.parent = parent # for logging |
---|
1914 | |
---|
1915 | def log(self, *args, **kwargs): |
---|
1916 | hunk ./src/allmydata/storage/backends/disk/mutable.py 88 |
---|
1917 | return self.parent.log(*args, **kwargs) |
---|
1918 | |
---|
1919 | def create(self, my_nodeid, write_enabler): |
---|
1920 | - assert not os.path.exists(self.home) |
---|
1921 | + assert not self._home.exists() |
---|
1922 | data_length = 0 |
---|
1923 | extra_lease_offset = (self.HEADER_SIZE |
---|
1924 | + 4 * self.LEASE_SIZE |
---|
1925 | hunk ./src/allmydata/storage/backends/disk/mutable.py 95 |
---|
1926 | + data_length) |
---|
1927 | assert extra_lease_offset == self.DATA_OFFSET # true at creation |
---|
1928 | num_extra_leases = 0 |
---|
1929 | - f = open(self.home, 'wb') |
---|
1930 | - header = struct.pack(">32s20s32sQQ", |
---|
1931 | - self.MAGIC, my_nodeid, write_enabler, |
---|
1932 | - data_length, extra_lease_offset, |
---|
1933 | - ) |
---|
1934 | - leases = ("\x00"*self.LEASE_SIZE) * 4 |
---|
1935 | - f.write(header + leases) |
---|
1936 | - # data goes here, empty after creation |
---|
1937 | - f.write(struct.pack(">L", num_extra_leases)) |
---|
1938 | - # extra leases go here, none at creation |
---|
1939 | - f.close() |
---|
1940 | + f = self._home.open('wb') |
---|
1941 | + try: |
---|
1942 | + header = struct.pack(">32s20s32sQQ", |
---|
1943 | + self.MAGIC, my_nodeid, write_enabler, |
---|
1944 | + data_length, extra_lease_offset, |
---|
1945 | + ) |
---|
1946 | + leases = ("\x00"*self.LEASE_SIZE) * 4 |
---|
1947 | + f.write(header + leases) |
---|
1948 | + # data goes here, empty after creation |
---|
1949 | + f.write(struct.pack(">L", num_extra_leases)) |
---|
1950 | + # extra leases go here, none at creation |
---|
1951 | + finally: |
---|
1952 | + f.close() |
---|
1953 | + |
---|
1954 | + def __repr__(self): |
---|
1955 | + return ("<MutableDiskShare %s:%r at %s>" |
---|
1956 | + % (si_b2a(self._storageindex), self._shnum, quote_filepath(self._home))) |
---|
1957 | + |
---|
1958 | + def get_used_space(self): |
---|
1959 | + return fileutil.get_used_space(self._home) |
---|
1960 | + |
---|
1961 | + def get_storage_index(self): |
---|
1962 | + return self._storageindex |
---|
1963 | + |
---|
1964 | + def get_shnum(self): |
---|
1965 | + return self._shnum |
---|
1966 | |
---|
1967 | def unlink(self): |
---|
1968 | hunk ./src/allmydata/storage/backends/disk/mutable.py 123 |
---|
1969 | - os.unlink(self.home) |
---|
1970 | + self._home.remove() |
---|
1971 | |
---|
1972 | def _read_data_length(self, f): |
---|
1973 | f.seek(self.DATA_LENGTH_OFFSET) |
---|
1974 | hunk ./src/allmydata/storage/backends/disk/mutable.py 291 |
---|
1975 | |
---|
1976 | def get_leases(self): |
---|
1977 | """Yields a LeaseInfo instance for all leases.""" |
---|
1978 | - f = open(self.home, 'rb') |
---|
1979 | - for i, lease in self._enumerate_leases(f): |
---|
1980 | - yield lease |
---|
1981 | - f.close() |
---|
1982 | + f = self._home.open('rb') |
---|
1983 | + try: |
---|
1984 | + for i, lease in self._enumerate_leases(f): |
---|
1985 | + yield lease |
---|
1986 | + finally: |
---|
1987 | + f.close() |
---|
1988 | |
---|
1989 | def _enumerate_leases(self, f): |
---|
1990 | for i in range(self._get_num_lease_slots(f)): |
---|
1991 | hunk ./src/allmydata/storage/backends/disk/mutable.py 303 |
---|
1992 | try: |
---|
1993 | data = self._read_lease_record(f, i) |
---|
1994 | if data is not None: |
---|
1995 | - yield i,data |
---|
1996 | + yield i, data |
---|
1997 | except IndexError: |
---|
1998 | return |
---|
1999 | |
---|
2000 | hunk ./src/allmydata/storage/backends/disk/mutable.py 307 |
---|
2001 | + # These lease operations are intended for use by disk_backend.py. |
---|
2002 | + # Other non-test clients should not depend on the fact that the disk |
---|
2003 | + # backend stores leases in share files. |
---|
2004 | + |
---|
2005 | def add_lease(self, lease_info): |
---|
2006 | precondition(lease_info.owner_num != 0) # 0 means "no lease here" |
---|
2007 | hunk ./src/allmydata/storage/backends/disk/mutable.py 313 |
---|
2008 | - f = open(self.home, 'rb+') |
---|
2009 | - num_lease_slots = self._get_num_lease_slots(f) |
---|
2010 | - empty_slot = self._get_first_empty_lease_slot(f) |
---|
2011 | - if empty_slot is not None: |
---|
2012 | - self._write_lease_record(f, empty_slot, lease_info) |
---|
2013 | - else: |
---|
2014 | - self._write_lease_record(f, num_lease_slots, lease_info) |
---|
2015 | - f.close() |
---|
2016 | + f = self._home.open('rb+') |
---|
2017 | + try: |
---|
2018 | + num_lease_slots = self._get_num_lease_slots(f) |
---|
2019 | + empty_slot = self._get_first_empty_lease_slot(f) |
---|
2020 | + if empty_slot is not None: |
---|
2021 | + self._write_lease_record(f, empty_slot, lease_info) |
---|
2022 | + else: |
---|
2023 | + self._write_lease_record(f, num_lease_slots, lease_info) |
---|
2024 | + finally: |
---|
2025 | + f.close() |
---|
2026 | |
---|
2027 | def renew_lease(self, renew_secret, new_expire_time): |
---|
2028 | accepting_nodeids = set() |
---|
2029 | hunk ./src/allmydata/storage/backends/disk/mutable.py 326 |
---|
2030 | - f = open(self.home, 'rb+') |
---|
2031 | - for (leasenum,lease) in self._enumerate_leases(f): |
---|
2032 | - if constant_time_compare(lease.renew_secret, renew_secret): |
---|
2033 | - # yup. See if we need to update the owner time. |
---|
2034 | - if new_expire_time > lease.expiration_time: |
---|
2035 | - # yes |
---|
2036 | - lease.expiration_time = new_expire_time |
---|
2037 | - self._write_lease_record(f, leasenum, lease) |
---|
2038 | - f.close() |
---|
2039 | - return |
---|
2040 | - accepting_nodeids.add(lease.nodeid) |
---|
2041 | - f.close() |
---|
2042 | + f = self._home.open('rb+') |
---|
2043 | + try: |
---|
2044 | + for (leasenum, lease) in self._enumerate_leases(f): |
---|
2045 | + if constant_time_compare(lease.renew_secret, renew_secret): |
---|
2046 | + # yup. See if we need to update the owner time. |
---|
2047 | + if new_expire_time > lease.expiration_time: |
---|
2048 | + # yes |
---|
2049 | + lease.expiration_time = new_expire_time |
---|
2050 | + self._write_lease_record(f, leasenum, lease) |
---|
2051 | + return |
---|
2052 | + accepting_nodeids.add(lease.nodeid) |
---|
2053 | + finally: |
---|
2054 | + f.close() |
---|
2055 | # Return the accepting_nodeids set, to give the client a chance to |
---|
2056 | hunk ./src/allmydata/storage/backends/disk/mutable.py 340 |
---|
2057 | - # update the leases on a share which has been migrated from its |
---|
2058 | + # update the leases on a share that has been migrated from its |
---|
2059 | # original server to a new one. |
---|
2060 | msg = ("Unable to renew non-existent lease. I have leases accepted by" |
---|
2061 | " nodeids: ") |
---|
2062 | hunk ./src/allmydata/storage/backends/disk/mutable.py 357 |
---|
2063 | except IndexError: |
---|
2064 | self.add_lease(lease_info) |
---|
2065 | |
---|
2066 | - def cancel_lease(self, cancel_secret): |
---|
2067 | - """Remove any leases with the given cancel_secret. If the last lease |
---|
2068 | - is cancelled, the file will be removed. Return the number of bytes |
---|
2069 | - that were freed (by truncating the list of leases, and possibly by |
---|
2070 | - deleting the file. Raise IndexError if there was no lease with the |
---|
2071 | - given cancel_secret.""" |
---|
2072 | - |
---|
2073 | - accepting_nodeids = set() |
---|
2074 | - modified = 0 |
---|
2075 | - remaining = 0 |
---|
2076 | - blank_lease = LeaseInfo(owner_num=0, |
---|
2077 | - renew_secret="\x00"*32, |
---|
2078 | - cancel_secret="\x00"*32, |
---|
2079 | - expiration_time=0, |
---|
2080 | - nodeid="\x00"*20) |
---|
2081 | - f = open(self.home, 'rb+') |
---|
2082 | - for (leasenum,lease) in self._enumerate_leases(f): |
---|
2083 | - accepting_nodeids.add(lease.nodeid) |
---|
2084 | - if constant_time_compare(lease.cancel_secret, cancel_secret): |
---|
2085 | - self._write_lease_record(f, leasenum, blank_lease) |
---|
2086 | - modified += 1 |
---|
2087 | - else: |
---|
2088 | - remaining += 1 |
---|
2089 | - if modified: |
---|
2090 | - freed_space = self._pack_leases(f) |
---|
2091 | - f.close() |
---|
2092 | - if not remaining: |
---|
2093 | - freed_space += os.stat(self.home)[stat.ST_SIZE] |
---|
2094 | - self.unlink() |
---|
2095 | - return freed_space |
---|
2096 | - |
---|
2097 | - msg = ("Unable to cancel non-existent lease. I have leases " |
---|
2098 | - "accepted by nodeids: ") |
---|
2099 | - msg += ",".join([("'%s'" % idlib.nodeid_b2a(anid)) |
---|
2100 | - for anid in accepting_nodeids]) |
---|
2101 | - msg += " ." |
---|
2102 | - raise IndexError(msg) |
---|
2103 | - |
---|
2104 | - def _pack_leases(self, f): |
---|
2105 | - # TODO: reclaim space from cancelled leases |
---|
2106 | - return 0 |
---|
2107 | - |
---|
2108 | def _read_write_enabler_and_nodeid(self, f): |
---|
2109 | f.seek(0) |
---|
2110 | data = f.read(self.HEADER_SIZE) |
---|
2111 | hunk ./src/allmydata/storage/backends/disk/mutable.py 369 |
---|
2112 | |
---|
2113 | def readv(self, readv): |
---|
2114 | datav = [] |
---|
2115 | - f = open(self.home, 'rb') |
---|
2116 | - for (offset, length) in readv: |
---|
2117 | - datav.append(self._read_share_data(f, offset, length)) |
---|
2118 | - f.close() |
---|
2119 | + f = self._home.open('rb') |
---|
2120 | + try: |
---|
2121 | + for (offset, length) in readv: |
---|
2122 | + datav.append(self._read_share_data(f, offset, length)) |
---|
2123 | + finally: |
---|
2124 | + f.close() |
---|
2125 | return datav |
---|
2126 | |
---|
2127 | hunk ./src/allmydata/storage/backends/disk/mutable.py 377 |
---|
2128 | -# def remote_get_length(self): |
---|
2129 | -# f = open(self.home, 'rb') |
---|
2130 | -# data_length = self._read_data_length(f) |
---|
2131 | -# f.close() |
---|
2132 | -# return data_length |
---|
2133 | + def get_size(self): |
---|
2134 | + return self._home.getsize() |
---|
2135 | + |
---|
2136 | + def get_data_length(self): |
---|
2137 | + f = self._home.open('rb') |
---|
2138 | + try: |
---|
2139 | + data_length = self._read_data_length(f) |
---|
2140 | + finally: |
---|
2141 | + f.close() |
---|
2142 | + return data_length |
---|
2143 | |
---|
2144 | def check_write_enabler(self, write_enabler, si_s): |
---|
2145 | hunk ./src/allmydata/storage/backends/disk/mutable.py 389 |
---|
2146 | - f = open(self.home, 'rb+') |
---|
2147 | - (real_write_enabler, write_enabler_nodeid) = \ |
---|
2148 | - self._read_write_enabler_and_nodeid(f) |
---|
2149 | - f.close() |
---|
2150 | + f = self._home.open('rb+') |
---|
2151 | + try: |
---|
2152 | + (real_write_enabler, write_enabler_nodeid) = self._read_write_enabler_and_nodeid(f) |
---|
2153 | + finally: |
---|
2154 | + f.close() |
---|
2155 | # avoid a timing attack |
---|
2156 | #if write_enabler != real_write_enabler: |
---|
2157 | if not constant_time_compare(write_enabler, real_write_enabler): |
---|
2158 | hunk ./src/allmydata/storage/backends/disk/mutable.py 410 |
---|
2159 | |
---|
2160 | def check_testv(self, testv): |
---|
2161 | test_good = True |
---|
2162 | - f = open(self.home, 'rb+') |
---|
2163 | - for (offset, length, operator, specimen) in testv: |
---|
2164 | - data = self._read_share_data(f, offset, length) |
---|
2165 | - if not testv_compare(data, operator, specimen): |
---|
2166 | - test_good = False |
---|
2167 | - break |
---|
2168 | - f.close() |
---|
2169 | + f = self._home.open('rb+') |
---|
2170 | + try: |
---|
2171 | + for (offset, length, operator, specimen) in testv: |
---|
2172 | + data = self._read_share_data(f, offset, length) |
---|
2173 | + if not testv_compare(data, operator, specimen): |
---|
2174 | + test_good = False |
---|
2175 | + break |
---|
2176 | + finally: |
---|
2177 | + f.close() |
---|
2178 | return test_good |
---|
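
Aside: a test vector entry is an (offset, length, operator, specimen) tuple;
check_testv reads `length` bytes at `offset` and compares them to `specimen`
with the named operator (testv_compare now lives in
allmydata.storage.backends.base). A minimal sketch:

    # illustrative only: a share whose first three bytes are "abc"
    # passes the test vector (0, 3, "eq", "abc")
    offset, length, operator, specimen = (0, 3, "eq", "abc")
    data = "abc"                       # what _read_share_data(f, 0, 3) returned
    assert operator in ("lt", "le", "eq", "ne", "ge", "gt")
    assert data == specimen            # the "eq" case of testv_compare
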
2179 | |
---|
2180 | def writev(self, datav, new_length): |
---|
2181 | hunk ./src/allmydata/storage/backends/disk/mutable.py 422 |
---|
2182 | - f = open(self.home, 'rb+') |
---|
2183 | - for (offset, data) in datav: |
---|
2184 | - self._write_share_data(f, offset, data) |
---|
2185 | - if new_length is not None: |
---|
2186 | - cur_length = self._read_data_length(f) |
---|
2187 | - if new_length < cur_length: |
---|
2188 | - self._write_data_length(f, new_length) |
---|
2189 | - # TODO: if we're going to shrink the share file when the |
---|
2190 | - # share data has shrunk, then call |
---|
2191 | - # self._change_container_size() here. |
---|
2192 | - f.close() |
---|
2193 | - |
---|
2194 | -def testv_compare(a, op, b): |
---|
2195 | - assert op in ("lt", "le", "eq", "ne", "ge", "gt") |
---|
2196 | - if op == "lt": |
---|
2197 | - return a < b |
---|
2198 | - if op == "le": |
---|
2199 | - return a <= b |
---|
2200 | - if op == "eq": |
---|
2201 | - return a == b |
---|
2202 | - if op == "ne": |
---|
2203 | - return a != b |
---|
2204 | - if op == "ge": |
---|
2205 | - return a >= b |
---|
2206 | - if op == "gt": |
---|
2207 | - return a > b |
---|
2208 | - # never reached |
---|
2209 | + f = self._home.open('rb+') |
---|
2210 | + try: |
---|
2211 | + for (offset, data) in datav: |
---|
2212 | + self._write_share_data(f, offset, data) |
---|
2213 | + if new_length is not None: |
---|
2214 | + cur_length = self._read_data_length(f) |
---|
2215 | + if new_length < cur_length: |
---|
2216 | + self._write_data_length(f, new_length) |
---|
2217 | + # TODO: if we're going to shrink the share file when the |
---|
2218 | + # share data has shrunk, then call |
---|
2219 | + # self._change_container_size() here. |
---|
2220 | + finally: |
---|
2221 | + f.close() |
---|
2222 | |
---|
2223 | hunk ./src/allmydata/storage/backends/disk/mutable.py 436 |
---|
2224 | -class EmptyShare: |
---|
2225 | + def close(self): |
---|
2226 | + pass |
---|
2227 | |
---|
2228 | hunk ./src/allmydata/storage/backends/disk/mutable.py 439 |
---|
2229 | - def check_testv(self, testv): |
---|
2230 | - test_good = True |
---|
2231 | - for (offset, length, operator, specimen) in testv: |
---|
2232 | - data = "" |
---|
2233 | - if not testv_compare(data, operator, specimen): |
---|
2234 | - test_good = False |
---|
2235 | - break |
---|
2236 | - return test_good |
---|
2237 | |
---|
2238 | hunk ./src/allmydata/storage/backends/disk/mutable.py 440 |
---|
2239 | -def create_mutable_sharefile(filename, my_nodeid, write_enabler, parent): |
---|
2240 | - ms = MutableShareFile(filename, parent) |
---|
2241 | - ms.create(my_nodeid, write_enabler) |
---|
2242 | +def create_mutable_disk_share(storageindex, shnum, fp, nodeid, write_enabler, parent):
---|
2243 | + ms = MutableDiskShare(storageindex, shnum, fp, parent)
---|
2244 | + ms.create(nodeid, write_enabler) |
---|
2245 | del ms |
---|
2246 | hunk ./src/allmydata/storage/backends/disk/mutable.py 444 |
---|
2247 | - return MutableShareFile(filename, parent) |
---|
2248 | - |
---|
2249 | + return MutableDiskShare(storageindex, shnum, fp, parent)
---|
2250 | addfile ./src/allmydata/storage/backends/null/__init__.py |
---|
2251 | addfile ./src/allmydata/storage/backends/null/null_backend.py |
---|
2252 | hunk ./src/allmydata/storage/backends/null/null_backend.py 2 |
---|
2253 | |
---|
2254 | +import os, struct |
---|
2255 | + |
---|
2256 | +from zope.interface import implements |
---|
2257 | + |
---|
2258 | +from allmydata.interfaces import IStorageBackend, IShareSet, IStoredShare, IStoredMutableShare |
---|
2259 | +from allmydata.util.assertutil import precondition |
---|
2260 | +from allmydata.util.hashutil import constant_time_compare |
---|
2261 | +from allmydata.storage.backends.base import Backend, ShareSet |
---|
2262 | +from allmydata.storage.bucket import BucketWriter |
---|
2263 | +from allmydata.storage.common import si_b2a |
---|
2264 | +from allmydata.storage.lease import LeaseInfo |
---|
2265 | + |
---|
2266 | + |
---|
2267 | +class NullBackend(Backend): |
---|
2268 | + implements(IStorageBackend) |
---|
2269 | + |
---|
2270 | + def __init__(self): |
---|
2271 | + Backend.__init__(self) |
---|
2272 | + |
---|
2273 | + def get_available_space(self):
---|
2274 | + return None |
---|
2275 | + |
---|
2276 | + def get_sharesets_for_prefix(self, prefix): |
---|
2277 | + return []
---|
2278 | + |
---|
2279 | + def get_shareset(self, storageindex): |
---|
2280 | + return NullShareSet(storageindex) |
---|
2281 | + |
---|
2282 | + def fill_in_space_stats(self, stats): |
---|
2283 | + pass |
---|
2284 | + |
---|
2285 | + def set_storage_server(self, ss): |
---|
2286 | + self.ss = ss |
---|
2287 | + |
---|
2288 | + def advise_corrupt_share(self, sharetype, storageindex, shnum, reason): |
---|
2289 | + pass |
---|
2290 | + |
---|
2291 | + |
---|
2292 | +class NullShareSet(ShareSet): |
---|
2293 | + implements(IShareSet) |
---|
2294 | + |
---|
2295 | + def __init__(self, storageindex): |
---|
2296 | + self.storageindex = storageindex |
---|
2297 | + |
---|
2298 | + def get_overhead(self): |
---|
2299 | + return 0 |
---|
2300 | + |
---|
2301 | + def get_incoming_shnums(self): |
---|
2302 | + return frozenset() |
---|
2303 | + |
---|
2304 | + def get_shares(self): |
---|
2305 | + return []
---|
2306 | + |
---|
2307 | + def get_share(self, shnum): |
---|
2308 | + return None |
---|
2309 | + |
---|
2310 | + def get_storage_index(self): |
---|
2311 | + return self.storageindex |
---|
2312 | + |
---|
2313 | + def get_storage_index_string(self): |
---|
2314 | + return si_b2a(self.storageindex) |
---|
2315 | + |
---|
2316 | + def make_bucket_writer(self, storageserver, shnum, max_space_per_bucket, lease_info, canary): |
---|
2317 | + immutableshare = ImmutableNullShare() |
---|
2318 | + return BucketWriter(storageserver, immutableshare, max_space_per_bucket, lease_info, canary)
---|
2319 | + |
---|
2320 | + def _create_mutable_share(self, storageserver, shnum, write_enabler): |
---|
2321 | + return MutableNullShare() |
---|
2322 | + |
---|
2323 | + def _clean_up_after_unlink(self): |
---|
2324 | + pass |
---|
2325 | + |
---|
2326 | + |
---|
2327 | +class ImmutableNullShare: |
---|
2328 | + implements(IStoredShare) |
---|
2329 | + sharetype = "immutable" |
---|
2330 | + |
---|
2331 | + def __init__(self): |
---|
2332 | + """ If max_size is not None then I won't allow more than |
---|
2333 | + max_size to be written to me. If create=True then max_size |
---|
2334 | + must not be None. """ |
---|
2335 | + pass |
---|
2336 | + |
---|
2337 | + def get_shnum(self): |
---|
2338 | + return self.shnum |
---|
2339 | + |
---|
2340 | + def unlink(self): |
---|
2341 | + pass # the null share has no backing file to remove
---|
2342 | + |
---|
2343 | + def read_share_data(self, offset, length): |
---|
2344 | + precondition(offset >= 0) |
---|
2345 | + # Reads beyond the end of the data are truncated. Reads that start |
---|
2346 | + # beyond the end of the data return an empty string. |
---|
2347 | + seekpos = self._data_offset+offset |
---|
2348 | + fsize = os.path.getsize(self.fname) |
---|
2349 | + actuallength = max(0, min(length, fsize-seekpos)) # XXX #1528 |
---|
2350 | + if actuallength == 0: |
---|
2351 | + return "" |
---|
2352 | + f = open(self.fname, 'rb') |
---|
2353 | + f.seek(seekpos) |
---|
2354 | + return f.read(actuallength) |
---|
2355 | + |
---|
2356 | + def write_share_data(self, offset, data): |
---|
2357 | + pass |
---|
2358 | + |
---|
2359 | + def _write_lease_record(self, f, lease_number, lease_info): |
---|
2360 | + offset = self._lease_offset + lease_number * self.LEASE_SIZE |
---|
2361 | + f.seek(offset) |
---|
2362 | + assert f.tell() == offset |
---|
2363 | + f.write(lease_info.to_immutable_data()) |
---|
2364 | + |
---|
2365 | + def _read_num_leases(self, f): |
---|
2366 | + f.seek(0x08) |
---|
2367 | + (num_leases,) = struct.unpack(">L", f.read(4)) |
---|
2368 | + return num_leases |
---|
2369 | + |
---|
2370 | + def _write_num_leases(self, f, num_leases): |
---|
2371 | + f.seek(0x08) |
---|
2372 | + f.write(struct.pack(">L", num_leases)) |
---|
2373 | + |
---|
2374 | + def _truncate_leases(self, f, num_leases): |
---|
2375 | + f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE) |
---|
2376 | + |
---|
2377 | + def get_leases(self): |
---|
2378 | + """Yields a LeaseInfo instance for all leases.""" |
---|
2379 | + f = open(self.fname, 'rb') |
---|
2380 | + (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc)) |
---|
2381 | + f.seek(self._lease_offset) |
---|
2382 | + for i in range(num_leases): |
---|
2383 | + data = f.read(self.LEASE_SIZE) |
---|
2384 | + if data: |
---|
2385 | + yield LeaseInfo().from_immutable_data(data) |
---|
2386 | + |
---|
2387 | + def add_lease(self, lease): |
---|
2388 | + pass |
---|
2389 | + |
---|
2390 | + def renew_lease(self, renew_secret, new_expire_time): |
---|
2391 | + for i,lease in enumerate(self.get_leases()): |
---|
2392 | + if constant_time_compare(lease.renew_secret, renew_secret): |
---|
2393 | + # yup. See if we need to update the owner time. |
---|
2394 | + if new_expire_time > lease.expiration_time: |
---|
2395 | + # yes |
---|
2396 | + lease.expiration_time = new_expire_time |
---|
2397 | + f = open(self.fname, 'rb+') |
---|
2398 | + self._write_lease_record(f, i, lease) |
---|
2399 | + f.close() |
---|
2400 | + return |
---|
2401 | + raise IndexError("unable to renew non-existent lease") |
---|
2402 | + |
---|
2403 | + def add_or_renew_lease(self, lease_info): |
---|
2404 | + try: |
---|
2405 | + self.renew_lease(lease_info.renew_secret, |
---|
2406 | + lease_info.expiration_time) |
---|
2407 | + except IndexError: |
---|
2408 | + self.add_lease(lease_info) |
---|
2409 | + |
---|
2410 | + |
---|
2411 | +class MutableNullShare: |
---|
2412 | + implements(IStoredMutableShare) |
---|
2413 | + sharetype = "mutable" |
---|
2414 | + |
---|
2415 | + """ XXX: TODO """ |
---|
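
Aside: the null backend accepts writes and discards them, which makes it a
minimal model of the backend interface. Illustrative only (the storage
index is hypothetical):

    backend = NullBackend()
    ss = backend.get_shareset("\x00" * 16)   # hypothetical 16-byte storage index
    assert ss.get_overhead() == 0
    assert ss.get_incoming_shnums() == frozenset()
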
2416 | addfile ./src/allmydata/storage/bucket.py |
---|
2417 | hunk ./src/allmydata/storage/bucket.py 1 |
---|
2418 | + |
---|
2419 | +import time |
---|
2420 | + |
---|
2421 | +from foolscap.api import Referenceable |
---|
2422 | + |
---|
2423 | +from zope.interface import implements |
---|
2424 | +from allmydata.interfaces import RIBucketWriter, RIBucketReader |
---|
2425 | +from allmydata.util import base32, log |
---|
2426 | +from allmydata.util.assertutil import precondition |
---|
2427 | + |
---|
2428 | + |
---|
2429 | +class BucketWriter(Referenceable): |
---|
2430 | + implements(RIBucketWriter) |
---|
2431 | + |
---|
2432 | + def __init__(self, ss, immutableshare, max_size, lease_info, canary): |
---|
2433 | + self.ss = ss |
---|
2434 | + self._max_size = max_size # don't allow the client to write more than this |
---|
2435 | + self._canary = canary |
---|
2436 | + self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected) |
---|
2437 | + self.closed = False |
---|
2438 | + self.throw_out_all_data = False |
---|
2439 | + self._share = immutableshare |
---|
2440 | + # also, add our lease to the file now, so that other ones can be |
---|
2441 | + # added by simultaneous uploaders |
---|
2442 | + self._share.add_lease(lease_info) |
---|
2443 | + |
---|
2444 | + def allocated_size(self): |
---|
2445 | + return self._max_size |
---|
2446 | + |
---|
2447 | + def remote_write(self, offset, data): |
---|
2448 | + start = time.time() |
---|
2449 | + precondition(not self.closed) |
---|
2450 | + if self.throw_out_all_data: |
---|
2451 | + return |
---|
2452 | + self._share.write_share_data(offset, data) |
---|
2453 | + self.ss.add_latency("write", time.time() - start) |
---|
2454 | + self.ss.count("write") |
---|
2455 | + |
---|
2456 | + def remote_close(self): |
---|
2457 | + precondition(not self.closed) |
---|
2458 | + start = time.time() |
---|
2459 | + |
---|
2460 | + self._share.close() |
---|
2461 | + filelen = self._share.stat() |
---|
2462 | + self._share = None |
---|
2463 | + |
---|
2464 | + self.closed = True |
---|
2465 | + self._canary.dontNotifyOnDisconnect(self._disconnect_marker) |
---|
2466 | + |
---|
2467 | + self.ss.bucket_writer_closed(self, filelen) |
---|
2468 | + self.ss.add_latency("close", time.time() - start) |
---|
2469 | + self.ss.count("close") |
---|
2470 | + |
---|
2471 | + def _disconnected(self): |
---|
2472 | + if not self.closed: |
---|
2473 | + self._abort() |
---|
2474 | + |
---|
2475 | + def remote_abort(self): |
---|
2476 | + log.msg("storage: aborting write to share %r" % self._share, |
---|
2477 | + facility="tahoe.storage", level=log.UNUSUAL) |
---|
2478 | + if not self.closed: |
---|
2479 | + self._canary.dontNotifyOnDisconnect(self._disconnect_marker) |
---|
2480 | + self._abort() |
---|
2481 | + self.ss.count("abort") |
---|
2482 | + |
---|
2483 | + def _abort(self): |
---|
2484 | + if self.closed: |
---|
2485 | + return |
---|
2486 | + self._share.unlink() |
---|
2487 | + self._share = None |
---|
2488 | + |
---|
2489 | + # We are now considered closed for further writing. We must tell |
---|
2490 | + # the storage server about this so that it stops expecting us to |
---|
2491 | + # use the space it allocated for us earlier. |
---|
2492 | + self.closed = True |
---|
2493 | + self.ss.bucket_writer_closed(self, 0) |
---|
2494 | + |
---|
2495 | + |
---|
2496 | +class BucketReader(Referenceable): |
---|
2497 | + implements(RIBucketReader) |
---|
2498 | + |
---|
2499 | + def __init__(self, ss, share): |
---|
2500 | + self.ss = ss |
---|
2501 | + self._share = share |
---|
2502 | + self.storageindex = share.storageindex |
---|
2503 | + self.shnum = share.shnum |
---|
2504 | + |
---|
2505 | + def __repr__(self): |
---|
2506 | + return "<%s %s %s>" % (self.__class__.__name__, |
---|
2507 | + base32.b2a_l(self.storageindex[:8], 60), |
---|
2508 | + self.shnum) |
---|
2509 | + |
---|
2510 | + def remote_read(self, offset, length): |
---|
2511 | + start = time.time() |
---|
2512 | + data = self._share.read_share_data(offset, length) |
---|
2513 | + self.ss.add_latency("read", time.time() - start) |
---|
2514 | + self.ss.count("read") |
---|
2515 | + return data |
---|
2516 | + |
---|
2517 | + def remote_advise_corrupt_share(self, reason): |
---|
2518 | + return self.ss.remote_advise_corrupt_share("immutable", |
---|
2519 | + self.storageindex, |
---|
2520 | + self.shnum, |
---|
2521 | + reason) |
---|
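BucketWriter and BucketReader above are thin stateful wrappers around a backend share object. A usage sketch of the intended lifecycle, with hypothetical ss/share/lease_info/canary objects; every writer must end in remote_close() or remote_abort(), otherwise the server keeps counting its allocated space:

    writer = BucketWriter(ss, share, max_size=1000,
                          lease_info=lease_info, canary=canary)
    writer.remote_write(0, "a"*500)    # delegated to share.write_share_data
    writer.remote_write(500, "b"*500)
    writer.remote_close()              # records stats, frees the allocation

    reader = BucketReader(ss, share)
    assert reader.remote_read(0, 500) == "a"*500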
2522 | hunk ./src/allmydata/storage/common.py 1 |
---|
2523 | - |
---|
2524 | -import os.path |
---|
2525 | from allmydata.util import base32 |
---|
2526 | |
---|
2527 | class DataTooLargeError(Exception): |
---|
2528 | hunk ./src/allmydata/storage/common.py 5 |
---|
2529 | pass |
---|
2530 | + |
---|
2531 | class UnknownMutableContainerVersionError(Exception): |
---|
2532 | pass |
---|
2533 | hunk ./src/allmydata/storage/common.py 8 |
---|
2534 | + |
---|
2535 | class UnknownImmutableContainerVersionError(Exception): |
---|
2536 | pass |
---|
2537 | |
---|
2538 | hunk ./src/allmydata/storage/common.py 18 |
---|
2539 | |
---|
2540 | def si_a2b(ascii_storageindex): |
---|
2541 | return base32.a2b(ascii_storageindex) |
---|
2542 | - |
---|
2543 | -def storage_index_to_dir(storageindex): |
---|
2544 | - sia = si_b2a(storageindex) |
---|
2545 | - return os.path.join(sia[:2], sia) |
---|
2546 | hunk ./src/allmydata/storage/crawler.py 2 |
---|
2547 | |
---|
2548 | -import os, time, struct |
---|
2549 | +import time, struct |
---|
2550 | import cPickle as pickle |
---|
2551 | from twisted.internet import reactor |
---|
2552 | from twisted.application import service |
---|
2553 | hunk ./src/allmydata/storage/crawler.py 7 |
---|
2554 | from allmydata.storage.common import si_b2a |
---|
2555 | -from allmydata.util import fileutil |
---|
2556 | + |
---|
2557 | |
---|
2558 | class TimeSliceExceeded(Exception): |
---|
2559 | pass |
---|
2560 | hunk ./src/allmydata/storage/crawler.py 12 |
---|
2561 | |
---|
2562 | + |
---|
2563 | class ShareCrawler(service.MultiService): |
---|
2564 | hunk ./src/allmydata/storage/crawler.py 14 |
---|
2565 | - """A ShareCrawler subclass is attached to a StorageServer, and |
---|
2566 | - periodically walks all of its shares, processing each one in some |
---|
2567 | - fashion. This crawl is rate-limited, to reduce the IO burden on the host, |
---|
2568 | - since large servers can easily have a terabyte of shares, in several |
---|
2569 | - million files, which can take hours or days to read. |
---|
2570 | + """ |
---|
2571 | + An instance of a subclass of ShareCrawler is attached to a storage |
---|
2572 | + backend, and periodically walks the backend's shares, processing them |
---|
2573 | + in some fashion. This crawl is rate-limited to reduce the I/O burden on |
---|
2574 | + the host, since large servers can easily have a terabyte of shares in |
---|
2575 | + several million files, which can take hours or days to read. |
---|
2576 | |
---|
2577 | Once the crawler starts a cycle, it will proceed at a rate limited by the |
---|
2578 | allowed_cpu_percentage= and cpu_slice= parameters: yielding the reactor |
---|
2579 | hunk ./src/allmydata/storage/crawler.py 30 |
---|
2580 | long enough to ensure that 'minimum_cycle_time' elapses between the start |
---|
2581 | of two consecutive cycles. |
---|
2582 | |
---|
2583 | - We assume that the normal upload/download/get_buckets traffic of a tahoe |
---|
2584 | + We assume that the normal upload/download/DYHB traffic of a Tahoe-LAFS |
---|
2585 | grid will cause the prefixdir contents to be mostly cached in the kernel, |
---|
2586 | hunk ./src/allmydata/storage/crawler.py 32 |
---|
2587 | - or that the number of buckets in each prefixdir will be small enough to |
---|
2588 | - load quickly. A 1TB allmydata.com server was measured to have 2.56M |
---|
2589 | - buckets, spread into the 1024 prefixdirs, with about 2500 buckets per |
---|
2590 | + or that the number of sharesets in each prefixdir will be small enough to |
---|
2591 | + load quickly. A 1TB allmydata.com server was measured to have 2.56 million |
---|
2592 | + sharesets, spread into the 1024 prefixdirs, with about 2500 sharesets per |
---|
2593 | prefix. On this server, each prefixdir took 130ms-200ms to list the first |
---|
2594 | time, and 17ms to list the second time. |
---|
2595 | |
---|
2596 | hunk ./src/allmydata/storage/crawler.py 38 |
---|
2597 | - To use a crawler, create a subclass which implements the process_bucket() |
---|
2598 | - method. It will be called with a prefixdir and a base32 storage index |
---|
2599 | - string. process_bucket() must run synchronously. Any keys added to |
---|
2600 | - self.state will be preserved. Override add_initial_state() to set up |
---|
2601 | - initial state keys. Override finished_cycle() to perform additional |
---|
2602 | - processing when the cycle is complete. Any status that the crawler |
---|
2603 | - produces should be put in the self.state dictionary. Status renderers |
---|
2604 | - (like a web page which describes the accomplishments of your crawler) |
---|
2605 | - will use crawler.get_state() to retrieve this dictionary; they can |
---|
2606 | - present the contents as they see fit. |
---|
2607 | + To implement a crawler, create a subclass that implements the |
---|
2608 | + process_shareset() method. It will be called with the cycle number, a |
---|
2609 | + prefix, and an object providing the IShareSet interface. It must run |
---|
2610 | + synchronously. Any keys added to self.state will be preserved. Override |
---|
2611 | + add_initial_state() to set up initial state keys. Override |
---|
2612 | + finished_cycle() to perform additional processing when the cycle is |
---|
2613 | + complete. Any status that the crawler produces should be put in the |
---|
2614 | + self.state dictionary. Status renderers (like a web page describing the |
---|
2615 | + accomplishments of your crawler) will use crawler.get_state() to retrieve |
---|
2616 | + this dictionary; they can present the contents as they see fit. |
---|
2617 | |
---|
2618 | hunk ./src/allmydata/storage/crawler.py 49 |
---|
2619 | - Then create an instance, with a reference to a StorageServer and a |
---|
2620 | - filename where it can store persistent state. The statefile is used to |
---|
2621 | - keep track of how far around the ring the process has travelled, as well |
---|
2622 | - as timing history to allow the pace to be predicted and controlled. The |
---|
2623 | - statefile will be updated and written to disk after each time slice (just |
---|
2624 | - before the crawler yields to the reactor), and also after each cycle is |
---|
2625 | - finished, and also when stopService() is called. Note that this means |
---|
2626 | - that a crawler which is interrupted with SIGKILL while it is in the |
---|
2627 | - middle of a time slice will lose progress: the next time the node is |
---|
2628 | - started, the crawler will repeat some unknown amount of work. |
---|
2629 | + Then create an instance, with a reference to a backend object providing |
---|
2630 | + the IStorageBackend interface, and a filename where it can store |
---|
2631 | + persistent state. The statefile is used to keep track of how far around |
---|
2632 | + the ring the process has travelled, as well as timing history to allow |
---|
2633 | + the pace to be predicted and controlled. The statefile will be updated |
---|
2634 | + and written to disk after each time slice (just before the crawler yields |
---|
2635 | + to the reactor), and also after each cycle is finished, and also when |
---|
2636 | + stopService() is called. Note that this means that a crawler that is |
---|
2637 | + interrupted with SIGKILL while it is in the middle of a time slice will |
---|
2638 | + lose progress: the next time the node is started, the crawler will repeat |
---|
2639 | + some unknown amount of work. |
---|
2640 | |
---|
2641 | The crawler instance must be started with startService() before it will |
---|
2642 | hunk ./src/allmydata/storage/crawler.py 62 |
---|
2643 | - do any work. To make it stop doing work, call stopService(). |
---|
2644 | + do any work. To make it stop doing work, call stopService(). A crawler |
---|
2645 | + is usually a child service of a StorageServer, although it should not |
---|
2646 | + depend on that. |
---|
2647 | + |
---|
2648 | + For historical reasons, some dictionary key names use the term "bucket" |
---|
2649 | + for what is now preferably called a "shareset" (the set of shares that a |
---|
2650 | + server holds under a given storage index). |
---|
2651 | """ |
---|
2652 | |
---|
2653 | slow_start = 300 # don't start crawling for 5 minutes after startup |
---|
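A minimal subclass following the recipe in this docstring, as a sketch (the IShareSet.get_shares() call is an assumption, taken from its use in the expirer changes later in this patch):

    from allmydata.storage.crawler import ShareCrawler

    class ShareCountingCrawler(ShareCrawler):
        minimum_cycle_time = 60*60

        def add_initial_state(self):
            self.state.setdefault("share-count", 0)

        def process_shareset(self, cycle, prefix, shareset):
            # runs synchronously; self.state is persisted for us
            for share in shareset.get_shares():
                self.state["share-count"] += 1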
2654 | hunk ./src/allmydata/storage/crawler.py 77 |
---|
2655 | cpu_slice = 1.0 # use up to 1.0 seconds before yielding |
---|
2656 | minimum_cycle_time = 300 # don't run a cycle faster than this |
---|
2657 | |
---|
2658 | - def __init__(self, server, statefile, allowed_cpu_percentage=None): |
---|
2659 | + def __init__(self, backend, statefp, allowed_cpu_percentage=None): |
---|
2660 | service.MultiService.__init__(self) |
---|
2661 | hunk ./src/allmydata/storage/crawler.py 79 |
---|
2662 | + self.backend = backend |
---|
2663 | + self.statefp = statefp |
---|
2664 | if allowed_cpu_percentage is not None: |
---|
2665 | self.allowed_cpu_percentage = allowed_cpu_percentage |
---|
2666 | hunk ./src/allmydata/storage/crawler.py 83 |
---|
2667 | - self.server = server |
---|
2668 | - self.sharedir = server.sharedir |
---|
2669 | - self.statefile = statefile |
---|
2670 | self.prefixes = [si_b2a(struct.pack(">H", i << (16-10)))[:2] |
---|
2671 | for i in range(2**10)] |
---|
2672 | self.prefixes.sort() |
---|
2673 | hunk ./src/allmydata/storage/crawler.py 87 |
---|
2674 | self.timer = None |
---|
2675 | - self.bucket_cache = (None, []) |
---|
2676 | + self.shareset_cache = (None, []) |
---|
2677 | self.current_sleep_time = None |
---|
2678 | self.next_wake_time = None |
---|
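The self.prefixes list built in __init__ has exactly 1024 entries: each prefix is the first two base32 characters of a storage index, i.e. its top 2*5 = 10 bits, so packing a 10-bit counter into the top of a 16-bit word enumerates every possible prefix. A sketch verifying this:

    import struct
    from allmydata.storage.common import si_b2a

    prefixes = sorted(set(si_b2a(struct.pack(">H", i << (16-10)))[:2]
                          for i in range(2**10)))
    assert len(prefixes) == 1024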
2679 | self.last_prefix_finished_time = None |
---|
2680 | hunk ./src/allmydata/storage/crawler.py 150 |
---|
2681 | left = len(self.prefixes) - self.last_complete_prefix_index |
---|
2682 | remaining = left * self.last_prefix_elapsed_time |
---|
2683 | # TODO: remainder of this prefix: we need to estimate the |
---|
2684 | - # per-bucket time, probably by measuring the time spent on |
---|
2685 | - # this prefix so far, divided by the number of buckets we've |
---|
2686 | + # per-shareset time, probably by measuring the time spent on |
---|
2687 | + # this prefix so far, divided by the number of sharesets we've |
---|
2688 | # processed. |
---|
2689 | d["estimated-cycle-complete-time-left"] = remaining |
---|
2690 | # it's possible to call get_progress() from inside a crawler's |
---|
2691 | hunk ./src/allmydata/storage/crawler.py 171 |
---|
2692 | state dictionary. |
---|
2693 | |
---|
2694 | If we are not currently sleeping (i.e. get_state() was called from |
---|
2695 | - inside the process_prefixdir, process_bucket, or finished_cycle() |
---|
2696 | + inside the process_prefixdir, process_shareset, or finished_cycle() |
---|
2697 | methods, or if startService has not yet been called on this crawler), |
---|
2698 | these two keys will be None. |
---|
2699 | |
---|
2700 | hunk ./src/allmydata/storage/crawler.py 184 |
---|
2701 | def load_state(self): |
---|
2702 | # we use this to store state for both the crawler's internals and |
---|
2703 | # anything the subclass-specific code needs. The state is stored |
---|
2704 | - # after each bucket is processed, after each prefixdir is processed, |
---|
2705 | + # after each shareset is processed, after each prefixdir is processed, |
---|
2706 | # and after a cycle is complete. The internal keys we use are: |
---|
2707 | # ["version"]: int, always 1 |
---|
2708 | # ["last-cycle-finished"]: int, or None if we have not yet finished |
---|
2709 | hunk ./src/allmydata/storage/crawler.py 198 |
---|
2710 | # are sleeping between cycles, or if we |
---|
2711 | # have not yet finished any prefixdir since |
---|
2712 | # a cycle was started |
---|
2713 | - # ["last-complete-bucket"]: str, base32 storage index bucket name |
---|
2714 | - # of the last bucket to be processed, or |
---|
2715 | - # None if we are sleeping between cycles |
---|
2716 | + # ["last-complete-bucket"]: str, base32 storage index of the last |
---|
2717 | + # shareset to be processed, or None if we |
---|
2718 | + # are sleeping between cycles |
---|
2719 | try: |
---|
2720 | hunk ./src/allmydata/storage/crawler.py 202 |
---|
2721 | - f = open(self.statefile, "rb") |
---|
2722 | - state = pickle.load(f) |
---|
2723 | - f.close() |
---|
2724 | + state = pickle.loads(self.statefp.getContent()) |
---|
2725 | except EnvironmentError: |
---|
2726 | state = {"version": 1, |
---|
2727 | "last-cycle-finished": None, |
---|
2728 | hunk ./src/allmydata/storage/crawler.py 238 |
---|
2729 | else: |
---|
2730 | last_complete_prefix = self.prefixes[lcpi] |
---|
2731 | self.state["last-complete-prefix"] = last_complete_prefix |
---|
2732 | - tmpfile = self.statefile + ".tmp" |
---|
2733 | - f = open(tmpfile, "wb") |
---|
2734 | - pickle.dump(self.state, f) |
---|
2735 | - f.close() |
---|
2736 | - fileutil.move_into_place(tmpfile, self.statefile) |
---|
2737 | + self.statefp.setContent(pickle.dumps(self.state)) |
---|
2738 | |
---|
2739 | def startService(self): |
---|
2740 | # arrange things to look like we were just sleeping, so |
---|
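The removed tmpfile-plus-move_into_place dance is now delegated to Twisted's FilePath.setContent, which itself writes to a temporary sibling file and renames it into place, so a crash mid-write leaves the previous state intact. A minimal sketch of the resulting load/save pair, assuming statefp is a twisted.python.filepath.FilePath:

    import cPickle as pickle

    def save_state(statefp, state):
        statefp.setContent(pickle.dumps(state))   # temp file + rename

    def load_state(statefp, default):
        try:
            return pickle.loads(statefp.getContent())
        except EnvironmentError:
            return default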
2741 | hunk ./src/allmydata/storage/crawler.py 280 |
---|
2742 | sleep_time = (this_slice / self.allowed_cpu_percentage) - this_slice |
---|
2743 | # if the math gets weird, or a timequake happens, don't sleep |
---|
2744 | # forever. Note that this means that, while a cycle is running, we |
---|
2745 | - # will process at least one bucket every 5 minutes, no matter how |
---|
2746 | - # long that bucket takes. |
---|
2747 | + # will process at least one shareset every 5 minutes, no matter how |
---|
2748 | + # long that shareset takes. |
---|
2749 | sleep_time = max(0.0, min(sleep_time, 299)) |
---|
2750 | if finished_cycle: |
---|
2751 | # how long should we sleep between cycles? Don't run faster than |
---|
2752 | hunk ./src/allmydata/storage/crawler.py 311 |
---|
2753 | for i in range(self.last_complete_prefix_index+1, len(self.prefixes)): |
---|
2754 | # if we want to yield earlier, just raise TimeSliceExceeded() |
---|
2755 | prefix = self.prefixes[i] |
---|
2756 | - prefixdir = os.path.join(self.sharedir, prefix) |
---|
2757 | - if i == self.bucket_cache[0]: |
---|
2758 | - buckets = self.bucket_cache[1] |
---|
2759 | + if i == self.shareset_cache[0]: |
---|
2760 | + sharesets = self.shareset_cache[1] |
---|
2761 | else: |
---|
2762 | hunk ./src/allmydata/storage/crawler.py 314 |
---|
2763 | - try: |
---|
2764 | - buckets = os.listdir(prefixdir) |
---|
2765 | - buckets.sort() |
---|
2766 | - except EnvironmentError: |
---|
2767 | - buckets = [] |
---|
2768 | - self.bucket_cache = (i, buckets) |
---|
2769 | - self.process_prefixdir(cycle, prefix, prefixdir, |
---|
2770 | - buckets, start_slice) |
---|
2771 | + sharesets = self.backend.get_sharesets_for_prefix(prefix) |
---|
2772 | + self.shareset_cache = (i, sharesets) |
---|
2773 | + self.process_prefixdir(cycle, prefix, sharesets, start_slice) |
---|
2774 | self.last_complete_prefix_index = i |
---|
2775 | |
---|
2776 | now = time.time() |
---|
2777 | hunk ./src/allmydata/storage/crawler.py 341 |
---|
2778 | self.finished_cycle(cycle) |
---|
2779 | self.save_state() |
---|
2780 | |
---|
2781 | - def process_prefixdir(self, cycle, prefix, prefixdir, buckets, start_slice): |
---|
2782 | - """This gets a list of bucket names (i.e. storage index strings, |
---|
2783 | + def process_prefixdir(self, cycle, prefix, sharesets, start_slice): |
---|
2784 | + """ |
---|
2785 | + This gets a list of shareset names (i.e. storage index strings, |
---|
2786 | base32-encoded) in sorted order. |
---|
2787 | |
---|
2788 | You can override this if your crawler doesn't care about the actual |
---|
2789 | hunk ./src/allmydata/storage/crawler.py 348 |
---|
2790 | shares, for example a crawler which merely keeps track of how many |
---|
2791 | - buckets are being managed by this server. |
---|
2792 | + sharesets are being managed by this server. |
---|
2793 | |
---|
2794 | hunk ./src/allmydata/storage/crawler.py 350 |
---|
2795 | - Subclasses which *do* care about actual bucket should leave this |
---|
2796 | - method along, and implement process_bucket() instead. |
---|
2797 | + Subclasses which *do* care about actual sharesets should leave this |
---|
2798 | + method alone, and implement process_shareset() instead. |
---|
2799 | """ |
---|
2800 | |
---|
2801 | hunk ./src/allmydata/storage/crawler.py 354 |
---|
2802 | - for bucket in buckets: |
---|
2803 | - if bucket <= self.state["last-complete-bucket"]: |
---|
2804 | + for shareset in sharesets: |
---|
2805 | + base32si = shareset.get_storage_index_string() |
---|
2806 | + if base32si <= self.state["last-complete-bucket"]: |
---|
2807 | continue |
---|
2808 | hunk ./src/allmydata/storage/crawler.py 358 |
---|
2809 | - self.process_bucket(cycle, prefix, prefixdir, bucket) |
---|
2810 | - self.state["last-complete-bucket"] = bucket |
---|
2811 | + self.process_shareset(cycle, prefix, shareset) |
---|
2812 | + self.state["last-complete-bucket"] = base32si |
---|
2813 | if time.time() >= start_slice + self.cpu_slice: |
---|
2814 | raise TimeSliceExceeded() |
---|
2815 | |
---|
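The loop above is the crawler's cooperative-scheduling core: process sharesets until the slice budget runs out, then raise so the caller can save state and yield to the reactor. Distilled as a sketch:

    import time
    from allmydata.storage.crawler import TimeSliceExceeded

    def process_with_budget(items, process, start_slice, cpu_slice):
        for item in items:
            process(item)
            if time.time() >= start_slice + cpu_slice:
                raise TimeSliceExceeded()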
2816 | hunk ./src/allmydata/storage/crawler.py 366 |
---|
2817 | # the remaining methods are explictly for subclasses to implement. |
---|
2818 | |
---|
2819 | def started_cycle(self, cycle): |
---|
2820 | - """Notify a subclass that the crawler is about to start a cycle. |
---|
2821 | + """ |
---|
2822 | + Notify a subclass that the crawler is about to start a cycle. |
---|
2823 | |
---|
2824 | This method is for subclasses to override. No upcall is necessary. |
---|
2825 | """ |
---|
2826 | hunk ./src/allmydata/storage/crawler.py 373 |
---|
2827 | pass |
---|
2828 | |
---|
2829 | - def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): |
---|
2830 | - """Examine a single bucket. Subclasses should do whatever they want |
---|
2831 | + def process_shareset(self, cycle, prefix, shareset): |
---|
2832 | + """ |
---|
2833 | + Examine a single shareset. Subclasses should do whatever they want |
---|
2834 | to do to the shares therein, then update self.state as necessary. |
---|
2835 | |
---|
2836 | If the crawler is never interrupted by SIGKILL, this method will be |
---|
2837 | hunk ./src/allmydata/storage/crawler.py 379 |
---|
2838 | - called exactly once per share (per cycle). If it *is* interrupted, |
---|
2839 | + called exactly once per shareset (per cycle). If it *is* interrupted, |
---|
2840 | then the next time the node is started, some amount of work will be |
---|
2841 | duplicated, according to when self.save_state() was last called. By |
---|
2842 | default, save_state() is called at the end of each timeslice, and |
---|
2843 | hunk ./src/allmydata/storage/crawler.py 387 |
---|
2844 | |
---|
2845 | To reduce the chance of duplicate work (i.e. to avoid adding multiple |
---|
2846 | records to a database), you can call save_state() at the end of your |
---|
2847 | - process_bucket() method. This will reduce the maximum duplicated work |
---|
2848 | - to one bucket per SIGKILL. It will also add overhead, probably 1-20ms |
---|
2849 | - per bucket (and some disk writes), which will count against your |
---|
2850 | - allowed_cpu_percentage, and which may be considerable if |
---|
2851 | - process_bucket() runs quickly. |
---|
2852 | + process_shareset() method. This will reduce the maximum duplicated |
---|
2853 | + work to one shareset per SIGKILL. It will also add overhead, probably |
---|
2854 | + 1-20ms per shareset (and some disk writes), which will count against |
---|
2855 | + your allowed_cpu_percentage, and which may be considerable if |
---|
2856 | + process_shareset() runs quickly. |
---|
2857 | |
---|
2858 | This method is for subclasses to override. No upcall is necessary. |
---|
2859 | """ |
---|
2860 | hunk ./src/allmydata/storage/crawler.py 398 |
---|
2861 | pass |
---|
2862 | |
---|
2863 | def finished_prefix(self, cycle, prefix): |
---|
2864 | - """Notify a subclass that the crawler has just finished processing a |
---|
2865 | - prefix directory (all buckets with the same two-character/10bit |
---|
2866 | + """ |
---|
2867 | + Notify a subclass that the crawler has just finished processing a |
---|
2868 | + prefix directory (all sharesets with the same two-character/10-bit |
---|
2869 | prefix). To impose a limit on how much work might be duplicated by a |
---|
2870 | SIGKILL that occurs during a timeslice, you can call |
---|
2871 | self.save_state() here, but be aware that it may represent a |
---|
2872 | hunk ./src/allmydata/storage/crawler.py 411 |
---|
2873 | pass |
---|
2874 | |
---|
2875 | def finished_cycle(self, cycle): |
---|
2876 | - """Notify subclass that a cycle (one complete traversal of all |
---|
2877 | + """ |
---|
2878 | + Notify subclass that a cycle (one complete traversal of all |
---|
2879 | prefixdirs) has just finished. 'cycle' is the number of the cycle |
---|
2880 | that just finished. This method should perform summary work and |
---|
2881 | update self.state to publish information to status displays. |
---|
2882 | hunk ./src/allmydata/storage/crawler.py 429 |
---|
2883 | pass |
---|
2884 | |
---|
2885 | def yielding(self, sleep_time): |
---|
2886 | - """The crawler is about to sleep for 'sleep_time' seconds. This |
---|
2887 | + """ |
---|
2888 | + The crawler is about to sleep for 'sleep_time' seconds. This |
---|
2889 | method is mostly for the convenience of unit tests. |
---|
2890 | |
---|
2891 | This method is for subclasses to override. No upcall is necessary. |
---|
2892 | hunk ./src/allmydata/storage/crawler.py 439 |
---|
2893 | |
---|
2894 | |
---|
2895 | class BucketCountingCrawler(ShareCrawler): |
---|
2896 | - """I keep track of how many buckets are being managed by this server. |
---|
2897 | - This is equivalent to the number of distributed files and directories for |
---|
2898 | - which I am providing storage. The actual number of files+directories in |
---|
2899 | - the full grid is probably higher (especially when there are more servers |
---|
2900 | - than 'N', the number of generated shares), because some files+directories |
---|
2901 | - will have shares on other servers instead of me. Also note that the |
---|
2902 | - number of buckets will differ from the number of shares in small grids, |
---|
2903 | - when more than one share is placed on a single server. |
---|
2904 | + """ |
---|
2905 | + I keep track of how many sharesets, each corresponding to a storage index, |
---|
2906 | + are being managed by this server. This is equivalent to the number of |
---|
2907 | + distributed files and directories for which I am providing storage. The |
---|
2908 | + actual number of files and directories in the full grid is probably higher |
---|
2909 | + (especially when there are more servers than 'N', the number of generated |
---|
2910 | + shares), because some files and directories will have shares on other |
---|
2911 | + servers instead of me. Also note that the number of sharesets will differ |
---|
2912 | + from the number of shares in small grids, when more than one share is |
---|
2913 | + placed on a single server. |
---|
2914 | """ |
---|
2915 | |
---|
2916 | minimum_cycle_time = 60*60 # we don't need this more than once an hour |
---|
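To make the shares-versus-sharesets distinction concrete, a sketch with hypothetical numbers:

    # A 10-share file spread over a 5-server grid: each server holds
    # 2 shares but only 1 shareset (one storage index) for that file,
    # so this crawler's count is below a raw share count.
    total_shares, servers = 10, 5
    shares_per_server = total_shares / servers   # 2
    sharesets_per_server = 1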
2917 | hunk ./src/allmydata/storage/crawler.py 453 |
---|
2918 | |
---|
2919 | - def __init__(self, server, statefile, num_sample_prefixes=1): |
---|
2920 | - ShareCrawler.__init__(self, server, statefile) |
---|
2921 | + def __init__(self, backend, statefp, num_sample_prefixes=1): |
---|
2922 | + ShareCrawler.__init__(self, backend, statefp) |
---|
2923 | self.num_sample_prefixes = num_sample_prefixes |
---|
2924 | |
---|
2925 | def add_initial_state(self): |
---|
2926 | hunk ./src/allmydata/storage/crawler.py 467 |
---|
2927 | self.state.setdefault("last-complete-bucket-count", None) |
---|
2928 | self.state.setdefault("storage-index-samples", {}) |
---|
2929 | |
---|
2930 | - def process_prefixdir(self, cycle, prefix, prefixdir, buckets, start_slice): |
---|
2931 | + def process_prefixdir(self, cycle, prefix, sharesets, start_slice): |
---|
2932 | # we override process_prefixdir() because we don't want to look at |
---|
2933 | hunk ./src/allmydata/storage/crawler.py 469 |
---|
2934 | - # the individual buckets. We'll save state after each one. On my |
---|
2935 | + # the individual sharesets. We'll save state after each one. On my |
---|
2936 | # laptop, a mostly-empty storage server can process about 70 |
---|
2937 | # prefixdirs in a 1.0s slice. |
---|
2938 | if cycle not in self.state["bucket-counts"]: |
---|
2939 | hunk ./src/allmydata/storage/crawler.py 474 |
---|
2940 | self.state["bucket-counts"][cycle] = {} |
---|
2941 | - self.state["bucket-counts"][cycle][prefix] = len(buckets) |
---|
2942 | + self.state["bucket-counts"][cycle][prefix] = len(sharesets) |
---|
2943 | if prefix in self.prefixes[:self.num_sample_prefixes]: |
---|
2944 | hunk ./src/allmydata/storage/crawler.py 476 |
---|
2945 | - self.state["storage-index-samples"][prefix] = (cycle, buckets) |
---|
2946 | + self.state["storage-index-samples"][prefix] = (cycle, sharesets) |
---|
2947 | |
---|
2948 | def finished_cycle(self, cycle): |
---|
2949 | last_counts = self.state["bucket-counts"].get(cycle, []) |
---|
2950 | hunk ./src/allmydata/storage/crawler.py 482 |
---|
2951 | if len(last_counts) == len(self.prefixes): |
---|
2952 | # great, we have a whole cycle. |
---|
2953 | - num_buckets = sum(last_counts.values()) |
---|
2954 | - self.state["last-complete-bucket-count"] = num_buckets |
---|
2955 | + num_sharesets = sum(last_counts.values()) |
---|
2956 | + self.state["last-complete-bucket-count"] = num_sharesets |
---|
2957 | # get rid of old counts |
---|
2958 | for old_cycle in list(self.state["bucket-counts"].keys()): |
---|
2959 | if old_cycle != cycle: |
---|
2960 | hunk ./src/allmydata/storage/crawler.py 490 |
---|
2961 | del self.state["bucket-counts"][old_cycle] |
---|
2962 | # get rid of old samples too |
---|
2963 | for prefix in list(self.state["storage-index-samples"].keys()): |
---|
2964 | - old_cycle,buckets = self.state["storage-index-samples"][prefix] |
---|
2965 | + old_cycle, storage_indices = self.state["storage-index-samples"][prefix] |
---|
2966 | if old_cycle != cycle: |
---|
2967 | del self.state["storage-index-samples"][prefix] |
---|
2968 | hunk ./src/allmydata/storage/crawler.py 493 |
---|
2969 | - |
---|
2970 | hunk ./src/allmydata/storage/expirer.py 1 |
---|
2971 | -import time, os, pickle, struct |
---|
2972 | + |
---|
2973 | +import time, pickle, struct |
---|
2974 | +from twisted.python import log as twlog |
---|
2975 | + |
---|
2976 | from allmydata.storage.crawler import ShareCrawler |
---|
2977 | hunk ./src/allmydata/storage/expirer.py 6 |
---|
2978 | -from allmydata.storage.shares import get_share_file |
---|
2979 | -from allmydata.storage.common import UnknownMutableContainerVersionError, \ |
---|
2980 | +from allmydata.storage.common import si_b2a, UnknownMutableContainerVersionError, \ |
---|
2981 | UnknownImmutableContainerVersionError |
---|
2982 | hunk ./src/allmydata/storage/expirer.py 8 |
---|
2983 | -from twisted.python import log as twlog |
---|
2984 | + |
---|
2985 | |
---|
2986 | class LeaseCheckingCrawler(ShareCrawler): |
---|
2987 | """I examine the leases on all shares, determining which are still valid |
---|
2988 | hunk ./src/allmydata/storage/expirer.py 17 |
---|
2989 | removed. |
---|
2990 | |
---|
2991 | I collect statistics on the leases and make these available to a web |
---|
2992 | - status page, including:: |
---|
2993 | + status page, including: |
---|
2994 | |
---|
2995 | Space recovered during this cycle-so-far: |
---|
2996 | actual (only if expiration_enabled=True): |
---|
2997 | hunk ./src/allmydata/storage/expirer.py 21 |
---|
2998 | - num-buckets, num-shares, sum of share sizes, real disk usage |
---|
2999 | + num-storage-indices, num-shares, sum of share sizes, real disk usage |
---|
3000 | ('real disk usage' means we use stat(fn).st_blocks*512 and include any |
---|
3001 | space used by the directory) |
---|
3002 | what it would have been with the original lease expiration time |
---|
3003 | hunk ./src/allmydata/storage/expirer.py 32 |
---|
3004 | |
---|
3005 | Space recovered during the last 10 cycles <-- saved in separate pickle |
---|
3006 | |
---|
3007 | - Shares/buckets examined: |
---|
3008 | + Shares/storage-indices examined: |
---|
3009 | this cycle-so-far |
---|
3010 | prediction of rest of cycle |
---|
3011 | during last 10 cycles <-- separate pickle |
---|
3012 | hunk ./src/allmydata/storage/expirer.py 42 |
---|
3013 | Histogram of leases-per-share: |
---|
3014 | this-cycle-to-date |
---|
3015 | last 10 cycles <-- separate pickle |
---|
3016 | - Histogram of lease ages, buckets = 1day |
---|
3017 | + Histogram of lease ages, in 1-day bins |
---|
3018 | cycle-to-date |
---|
3019 | last 10 cycles <-- separate pickle |
---|
3020 | |
---|
3021 | hunk ./src/allmydata/storage/expirer.py 53 |
---|
3022 | slow_start = 360 # wait 6 minutes after startup |
---|
3023 | minimum_cycle_time = 12*60*60 # not more than twice per day |
---|
3024 | |
---|
3025 | - def __init__(self, server, statefile, historyfile, |
---|
3026 | - expiration_enabled, mode, |
---|
3027 | - override_lease_duration, # used if expiration_mode=="age" |
---|
3028 | - cutoff_date, # used if expiration_mode=="cutoff-date" |
---|
3029 | - sharetypes): |
---|
3030 | - self.historyfile = historyfile |
---|
3031 | - self.expiration_enabled = expiration_enabled |
---|
3032 | - self.mode = mode |
---|
3033 | + def __init__(self, backend, statefp, historyfp, expiration_policy): |
---|
3034 | + ShareCrawler.__init__(self, backend, statefp) |
---|
3035 | + self.historyfp = historyfp |
---|
3036 | + self.expiration_enabled = expiration_policy['enabled'] |
---|
3037 | + self.mode = expiration_policy['mode'] |
---|
3038 | self.override_lease_duration = None |
---|
3039 | self.cutoff_date = None |
---|
3040 | if self.mode == "age": |
---|
3041 | hunk ./src/allmydata/storage/expirer.py 61 |
---|
3042 | - assert isinstance(override_lease_duration, (int, type(None))) |
---|
3043 | - self.override_lease_duration = override_lease_duration # seconds |
---|
3044 | + assert isinstance(expiration_policy['override_lease_duration'], (int, type(None))) |
---|
3045 | + self.override_lease_duration = expiration_policy['override_lease_duration'] # seconds |
---|
3046 | elif self.mode == "cutoff-date": |
---|
3047 | hunk ./src/allmydata/storage/expirer.py 64 |
---|
3048 | - assert isinstance(cutoff_date, int) # seconds-since-epoch |
---|
3049 | - assert cutoff_date is not None |
---|
3050 | - self.cutoff_date = cutoff_date |
---|
3051 | + assert isinstance(expiration_policy['cutoff_date'], int) # seconds-since-epoch |
---|
3052 | + self.cutoff_date = expiration_policy['cutoff_date'] |
---|
3053 | else: |
---|
3054 | hunk ./src/allmydata/storage/expirer.py 67 |
---|
3055 | - raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % mode) |
---|
3056 | - self.sharetypes_to_expire = sharetypes |
---|
3057 | - ShareCrawler.__init__(self, server, statefile) |
---|
3058 | + raise ValueError("GC mode '%s' must be 'age' or 'cutoff-date'" % expiration_policy['mode']) |
---|
3059 | + self.sharetypes_to_expire = expiration_policy['sharetypes'] |
---|
3060 | |
---|
3061 | def add_initial_state(self): |
---|
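Two example policy dicts exercising the keys read above, as sketches (cutoff_date must be an int of seconds since the epoch, per the assert; the values shown are hypothetical):

    age_policy = {
        'enabled': True,
        'mode': 'age',
        'override_lease_duration': 30*24*60*60,  # seconds
        'cutoff_date': None,
        'sharetypes': ('mutable', 'immutable'),
    }
    cutoff_policy = {
        'enabled': True,
        'mode': 'cutoff-date',
        'override_lease_duration': None,
        'cutoff_date': 1316217600,  # hypothetical epoch seconds
        'sharetypes': ('immutable',),
    }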
3062 | # we fill ["cycle-to-date"] here (even though they will be reset in |
---|
3063 | hunk ./src/allmydata/storage/expirer.py 82 |
---|
3064 | self.state["cycle-to-date"].setdefault(k, so_far[k]) |
---|
3065 | |
---|
3066 | # initialize history |
---|
3067 | - if not os.path.exists(self.historyfile): |
---|
3068 | + if not self.historyfp.exists(): |
---|
3069 | history = {} # cyclenum -> dict |
---|
3070 | hunk ./src/allmydata/storage/expirer.py 84 |
---|
3071 | - f = open(self.historyfile, "wb") |
---|
3072 | - pickle.dump(history, f) |
---|
3073 | - f.close() |
---|
3074 | + self.historyfp.setContent(pickle.dumps(history)) |
---|
3075 | |
---|
3076 | def create_empty_cycle_dict(self): |
---|
3077 | recovered = self.create_empty_recovered_dict() |
---|
3078 | hunk ./src/allmydata/storage/expirer.py 97 |
---|
3079 | |
---|
3080 | def create_empty_recovered_dict(self): |
---|
3081 | recovered = {} |
---|
3082 | + # "buckets" is ambiguous; here it means the number of sharesets (one per storage index per server) |
---|
3083 | for a in ("actual", "original", "configured", "examined"): |
---|
3084 | for b in ("buckets", "shares", "sharebytes", "diskbytes"): |
---|
3085 | recovered[a+"-"+b] = 0 |
---|
3086 | hunk ./src/allmydata/storage/expirer.py 108 |
---|
3087 | def started_cycle(self, cycle): |
---|
3088 | self.state["cycle-to-date"] = self.create_empty_cycle_dict() |
---|
3089 | |
---|
3090 | - def stat(self, fn): |
---|
3091 | - return os.stat(fn) |
---|
3092 | - |
---|
3093 | - def process_bucket(self, cycle, prefix, prefixdir, storage_index_b32): |
---|
3094 | - bucketdir = os.path.join(prefixdir, storage_index_b32) |
---|
3095 | - s = self.stat(bucketdir) |
---|
3096 | + def process_shareset(self, cycle, prefix, shareset): |
---|
3097 | would_keep_shares = [] |
---|
3098 | wks = None |
---|
3099 | hunk ./src/allmydata/storage/expirer.py 111 |
---|
3100 | + sharetype = None |
---|
3101 | |
---|
3102 | hunk ./src/allmydata/storage/expirer.py 113 |
---|
3103 | - for fn in os.listdir(bucketdir): |
---|
3104 | - try: |
---|
3105 | - shnum = int(fn) |
---|
3106 | - except ValueError: |
---|
3107 | - continue # non-numeric means not a sharefile |
---|
3108 | - sharefile = os.path.join(bucketdir, fn) |
---|
3109 | + for share in shareset.get_shares(): |
---|
3110 | + sharetype = share.sharetype |
---|
3111 | try: |
---|
3112 | hunk ./src/allmydata/storage/expirer.py 116 |
---|
3113 | - wks = self.process_share(sharefile) |
---|
3114 | + wks = self.process_share(share) |
---|
3115 | except (UnknownMutableContainerVersionError, |
---|
3116 | UnknownImmutableContainerVersionError, |
---|
3117 | struct.error): |
---|
3118 | hunk ./src/allmydata/storage/expirer.py 120 |
---|
3119 | - twlog.msg("lease-checker error processing %s" % sharefile) |
---|
3120 | + twlog.msg("lease-checker error processing %r" % (share,)) |
---|
3121 | twlog.err() |
---|
3122 | hunk ./src/allmydata/storage/expirer.py 122 |
---|
3123 | - which = (storage_index_b32, shnum) |
---|
3124 | + which = (si_b2a(share.storageindex), share.get_shnum()) |
---|
3125 | self.state["cycle-to-date"]["corrupt-shares"].append(which) |
---|
3126 | wks = (1, 1, 1, "unknown") |
---|
3127 | would_keep_shares.append(wks) |
---|
3128 | hunk ./src/allmydata/storage/expirer.py 127 |
---|
3129 | |
---|
3130 | - sharetype = None |
---|
3131 | + container_type = None |
---|
3132 | if wks: |
---|
3133 | hunk ./src/allmydata/storage/expirer.py 129 |
---|
3134 | - # use the last share's sharetype as the buckettype |
---|
3135 | - sharetype = wks[3] |
---|
3136 | + # use the last share's sharetype as the container type |
---|
3137 | + container_type = wks[3] |
---|
3138 | rec = self.state["cycle-to-date"]["space-recovered"] |
---|
3139 | self.increment(rec, "examined-buckets", 1) |
---|
3140 | if sharetype: |
---|
3141 | hunk ./src/allmydata/storage/expirer.py 134 |
---|
3142 | - self.increment(rec, "examined-buckets-"+sharetype, 1) |
---|
3143 | + self.increment(rec, "examined-buckets-"+container_type, 1) |
---|
3144 | + |
---|
3145 | + container_diskbytes = shareset.get_overhead() |
---|
3146 | |
---|
3147 | hunk ./src/allmydata/storage/expirer.py 138 |
---|
3148 | - try: |
---|
3149 | - bucket_diskbytes = s.st_blocks * 512 |
---|
3150 | - except AttributeError: |
---|
3151 | - bucket_diskbytes = 0 # no stat().st_blocks on windows |
---|
3152 | if sum([wks[0] for wks in would_keep_shares]) == 0: |
---|
3153 | hunk ./src/allmydata/storage/expirer.py 139 |
---|
3154 | - self.increment_bucketspace("original", bucket_diskbytes, sharetype) |
---|
3155 | + self.increment_container_space("original", container_diskbytes, sharetype) |
---|
3156 | if sum([wks[1] for wks in would_keep_shares]) == 0: |
---|
3157 | hunk ./src/allmydata/storage/expirer.py 141 |
---|
3158 | - self.increment_bucketspace("configured", bucket_diskbytes, sharetype) |
---|
3159 | + self.increment_container_space("configured", container_diskbytes, sharetype) |
---|
3160 | if sum([wks[2] for wks in would_keep_shares]) == 0: |
---|
3161 | hunk ./src/allmydata/storage/expirer.py 143 |
---|
3162 | - self.increment_bucketspace("actual", bucket_diskbytes, sharetype) |
---|
3163 | + self.increment_container_space("actual", container_diskbytes, sharetype) |
---|
3164 | |
---|
3165 | hunk ./src/allmydata/storage/expirer.py 145 |
---|
3166 | - def process_share(self, sharefilename): |
---|
3167 | - # first, find out what kind of a share it is |
---|
3168 | - sf = get_share_file(sharefilename) |
---|
3169 | - sharetype = sf.sharetype |
---|
3170 | + def process_share(self, share): |
---|
3171 | + sharetype = share.sharetype |
---|
3172 | now = time.time() |
---|
3173 | hunk ./src/allmydata/storage/expirer.py 148 |
---|
3174 | - s = self.stat(sharefilename) |
---|
3175 | + sharebytes = share.get_size() |
---|
3176 | + diskbytes = share.get_used_space() |
---|
3177 | |
---|
3178 | num_leases = 0 |
---|
3179 | num_valid_leases_original = 0 |
---|
3180 | hunk ./src/allmydata/storage/expirer.py 156 |
---|
3181 | num_valid_leases_configured = 0 |
---|
3182 | expired_leases_configured = [] |
---|
3183 | |
---|
3184 | - for li in sf.get_leases(): |
---|
3185 | + for li in share.get_leases(): |
---|
3186 | num_leases += 1 |
---|
3187 | original_expiration_time = li.get_expiration_time() |
---|
3188 | grant_renew_time = li.get_grant_renew_time_time() |
---|
3189 | hunk ./src/allmydata/storage/expirer.py 169 |
---|
3190 | |
---|
3191 | # expired-or-not according to our configured age limit |
---|
3192 | expired = False |
---|
3193 | - if self.mode == "age": |
---|
3194 | - age_limit = original_expiration_time |
---|
3195 | - if self.override_lease_duration is not None: |
---|
3196 | - age_limit = self.override_lease_duration |
---|
3197 | - if age > age_limit: |
---|
3198 | - expired = True |
---|
3199 | - else: |
---|
3200 | - assert self.mode == "cutoff-date" |
---|
3201 | - if grant_renew_time < self.cutoff_date: |
---|
3202 | - expired = True |
---|
3203 | - if sharetype not in self.sharetypes_to_expire: |
---|
3204 | - expired = False |
---|
3205 | + if sharetype in self.sharetypes_to_expire: |
---|
3206 | + if self.mode == "age": |
---|
3207 | + age_limit = original_expiration_time |
---|
3208 | + if self.override_lease_duration is not None: |
---|
3209 | + age_limit = self.override_lease_duration |
---|
3210 | + if age > age_limit: |
---|
3211 | + expired = True |
---|
3212 | + else: |
---|
3213 | + assert self.mode == "cutoff-date" |
---|
3214 | + if grant_renew_time < self.cutoff_date: |
---|
3215 | + expired = True |
---|
3216 | |
---|
3217 | if expired: |
---|
3218 | expired_leases_configured.append(li) |
---|
3219 | hunk ./src/allmydata/storage/expirer.py 188 |
---|
3220 | |
---|
3221 | so_far = self.state["cycle-to-date"] |
---|
3222 | self.increment(so_far["leases-per-share-histogram"], num_leases, 1) |
---|
3223 | - self.increment_space("examined", s, sharetype) |
---|
3224 | + self.increment_space("examined", sharebytes, diskbytes, sharetype) |
---|
3225 | |
---|
3226 | would_keep_share = [1, 1, 1, sharetype] |
---|
3227 | |
---|
3228 | hunk ./src/allmydata/storage/expirer.py 194 |
---|
3229 | if self.expiration_enabled: |
---|
3230 | for li in expired_leases_configured: |
---|
3231 | - sf.cancel_lease(li.cancel_secret) |
---|
3232 | + share.cancel_lease(li.cancel_secret) |
---|
3233 | |
---|
3234 | if num_valid_leases_original == 0: |
---|
3235 | would_keep_share[0] = 0 |
---|
3236 | hunk ./src/allmydata/storage/expirer.py 198 |
---|
3237 | - self.increment_space("original", s, sharetype) |
---|
3238 | + self.increment_space("original", sharebytes, diskbytes, sharetype) |
---|
3239 | |
---|
3240 | if num_valid_leases_configured == 0: |
---|
3241 | would_keep_share[1] = 0 |
---|
3242 | hunk ./src/allmydata/storage/expirer.py 202 |
---|
3243 | - self.increment_space("configured", s, sharetype) |
---|
3244 | + self.increment_space("configured", sharebytes, diskbytes, sharetype) |
---|
3245 | if self.expiration_enabled: |
---|
3246 | would_keep_share[2] = 0 |
---|
3247 | hunk ./src/allmydata/storage/expirer.py 205 |
---|
3248 | - self.increment_space("actual", s, sharetype) |
---|
3249 | + self.increment_space("actual", sharebytes, diskbytes, sharetype) |
---|
3250 | |
---|
3251 | return would_keep_share |
---|
3252 | |
---|
3253 | hunk ./src/allmydata/storage/expirer.py 209 |
---|
3254 | - def increment_space(self, a, s, sharetype): |
---|
3255 | - sharebytes = s.st_size |
---|
3256 | - try: |
---|
3257 | - # note that stat(2) says that st_blocks is 512 bytes, and that |
---|
3258 | - # st_blksize is "optimal file sys I/O ops blocksize", which is |
---|
3259 | - # independent of the block-size that st_blocks uses. |
---|
3260 | - diskbytes = s.st_blocks * 512 |
---|
3261 | - except AttributeError: |
---|
3262 | - # the docs say that st_blocks is only on linux. I also see it on |
---|
3263 | - # MacOS. But it isn't available on windows. |
---|
3264 | - diskbytes = sharebytes |
---|
3265 | + def increment_space(self, a, sharebytes, diskbytes, sharetype): |
---|
3266 | so_far_sr = self.state["cycle-to-date"]["space-recovered"] |
---|
3267 | self.increment(so_far_sr, a+"-shares", 1) |
---|
3268 | self.increment(so_far_sr, a+"-sharebytes", sharebytes) |
---|
3269 | hunk ./src/allmydata/storage/expirer.py 219 |
---|
3270 | self.increment(so_far_sr, a+"-sharebytes-"+sharetype, sharebytes) |
---|
3271 | self.increment(so_far_sr, a+"-diskbytes-"+sharetype, diskbytes) |
---|
3272 | |
---|
3273 | - def increment_bucketspace(self, a, bucket_diskbytes, sharetype): |
---|
3274 | + def increment_container_space(self, a, container_diskbytes, container_type): |
---|
3275 | rec = self.state["cycle-to-date"]["space-recovered"] |
---|
3276 | hunk ./src/allmydata/storage/expirer.py 221 |
---|
3277 | - self.increment(rec, a+"-diskbytes", bucket_diskbytes) |
---|
3278 | + self.increment(rec, a+"-diskbytes", container_diskbytes) |
---|
3279 | self.increment(rec, a+"-buckets", 1) |
---|
3280 | hunk ./src/allmydata/storage/expirer.py 223 |
---|
3281 | - if sharetype: |
---|
3282 | - self.increment(rec, a+"-diskbytes-"+sharetype, bucket_diskbytes) |
---|
3283 | - self.increment(rec, a+"-buckets-"+sharetype, 1) |
---|
3284 | + if container_type: |
---|
3285 | + self.increment(rec, a+"-diskbytes-"+container_type, container_diskbytes) |
---|
3286 | + self.increment(rec, a+"-buckets-"+container_type, 1) |
---|
3287 | |
---|
3288 | def increment(self, d, k, delta=1): |
---|
3289 | if k not in d: |
---|
3290 | hunk ./src/allmydata/storage/expirer.py 279 |
---|
3291 | # copy() needs to become a deepcopy |
---|
3292 | h["space-recovered"] = s["space-recovered"].copy() |
---|
3293 | |
---|
3294 | - history = pickle.load(open(self.historyfile, "rb")) |
---|
3295 | + history = pickle.loads(self.historyfp.getContent()) |
---|
3296 | history[cycle] = h |
---|
3297 | while len(history) > 10: |
---|
3298 | oldcycles = sorted(history.keys()) |
---|
3299 | hunk ./src/allmydata/storage/expirer.py 284 |
---|
3300 | del history[oldcycles[0]] |
---|
3301 | - f = open(self.historyfile, "wb") |
---|
3302 | - pickle.dump(history, f) |
---|
3303 | - f.close() |
---|
3304 | + self.historyfp.setContent(pickle.dumps(history)) |
---|
3305 | |
---|
3306 | def get_state(self): |
---|
3307 | """In addition to the crawler state described in |
---|
3308 | hunk ./src/allmydata/storage/expirer.py 353 |
---|
3309 | progress = self.get_progress() |
---|
3310 | |
---|
3311 | state = ShareCrawler.get_state(self) # does a shallow copy |
---|
3312 | - history = pickle.load(open(self.historyfile, "rb")) |
---|
3313 | + history = pickle.loads(self.historyfp.getContent()) |
---|
3314 | state["history"] = history |
---|
3315 | |
---|
3316 | if not progress["cycle-in-progress"]: |
---|
3317 | hunk ./src/allmydata/storage/lease.py 17 |
---|
3318 | |
---|
3319 | def get_expiration_time(self): |
---|
3320 | return self.expiration_time |
---|
3321 | + |
---|
3322 | def get_grant_renew_time_time(self): |
---|
3323 | # hack, based upon fixed 31day expiration period |
---|
3324 | return self.expiration_time - 31*24*60*60 |
---|
3325 | hunk ./src/allmydata/storage/lease.py 21 |
---|
3326 | + |
---|
3327 | def get_age(self): |
---|
3328 | return time.time() - self.get_grant_renew_time_time() |
---|
3329 | |
---|
3330 | hunk ./src/allmydata/storage/lease.py 32 |
---|
3331 | self.expiration_time) = struct.unpack(">L32s32sL", data) |
---|
3332 | self.nodeid = None |
---|
3333 | return self |
---|
3334 | + |
---|
3335 | def to_immutable_data(self): |
---|
3336 | return struct.pack(">L32s32sL", |
---|
3337 | self.owner_num, |
---|
3338 | hunk ./src/allmydata/storage/lease.py 45 |
---|
3339 | int(self.expiration_time), |
---|
3340 | self.renew_secret, self.cancel_secret, |
---|
3341 | self.nodeid) |
---|
3342 | + |
---|
3343 | def from_mutable_data(self, data): |
---|
3344 | (self.owner_num, |
---|
3345 | self.expiration_time, |
---|
3346 | hunk ./src/allmydata/storage/server.py 1 |
---|
3347 | -import os, re, weakref, struct, time |
---|
3348 | +import weakref, time |
---|
3349 | |
---|
3350 | from foolscap.api import Referenceable |
---|
3351 | from twisted.application import service |
---|
3352 | hunk ./src/allmydata/storage/server.py 8 |
---|
3353 | |
---|
3354 | from zope.interface import implements |
---|
3355 | from allmydata.interfaces import RIStorageServer, IStatsProducer |
---|
3356 | -from allmydata.util import fileutil, idlib, log, time_format |
---|
3357 | +from allmydata.util import idlib, log |
---|
3358 | import allmydata # for __full_version__ |
---|
3359 | |
---|
3360 | hunk ./src/allmydata/storage/server.py 11 |
---|
3361 | -from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir |
---|
3362 | -_pyflakes_hush = [si_b2a, si_a2b, storage_index_to_dir] # re-exported |
---|
3363 | +from allmydata.storage.common import si_a2b, si_b2a |
---|
3364 | +[si_a2b] # hush pyflakes |
---|
3365 | from allmydata.storage.lease import LeaseInfo |
---|
3366 | hunk ./src/allmydata/storage/server.py 14 |
---|
3367 | -from allmydata.storage.mutable import MutableShareFile, EmptyShare, \ |
---|
3368 | - create_mutable_sharefile |
---|
3369 | -from allmydata.storage.immutable import ShareFile, BucketWriter, BucketReader |
---|
3370 | -from allmydata.storage.crawler import BucketCountingCrawler |
---|
3371 | from allmydata.storage.expirer import LeaseCheckingCrawler |
---|
3372 | hunk ./src/allmydata/storage/server.py 15 |
---|
3373 | - |
---|
3374 | -# storage/ |
---|
3375 | -# storage/shares/incoming |
---|
3376 | -# incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will |
---|
3377 | -# be moved to storage/shares/$START/$STORAGEINDEX/$SHARENUM upon success |
---|
3378 | -# storage/shares/$START/$STORAGEINDEX |
---|
3379 | -# storage/shares/$START/$STORAGEINDEX/$SHARENUM |
---|
3380 | - |
---|
3381 | -# Where "$START" denotes the first 10 bits worth of $STORAGEINDEX (that's 2 |
---|
3382 | -# base-32 chars). |
---|
3383 | - |
---|
3384 | -# $SHARENUM matches this regex: |
---|
3385 | -NUM_RE=re.compile("^[0-9]+$") |
---|
3386 | - |
---|
3387 | +from allmydata.storage.crawler import BucketCountingCrawler |
---|
3388 | |
---|
3389 | |
---|
3390 | class StorageServer(service.MultiService, Referenceable): |
---|
3391 | hunk ./src/allmydata/storage/server.py 20 |
---|
3392 | implements(RIStorageServer, IStatsProducer) |
---|
3393 | + |
---|
3394 | name = 'storage' |
---|
3395 | LeaseCheckerClass = LeaseCheckingCrawler |
---|
3396 | hunk ./src/allmydata/storage/server.py 23 |
---|
3397 | + DEFAULT_EXPIRATION_POLICY = { |
---|
3398 | + 'enabled': False, |
---|
3399 | + 'mode': 'age', |
---|
3400 | + 'override_lease_duration': None, |
---|
3401 | + 'cutoff_date': None, |
---|
3402 | + 'sharetypes': ('mutable', 'immutable'), |
---|
3403 | + } |
---|
3404 | |
---|
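Callers that pass no policy get DEFAULT_EXPIRATION_POLICY, via the `or` in __init__ below. A sketch (nodeid and backend are hypothetical stand-ins):

    ss1 = StorageServer(nodeid, backend)   # uses DEFAULT_EXPIRATION_POLICY
    ss2 = StorageServer(nodeid, backend,
                        expiration_policy=StorageServer.DEFAULT_EXPIRATION_POLICY)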
3405 | hunk ./src/allmydata/storage/server.py 31 |
---|
3406 | - def __init__(self, storedir, nodeid, reserved_space=0, |
---|
3407 | - discard_storage=False, readonly_storage=False, |
---|
3408 | + def __init__(self, nodeid, backend, reserved_space=0, |
---|
3409 | + readonly_storage=False, |
---|
3410 | stats_provider=None, |
---|
3411 | hunk ./src/allmydata/storage/server.py 34 |
---|
3412 | - expiration_enabled=False, |
---|
3413 | - expiration_mode="age", |
---|
3414 | - expiration_override_lease_duration=None, |
---|
3415 | - expiration_cutoff_date=None, |
---|
3416 | - expiration_sharetypes=("mutable", "immutable")): |
---|
3417 | + expiration_policy=None): |
---|
3418 | service.MultiService.__init__(self) |
---|
3419 | assert isinstance(nodeid, str) |
---|
3420 | assert len(nodeid) == 20 |
---|
3421 | hunk ./src/allmydata/storage/server.py 39 |
---|
3422 | self.my_nodeid = nodeid |
---|
3423 | - self.storedir = storedir |
---|
3424 | - sharedir = os.path.join(storedir, "shares") |
---|
3425 | - fileutil.make_dirs(sharedir) |
---|
3426 | - self.sharedir = sharedir |
---|
3427 | - # we don't actually create the corruption-advisory dir until necessary |
---|
3428 | - self.corruption_advisory_dir = os.path.join(storedir, |
---|
3429 | - "corruption-advisories") |
---|
3430 | - self.reserved_space = int(reserved_space) |
---|
3431 | - self.no_storage = discard_storage |
---|
3432 | - self.readonly_storage = readonly_storage |
---|
3433 | self.stats_provider = stats_provider |
---|
3434 | if self.stats_provider: |
---|
3435 | self.stats_provider.register_producer(self) |
---|
3436 | hunk ./src/allmydata/storage/server.py 42 |
---|
3437 | - self.incomingdir = os.path.join(sharedir, 'incoming') |
---|
3438 | - self._clean_incomplete() |
---|
3439 | - fileutil.make_dirs(self.incomingdir) |
---|
3440 | self._active_writers = weakref.WeakKeyDictionary() |
---|
3441 | hunk ./src/allmydata/storage/server.py 43 |
---|
3442 | + self.backend = backend |
---|
3443 | + self.backend.setServiceParent(self) |
---|
3444 | + self.backend.set_storage_server(self) |
---|
3445 | log.msg("StorageServer created", facility="tahoe.storage") |
---|
3446 | |
---|
3447 | hunk ./src/allmydata/storage/server.py 48 |
---|
3448 | - if reserved_space: |
---|
3449 | - if self.get_available_space() is None: |
---|
3450 | - log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored", |
---|
3451 | - umin="0wZ27w", level=log.UNUSUAL) |
---|
3452 | - |
---|
3453 | self.latencies = {"allocate": [], # immutable |
---|
3454 | "write": [], |
---|
3455 | "close": [], |
---|
3456 | hunk ./src/allmydata/storage/server.py 59 |
---|
3457 | "renew": [], |
---|
3458 | "cancel": [], |
---|
3459 | } |
---|
3460 | - self.add_bucket_counter() |
---|
3461 | - |
---|
3462 | - statefile = os.path.join(self.storedir, "lease_checker.state") |
---|
3463 | - historyfile = os.path.join(self.storedir, "lease_checker.history") |
---|
3464 | - klass = self.LeaseCheckerClass |
---|
3465 | - self.lease_checker = klass(self, statefile, historyfile, |
---|
3466 | - expiration_enabled, expiration_mode, |
---|
3467 | - expiration_override_lease_duration, |
---|
3468 | - expiration_cutoff_date, |
---|
3469 | - expiration_sharetypes) |
---|
3470 | - self.lease_checker.setServiceParent(self) |
---|
3471 | + self._setup_bucket_counter() |
---|
3472 | + self._setup_lease_checker(expiration_policy or self.DEFAULT_EXPIRATION_POLICY) |
---|
3473 | |
---|
3474 | def __repr__(self): |
---|
3475 | return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),) |
---|
3476 | hunk ./src/allmydata/storage/server.py 65 |
---|
3477 | |
---|
3478 | - def add_bucket_counter(self): |
---|
3479 | - statefile = os.path.join(self.storedir, "bucket_counter.state") |
---|
3480 | - self.bucket_counter = BucketCountingCrawler(self, statefile) |
---|
3481 | + def _setup_bucket_counter(self): |
---|
3482 | + statefp = self.storedir.child("bucket_counter.state") |
---|
3483 | + self.bucket_counter = BucketCountingCrawler(self.backend, statefp) |
---|
3484 | self.bucket_counter.setServiceParent(self) |
---|
3485 | |
---|
3486 | hunk ./src/allmydata/storage/server.py 70 |
---|
3487 | + def _setup_lease_checker(self, expiration_policy): |
---|
3488 | + statefp = self.storedir.child("lease_checker.state") |
---|
3489 | + historyfp = self.storedir.child("lease_checker.history") |
---|
3490 | + self.lease_checker = self.LeaseCheckerClass(self.backend, statefp, historyfp, expiration_policy) |
---|
3491 | + self.lease_checker.setServiceParent(self) |
---|
3492 | + |
---|
3493 | def count(self, name, delta=1): |
---|
3494 | if self.stats_provider: |
---|
3495 | self.stats_provider.count("storage_server." + name, delta) |
---|
3496 | hunk ./src/allmydata/storage/server.py 90 |
---|
3497 | """Return a dict, indexed by category, that contains a dict of |
---|
3498 | latency numbers for each category. If there are sufficient samples |
---|
3499 | for unambiguous interpretation, each dict will contain the |
---|
3500 | - following keys: mean, 01_0_percentile, 10_0_percentile, |
---|
3501 | + following keys: samplesize, mean, 01_0_percentile, 10_0_percentile, |
---|
3502 | 50_0_percentile (median), 90_0_percentile, 95_0_percentile, |
---|
3503 | 99_0_percentile, 99_9_percentile. If there are insufficient |
---|
3504 | samples for a given percentile to be interpreted unambiguously |
---|
3505 | hunk ./src/allmydata/storage/server.py 112 |
---|
3506 | else: |
---|
3507 | stats["mean"] = None |
---|
3508 | |
---|
3509 | - orderstatlist = [(0.01, "01_0_percentile", 100), (0.1, "10_0_percentile", 10),\ |
---|
3510 | - (0.50, "50_0_percentile", 10), (0.90, "90_0_percentile", 10),\ |
---|
3511 | - (0.95, "95_0_percentile", 20), (0.99, "99_0_percentile", 100),\ |
---|
3512 | + orderstatlist = [(0.1, "10_0_percentile", 10), (0.5, "50_0_percentile", 10), \ |
---|
3513 | + (0.9, "90_0_percentile", 10), (0.95, "95_0_percentile", 20), \ |
---|
3514 | + (0.01, "01_0_percentile", 100), (0.99, "99_0_percentile", 100),\ |
---|
3515 | (0.999, "99_9_percentile", 1000)] |
---|
3516 | |
---|
3517 | for percentile, percentilestring, minnumtoobserve in orderstatlist: |
---|
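| The third field of each orderstatlist tuple is a minimum sample count. As a sketch |
---|
| of how such a table is typically consumed (an approximation, not lifted verbatim |
---|
| from this patch): |
---|
|  |
---|
|     def percentiles(samples, orderstatlist): |
---|
|         # Report each percentile only when there are enough samples |
---|
|         # for it to be meaningful; otherwise report None. |
---|
|         sorted_samples = sorted(samples) |
---|
|         count = len(sorted_samples) |
---|
|         stats = {} |
---|
|         for percentile, name, minnumtoobserve in orderstatlist: |
---|
|             if count >= minnumtoobserve: |
---|
|                 stats[name] = sorted_samples[int(percentile * count)] |
---|
|             else: |
---|
|                 stats[name] = None |
---|
|         return stats |
---|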
3518 | hunk ./src/allmydata/storage/server.py 131 |
---|
3519 | kwargs["facility"] = "tahoe.storage" |
---|
3520 | return log.msg(*args, **kwargs) |
---|
3521 | |
---|
3522 | - def _clean_incomplete(self): |
---|
3523 | - fileutil.rm_dir(self.incomingdir) |
---|
3524 | + def get_serverid(self): |
---|
3525 | + return self.my_nodeid |
---|
3526 | |
---|
3527 | def get_stats(self): |
---|
3528 | # remember: RIStatsProvider requires that our return dict |
---|
3529 | hunk ./src/allmydata/storage/server.py 136 |
---|
3530 | - # contains numeric values. |
---|
3531 | + # contains numeric or None values. |
---|
3532 | stats = { 'storage_server.allocated': self.allocated_size(), } |
---|
3533 | stats['storage_server.reserved_space'] = self.reserved_space |
---|
3534 | for category,ld in self.get_latencies().items(): |
---|
3535 | hunk ./src/allmydata/storage/server.py 143 |
---|
3536 | for name,v in ld.items(): |
---|
3537 | stats['storage_server.latencies.%s.%s' % (category, name)] = v |
---|
3538 | |
---|
3539 | - try: |
---|
3540 | - disk = fileutil.get_disk_stats(self.sharedir, self.reserved_space) |
---|
3541 | - writeable = disk['avail'] > 0 |
---|
3542 | + self.backend.fill_in_space_stats(stats) |
---|
3543 | |
---|
3544 | hunk ./src/allmydata/storage/server.py 145 |
---|
3545 | - # spacetime predictors should use disk_avail / (d(disk_used)/dt) |
---|
3546 | - stats['storage_server.disk_total'] = disk['total'] |
---|
3547 | - stats['storage_server.disk_used'] = disk['used'] |
---|
3548 | - stats['storage_server.disk_free_for_root'] = disk['free_for_root'] |
---|
3549 | - stats['storage_server.disk_free_for_nonroot'] = disk['free_for_nonroot'] |
---|
3550 | - stats['storage_server.disk_avail'] = disk['avail'] |
---|
3551 | - except AttributeError: |
---|
3552 | - writeable = True |
---|
3553 | - except EnvironmentError: |
---|
3554 | - log.msg("OS call to get disk statistics failed", level=log.UNUSUAL) |
---|
3555 | - writeable = False |
---|
3556 | - |
---|
3557 | - if self.readonly_storage: |
---|
3558 | - stats['storage_server.disk_avail'] = 0 |
---|
3559 | - writeable = False |
---|
3560 | - |
---|
3561 | - stats['storage_server.accepting_immutable_shares'] = int(writeable) |
---|
3562 | s = self.bucket_counter.get_state() |
---|
3563 | bucket_count = s.get("last-complete-bucket-count") |
---|
3564 | if bucket_count: |
---|
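| The inline disk-stats code deleted above moves behind the backend's |
---|
| fill_in_space_stats. A minimal sketch of what the disk backend's version might |
---|
| look like, reconstructed from the deleted lines; the _sharedir, _reserved_space |
---|
| and _readonly attribute names are assumptions, and fileutil is |
---|
| allmydata.util.fileutil: |
---|
|  |
---|
|     def fill_in_space_stats(self, stats): |
---|
|         try: |
---|
|             disk = fileutil.get_disk_stats(self._sharedir, self._reserved_space) |
---|
|             writeable = disk['avail'] > 0 |
---|
|             # spacetime predictors should use disk_avail / (d(disk_used)/dt) |
---|
|             stats['storage_server.disk_total'] = disk['total'] |
---|
|             stats['storage_server.disk_used'] = disk['used'] |
---|
|             stats['storage_server.disk_avail'] = disk['avail'] |
---|
|         except AttributeError: |
---|
|             writeable = True |
---|
|         except EnvironmentError: |
---|
|             writeable = False |
---|
|         if self._readonly: |
---|
|             stats['storage_server.disk_avail'] = 0 |
---|
|             writeable = False |
---|
|         stats['storage_server.accepting_immutable_shares'] = int(writeable) |
---|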
3565 | hunk ./src/allmydata/storage/server.py 152 |
---|
3566 | return stats |
---|
3567 | |
---|
3568 | def get_available_space(self): |
---|
3569 | - """Returns available space for share storage in bytes, or None if no |
---|
3570 | - API to get this information is available.""" |
---|
3571 | - |
---|
3572 | - if self.readonly_storage: |
---|
3573 | - return 0 |
---|
3574 | - return fileutil.get_available_space(self.sharedir, self.reserved_space) |
---|
3575 | + return self.backend.get_available_space() |
---|
3576 | |
---|
3577 | def allocated_size(self): |
---|
3578 | space = 0 |
---|
3579 | hunk ./src/allmydata/storage/server.py 161 |
---|
3580 | return space |
---|
3581 | |
---|
3582 | def remote_get_version(self): |
---|
3583 | - remaining_space = self.get_available_space() |
---|
3584 | + remaining_space = self.backend.get_available_space() |
---|
3585 | if remaining_space is None: |
---|
3586 | # We're on a platform that has no API to get disk stats. |
---|
3587 | remaining_space = 2**64 |
---|
3588 | hunk ./src/allmydata/storage/server.py 177 |
---|
3589 | } |
---|
3590 | return version |
---|
3591 | |
---|
3592 | - def remote_allocate_buckets(self, storage_index, |
---|
3593 | + def remote_allocate_buckets(self, storageindex, |
---|
3594 | renew_secret, cancel_secret, |
---|
3595 | sharenums, allocated_size, |
---|
3596 | canary, owner_num=0): |
---|
3597 | hunk ./src/allmydata/storage/server.py 181 |
---|
3598 | + # cancel_secret is no longer used. |
---|
3599 | # owner_num is not for clients to set, but rather it should be |
---|
3600 | hunk ./src/allmydata/storage/server.py 183 |
---|
3601 | - # curried into the PersonalStorageServer instance that is dedicated |
---|
3602 | - # to a particular owner. |
---|
3603 | + # curried into a StorageServer instance dedicated to a particular |
---|
3604 | + # owner. |
---|
3605 | start = time.time() |
---|
3606 | self.count("allocate") |
---|
3607 | hunk ./src/allmydata/storage/server.py 187 |
---|
3608 | - alreadygot = set() |
---|
3609 | + incoming = set() |
---|
3610 | bucketwriters = {} # k: shnum, v: BucketWriter |
---|
3611 | hunk ./src/allmydata/storage/server.py 189 |
---|
3612 | - si_dir = storage_index_to_dir(storage_index) |
---|
3613 | - si_s = si_b2a(storage_index) |
---|
3614 | |
---|
3615 | hunk ./src/allmydata/storage/server.py 190 |
---|
3616 | + si_s = si_b2a(storageindex) |
---|
3617 | log.msg("storage: allocate_buckets %s" % si_s) |
---|
3618 | |
---|
3619 | hunk ./src/allmydata/storage/server.py 193 |
---|
3620 | - # in this implementation, the lease information (including secrets) |
---|
3621 | - # goes into the share files themselves. It could also be put into a |
---|
3622 | - # separate database. Note that the lease should not be added until |
---|
3623 | - # the BucketWriter has been closed. |
---|
3624 | + # Note that the lease should not be added until the BucketWriter |
---|
3625 | + # has been closed. |
---|
3626 | expire_time = time.time() + 31*24*60*60 |
---|
3627 | hunk ./src/allmydata/storage/server.py 196 |
---|
3628 | - lease_info = LeaseInfo(owner_num, |
---|
3629 | - renew_secret, cancel_secret, |
---|
3630 | + lease_info = LeaseInfo(owner_num, renew_secret, |
---|
3631 | expire_time, self.my_nodeid) |
---|
3632 | |
---|
3633 | max_space_per_bucket = allocated_size |
---|
3634 | hunk ./src/allmydata/storage/server.py 201 |
---|
3635 | |
---|
3636 | - remaining_space = self.get_available_space() |
---|
3637 | + remaining_space = self.backend.get_available_space() |
---|
3638 | limited = remaining_space is not None |
---|
3639 | if limited: |
---|
3640 | hunk ./src/allmydata/storage/server.py 204 |
---|
3641 | - # this is a bit conservative, since some of this allocated_size() |
---|
3642 | - # has already been written to disk, where it will show up in |
---|
3643 | + # This is a bit conservative, since some of this allocated_size() |
---|
3644 | + # has already been written to the backend, where it will show up in |
---|
3645 | # get_available_space. |
---|
3646 | remaining_space -= self.allocated_size() |
---|
3647 | # self.readonly_storage causes remaining_space <= 0 |
---|
3648 | hunk ./src/allmydata/storage/server.py 210 |
---|
3649 | |
---|
3650 | - # fill alreadygot with all shares that we have, not just the ones |
---|
3651 | + # Fill alreadygot with all shares that we have, not just the ones |
---|
3652 | # they asked about: this will save them a lot of work. Add or update |
---|
3653 | # leases for all of them: if they want us to hold shares for this |
---|
3654 | hunk ./src/allmydata/storage/server.py 213 |
---|
3655 | - # file, they'll want us to hold leases for this file. |
---|
3656 | - for (shnum, fn) in self._get_bucket_shares(storage_index): |
---|
3657 | - alreadygot.add(shnum) |
---|
3658 | - sf = ShareFile(fn) |
---|
3659 | - sf.add_or_renew_lease(lease_info) |
---|
3660 | + # file, they'll want us to hold leases for all of its shares. |
---|
3661 | + # |
---|
3662 | + # XXX should we be making the assumption here that lease info is |
---|
3663 | + # duplicated in all shares? |
---|
3664 | + alreadygot = set() |
---|
3665 | + for share in self.backend.get_shares(storageindex): |
---|
3666 | + share.add_or_renew_lease(lease_info) |
---|
3667 | + alreadygot.add(share.shnum) |
---|
3668 | |
---|
3669 | hunk ./src/allmydata/storage/server.py 222 |
---|
3670 | - for shnum in sharenums: |
---|
3671 | - incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum) |
---|
3672 | - finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum) |
---|
3673 | - if os.path.exists(finalhome): |
---|
3674 | - # great! we already have it. easy. |
---|
3675 | - pass |
---|
3676 | - elif os.path.exists(incominghome): |
---|
3677 | - # Note that we don't create BucketWriters for shnums that |
---|
3678 | - # have a partial share (in incoming/), so if a second upload |
---|
3679 | - # occurs while the first is still in progress, the second |
---|
3680 | - # uploader will use different storage servers. |
---|
3681 | - pass |
---|
3682 | - elif (not limited) or (remaining_space >= max_space_per_bucket): |
---|
3683 | - # ok! we need to create the new share file. |
---|
3684 | - bw = BucketWriter(self, incominghome, finalhome, |
---|
3685 | - max_space_per_bucket, lease_info, canary) |
---|
3686 | - if self.no_storage: |
---|
3687 | - bw.throw_out_all_data = True |
---|
3688 | + # all share numbers that are incoming |
---|
3689 | + incoming = self.backend.get_incoming_shnums(storageindex) |
---|
3690 | + |
---|
3691 | + for shnum in ((sharenums - alreadygot) - incoming): |
---|
3692 | + if (not limited) or (remaining_space >= max_space_per_bucket): |
---|
3693 | + bw = self.backend.make_bucket_writer(storageindex, shnum, max_space_per_bucket, |
---|
3694 | + lease_info, canary) |
---|
3695 | bucketwriters[shnum] = bw |
---|
3696 | self._active_writers[bw] = 1 |
---|
3697 | if limited: |
---|
3698 | hunk ./src/allmydata/storage/server.py 234 |
---|
3699 | remaining_space -= max_space_per_bucket |
---|
3700 | else: |
---|
3701 | - # bummer! not enough space to accept this bucket |
---|
3702 | + # Bummer: not enough space to accept this share. |
---|
3703 | pass |
---|
3704 | |
---|
3705 | hunk ./src/allmydata/storage/server.py 237 |
---|
3706 | - if bucketwriters: |
---|
3707 | - fileutil.make_dirs(os.path.join(self.sharedir, si_dir)) |
---|
3708 | - |
---|
3709 | self.add_latency("allocate", time.time() - start) |
---|
3710 | return alreadygot, bucketwriters |
---|
3711 | |
---|
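| The allocation decision above is plain set arithmetic over share numbers; a |
---|
| small worked example with illustrative values: |
---|
|  |
---|
|     sharenums  = set([0, 1, 2, 3])   # what the client asked for |
---|
|     alreadygot = set([0])            # complete shares already held |
---|
|     incoming   = set([1])            # partial uploads in progress |
---|
|     to_create = (sharenums - alreadygot) - incoming |
---|
|     assert to_create == set([2, 3])  # only these get BucketWriters |
---|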
3712 | hunk ./src/allmydata/storage/server.py 240 |
---|
3713 | - def _iter_share_files(self, storage_index): |
---|
3714 | - for shnum, filename in self._get_bucket_shares(storage_index): |
---|
3715 | - f = open(filename, 'rb') |
---|
3716 | - header = f.read(32) |
---|
3717 | - f.close() |
---|
3718 | - if header[:32] == MutableShareFile.MAGIC: |
---|
3719 | - sf = MutableShareFile(filename, self) |
---|
3720 | - # note: if the share has been migrated, the renew_lease() |
---|
3721 | - # call will throw an exception, with information to help the |
---|
3722 | - # client update the lease. |
---|
3723 | - elif header[:4] == struct.pack(">L", 1): |
---|
3724 | - sf = ShareFile(filename) |
---|
3725 | - else: |
---|
3726 | - continue # non-sharefile |
---|
3727 | - yield sf |
---|
3728 | - |
---|
3729 | - def remote_add_lease(self, storage_index, renew_secret, cancel_secret, |
---|
3730 | + def remote_add_lease(self, storageindex, renew_secret, cancel_secret, |
---|
3731 | owner_num=1): |
---|
3732 | hunk ./src/allmydata/storage/server.py 242 |
---|
3733 | + # cancel_secret is no longer used. |
---|
3734 | start = time.time() |
---|
3735 | self.count("add-lease") |
---|
3736 | new_expire_time = time.time() + 31*24*60*60 |
---|
3737 | hunk ./src/allmydata/storage/server.py 246 |
---|
3738 | - lease_info = LeaseInfo(owner_num, |
---|
3739 | - renew_secret, cancel_secret, |
---|
3740 | + lease_info = LeaseInfo(owner_num, renew_secret, |
---|
3741 | new_expire_time, self.my_nodeid) |
---|
3742 | hunk ./src/allmydata/storage/server.py 248 |
---|
3743 | - for sf in self._iter_share_files(storage_index): |
---|
3744 | - sf.add_or_renew_lease(lease_info) |
---|
3745 | - self.add_latency("add-lease", time.time() - start) |
---|
3746 | - return None |
---|
3747 | |
---|
3748 | hunk ./src/allmydata/storage/server.py 249 |
---|
3749 | - def remote_renew_lease(self, storage_index, renew_secret): |
---|
3750 | + try: |
---|
3751 | + self.backend.add_or_renew_lease(lease_info) |
---|
3752 | + finally: |
---|
3753 | + self.add_latency("add-lease", time.time() - start) |
---|
3754 | + |
---|
3755 | + def remote_renew_lease(self, storageindex, renew_secret): |
---|
3756 | start = time.time() |
---|
3757 | self.count("renew") |
---|
3758 | hunk ./src/allmydata/storage/server.py 257 |
---|
3759 | - new_expire_time = time.time() + 31*24*60*60 |
---|
3760 | - found_buckets = False |
---|
3761 | - for sf in self._iter_share_files(storage_index): |
---|
3762 | - found_buckets = True |
---|
3763 | - sf.renew_lease(renew_secret, new_expire_time) |
---|
3764 | - self.add_latency("renew", time.time() - start) |
---|
3765 | - if not found_buckets: |
---|
3766 | - raise IndexError("no such lease to renew") |
---|
3767 | + |
---|
3768 | + try: |
---|
3769 | + shareset = self.backend.get_shareset(storageindex) |
---|
3770 | + new_expiration_time = start + 31*24*60*60 # one month from now |
---|
3771 | + shareset.renew_lease(renew_secret, new_expiration_time) |
---|
3772 | + finally: |
---|
3773 | + self.add_latency("renew", time.time() - start) |
---|
3774 | |
---|
3775 | def bucket_writer_closed(self, bw, consumed_size): |
---|
3776 | if self.stats_provider: |
---|
3777 | hunk ./src/allmydata/storage/server.py 270 |
---|
3778 | self.stats_provider.count('storage_server.bytes_added', consumed_size) |
---|
3779 | del self._active_writers[bw] |
---|
3780 | |
---|
3781 | - def _get_bucket_shares(self, storage_index): |
---|
3782 | - """Return a list of (shnum, pathname) tuples for files that hold |
---|
3783 | - shares for this storage_index. In each tuple, 'shnum' will always be |
---|
3784 | - the integer form of the last component of 'pathname'.""" |
---|
3785 | - storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index)) |
---|
3786 | - try: |
---|
3787 | - for f in os.listdir(storagedir): |
---|
3788 | - if NUM_RE.match(f): |
---|
3789 | - filename = os.path.join(storagedir, f) |
---|
3790 | - yield (int(f), filename) |
---|
3791 | - except OSError: |
---|
3792 | - # Commonly caused by there being no buckets at all. |
---|
3793 | - pass |
---|
3794 | - |
---|
3795 | - def remote_get_buckets(self, storage_index): |
---|
3796 | + def remote_get_buckets(self, storageindex): |
---|
3797 | start = time.time() |
---|
3798 | self.count("get") |
---|
3799 | hunk ./src/allmydata/storage/server.py 273 |
---|
3800 | - si_s = si_b2a(storage_index) |
---|
3801 | + si_s = si_b2a(storageindex) |
---|
3802 | log.msg("storage: get_buckets %s" % si_s) |
---|
3803 | bucketreaders = {} # k: sharenum, v: BucketReader |
---|
3804 | hunk ./src/allmydata/storage/server.py 276 |
---|
3805 | - for shnum, filename in self._get_bucket_shares(storage_index): |
---|
3806 | - bucketreaders[shnum] = BucketReader(self, filename, |
---|
3807 | - storage_index, shnum) |
---|
3808 | - self.add_latency("get", time.time() - start) |
---|
3809 | - return bucketreaders |
---|
3810 | |
---|
3811 | hunk ./src/allmydata/storage/server.py 277 |
---|
3812 | - def get_leases(self, storage_index): |
---|
3813 | - """Provide an iterator that yields all of the leases attached to this |
---|
3814 | - bucket. Each lease is returned as a LeaseInfo instance. |
---|
3815 | + try: |
---|
3816 | + shareset = self.backend.get_shareset(storageindex) |
---|
3817 | + for share in shareset.get_shares(): |
---|
3818 | + bucketreaders[share.get_shnum()] = self.backend.make_bucket_reader(self, share) |
---|
3819 | + return bucketreaders |
---|
3820 | + finally: |
---|
3821 | + self.add_latency("get", time.time() - start) |
---|
3822 | |
---|
3823 | hunk ./src/allmydata/storage/server.py 285 |
---|
3824 | - This method is not for client use. |
---|
3825 | + def get_leases(self, storageindex): |
---|
3826 | """ |
---|
3827 | hunk ./src/allmydata/storage/server.py 287 |
---|
3828 | + Provide an iterator that yields all of the leases attached to this |
---|
3829 | + bucket. Each lease is returned as a LeaseInfo instance. |
---|
3830 | |
---|
3831 | hunk ./src/allmydata/storage/server.py 290 |
---|
3832 | - # since all shares get the same lease data, we just grab the leases |
---|
3833 | - # from the first share |
---|
3834 | - try: |
---|
3835 | - shnum, filename = self._get_bucket_shares(storage_index).next() |
---|
3836 | - sf = ShareFile(filename) |
---|
3837 | - return sf.get_leases() |
---|
3838 | - except StopIteration: |
---|
3839 | - return iter([]) |
---|
3840 | + This method is not for client use. XXX do we need it at all? |
---|
3841 | + """ |
---|
3842 | + return self.backend.get_shareset(storageindex).get_leases() |
---|
3843 | |
---|
3844 | hunk ./src/allmydata/storage/server.py 294 |
---|
3845 | - def remote_slot_testv_and_readv_and_writev(self, storage_index, |
---|
3846 | + def remote_slot_testv_and_readv_and_writev(self, storageindex, |
---|
3847 | secrets, |
---|
3848 | test_and_write_vectors, |
---|
3849 | read_vector): |
---|
3850 | hunk ./src/allmydata/storage/server.py 300 |
---|
3851 | start = time.time() |
---|
3852 | self.count("writev") |
---|
3853 | - si_s = si_b2a(storage_index) |
---|
3854 | + si_s = si_b2a(storageindex) |
---|
3855 | log.msg("storage: slot_writev %s" % si_s) |
---|
3856 | hunk ./src/allmydata/storage/server.py 302 |
---|
3857 | - si_dir = storage_index_to_dir(storage_index) |
---|
3858 | - (write_enabler, renew_secret, cancel_secret) = secrets |
---|
3859 | - # shares exist if there is a file for them |
---|
3860 | - bucketdir = os.path.join(self.sharedir, si_dir) |
---|
3861 | - shares = {} |
---|
3862 | - if os.path.isdir(bucketdir): |
---|
3863 | - for sharenum_s in os.listdir(bucketdir): |
---|
3864 | - try: |
---|
3865 | - sharenum = int(sharenum_s) |
---|
3866 | - except ValueError: |
---|
3867 | - continue |
---|
3868 | - filename = os.path.join(bucketdir, sharenum_s) |
---|
3869 | - msf = MutableShareFile(filename, self) |
---|
3870 | - msf.check_write_enabler(write_enabler, si_s) |
---|
3871 | - shares[sharenum] = msf |
---|
3872 | - # write_enabler is good for all existing shares. |
---|
3873 | - |
---|
3874 | - # Now evaluate test vectors. |
---|
3875 | - testv_is_good = True |
---|
3876 | - for sharenum in test_and_write_vectors: |
---|
3877 | - (testv, datav, new_length) = test_and_write_vectors[sharenum] |
---|
3878 | - if sharenum in shares: |
---|
3879 | - if not shares[sharenum].check_testv(testv): |
---|
3880 | - self.log("testv failed: [%d]: %r" % (sharenum, testv)) |
---|
3881 | - testv_is_good = False |
---|
3882 | - break |
---|
3883 | - else: |
---|
3884 | - # compare the vectors against an empty share, in which all |
---|
3885 | - # reads return empty strings. |
---|
3886 | - if not EmptyShare().check_testv(testv): |
---|
3887 | - self.log("testv failed (empty): [%d] %r" % (sharenum, |
---|
3888 | - testv)) |
---|
3889 | - testv_is_good = False |
---|
3890 | - break |
---|
3891 | |
---|
3892 | hunk ./src/allmydata/storage/server.py 303 |
---|
3893 | - # now gather the read vectors, before we do any writes |
---|
3894 | - read_data = {} |
---|
3895 | - for sharenum, share in shares.items(): |
---|
3896 | - read_data[sharenum] = share.readv(read_vector) |
---|
3897 | - |
---|
3898 | - ownerid = 1 # TODO |
---|
3899 | - expire_time = time.time() + 31*24*60*60 # one month |
---|
3900 | - lease_info = LeaseInfo(ownerid, |
---|
3901 | - renew_secret, cancel_secret, |
---|
3902 | - expire_time, self.my_nodeid) |
---|
3903 | - |
---|
3904 | - if testv_is_good: |
---|
3905 | - # now apply the write vectors |
---|
3906 | - for sharenum in test_and_write_vectors: |
---|
3907 | - (testv, datav, new_length) = test_and_write_vectors[sharenum] |
---|
3908 | - if new_length == 0: |
---|
3909 | - if sharenum in shares: |
---|
3910 | - shares[sharenum].unlink() |
---|
3911 | - else: |
---|
3912 | - if sharenum not in shares: |
---|
3913 | - # allocate a new share |
---|
3914 | - allocated_size = 2000 # arbitrary, really |
---|
3915 | - share = self._allocate_slot_share(bucketdir, secrets, |
---|
3916 | - sharenum, |
---|
3917 | - allocated_size, |
---|
3918 | - owner_num=0) |
---|
3919 | - shares[sharenum] = share |
---|
3920 | - shares[sharenum].writev(datav, new_length) |
---|
3921 | - # and update the lease |
---|
3922 | - shares[sharenum].add_or_renew_lease(lease_info) |
---|
3923 | - |
---|
3924 | - if new_length == 0: |
---|
3925 | - # delete empty bucket directories |
---|
3926 | - if not os.listdir(bucketdir): |
---|
3927 | - os.rmdir(bucketdir) |
---|
3928 | - |
---|
3929 | - |
---|
3930 | - # all done |
---|
3931 | - self.add_latency("writev", time.time() - start) |
---|
3932 | - return (testv_is_good, read_data) |
---|
3933 | - |
---|
3934 | - def _allocate_slot_share(self, bucketdir, secrets, sharenum, |
---|
3935 | - allocated_size, owner_num=0): |
---|
3936 | - (write_enabler, renew_secret, cancel_secret) = secrets |
---|
3937 | - my_nodeid = self.my_nodeid |
---|
3938 | - fileutil.make_dirs(bucketdir) |
---|
3939 | - filename = os.path.join(bucketdir, "%d" % sharenum) |
---|
3940 | - share = create_mutable_sharefile(filename, my_nodeid, write_enabler, |
---|
3941 | - self) |
---|
3942 | - return share |
---|
3943 | + try: |
---|
3944 | + shareset = self.backend.get_shareset(storageindex) |
---|
3945 | + expiration_time = start + 31*24*60*60 # one month from now |
---|
3946 | + return shareset.testv_and_readv_and_writev(self, secrets, test_and_write_vectors, |
---|
3947 | + read_vector, expiration_time) |
---|
3948 | + finally: |
---|
3949 | + self.add_latency("writev", time.time() - start) |
---|
3950 | |
---|
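| For orientation, the shape of the arguments being forwarded, as used by the |
---|
| deleted inline implementation: test_and_write_vectors maps each share number to |
---|
| a (testv, datav, new_length) triple. The operator string and the concrete values |
---|
| below are illustrative: |
---|
|  |
---|
|     secrets = ('WE'*16, 'RS'*16, 'CS'*16)  # (write_enabler, renew_secret, cancel_secret) |
---|
|     test_and_write_vectors = { |
---|
|         0: ([(0, 4, 'eq', 'abcd')],   # testv: expect 'abcd' at offset 0 |
---|
|             [(0, 'wxyz')],            # datav: write 'wxyz' at offset 0 |
---|
|             None),                    # new_length: None leaves the size alone |
---|
|     } |
---|
|     read_vector = [(0, 4)]            # read 4 bytes at offset 0 from each share |
---|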
3951 | hunk ./src/allmydata/storage/server.py 311 |
---|
3952 | - def remote_slot_readv(self, storage_index, shares, readv): |
---|
3953 | + def remote_slot_readv(self, storageindex, shares, readv): |
---|
3954 | start = time.time() |
---|
3955 | self.count("readv") |
---|
3956 | hunk ./src/allmydata/storage/server.py 314 |
---|
3957 | - si_s = si_b2a(storage_index) |
---|
3958 | - lp = log.msg("storage: slot_readv %s %s" % (si_s, shares), |
---|
3959 | - facility="tahoe.storage", level=log.OPERATIONAL) |
---|
3960 | - si_dir = storage_index_to_dir(storage_index) |
---|
3961 | - # shares exist if there is a file for them |
---|
3962 | - bucketdir = os.path.join(self.sharedir, si_dir) |
---|
3963 | - if not os.path.isdir(bucketdir): |
---|
3964 | + si_s = si_b2a(storageindex) |
---|
3965 | + log.msg("storage: slot_readv %s %s" % (si_s, shares), |
---|
3966 | + facility="tahoe.storage", level=log.OPERATIONAL) |
---|
3967 | + |
---|
3968 | + try: |
---|
3969 | + shareset = self.backend.get_shareset(storageindex) |
---|
3970 | + return shareset.readv(self, shares, readv) |
---|
3971 | + finally: |
---|
3972 | self.add_latency("readv", time.time() - start) |
---|
3973 | hunk ./src/allmydata/storage/server.py 323 |
---|
3974 | - return {} |
---|
3975 | - datavs = {} |
---|
3976 | - for sharenum_s in os.listdir(bucketdir): |
---|
3977 | - try: |
---|
3978 | - sharenum = int(sharenum_s) |
---|
3979 | - except ValueError: |
---|
3980 | - continue |
---|
3981 | - if sharenum in shares or not shares: |
---|
3982 | - filename = os.path.join(bucketdir, sharenum_s) |
---|
3983 | - msf = MutableShareFile(filename, self) |
---|
3984 | - datavs[sharenum] = msf.readv(readv) |
---|
3985 | - log.msg("returning shares %s" % (datavs.keys(),), |
---|
3986 | - facility="tahoe.storage", level=log.NOISY, parent=lp) |
---|
3987 | - self.add_latency("readv", time.time() - start) |
---|
3988 | - return datavs |
---|
3989 | |
---|
3990 | hunk ./src/allmydata/storage/server.py 324 |
---|
3991 | - def remote_advise_corrupt_share(self, share_type, storage_index, shnum, |
---|
3992 | - reason): |
---|
3993 | - fileutil.make_dirs(self.corruption_advisory_dir) |
---|
3994 | - now = time_format.iso_utc(sep="T") |
---|
3995 | - si_s = si_b2a(storage_index) |
---|
3996 | - # windows can't handle colons in the filename |
---|
3997 | - fn = os.path.join(self.corruption_advisory_dir, |
---|
3998 | - "%s--%s-%d" % (now, si_s, shnum)).replace(":","") |
---|
3999 | - f = open(fn, "w") |
---|
4000 | - f.write("report: Share Corruption\n") |
---|
4001 | - f.write("type: %s\n" % share_type) |
---|
4002 | - f.write("storage_index: %s\n" % si_s) |
---|
4003 | - f.write("share_number: %d\n" % shnum) |
---|
4004 | - f.write("\n") |
---|
4005 | - f.write(reason) |
---|
4006 | - f.write("\n") |
---|
4007 | - f.close() |
---|
4008 | - log.msg(format=("client claims corruption in (%(share_type)s) " + |
---|
4009 | - "%(si)s-%(shnum)d: %(reason)s"), |
---|
4010 | - share_type=share_type, si=si_s, shnum=shnum, reason=reason, |
---|
4011 | - level=log.SCARY, umid="SGx2fA") |
---|
4012 | - return None |
---|
4013 | + def remote_advise_corrupt_share(self, share_type, storage_index, shnum, reason): |
---|
4014 | + self.backend.advise_corrupt_share(share_type, storage_index, shnum, reason) |
---|
4015 | hunk ./src/allmydata/storage/shares.py 1 |
---|
4016 | -#! /usr/bin/python |
---|
4017 | - |
---|
4018 | -from allmydata.storage.mutable import MutableShareFile |
---|
4019 | -from allmydata.storage.immutable import ShareFile |
---|
4020 | - |
---|
4021 | -def get_share_file(filename): |
---|
4022 | - f = open(filename, "rb") |
---|
4023 | - prefix = f.read(32) |
---|
4024 | - f.close() |
---|
4025 | - if prefix == MutableShareFile.MAGIC: |
---|
4026 | - return MutableShareFile(filename) |
---|
4027 | - # otherwise assume it's immutable |
---|
4028 | - return ShareFile(filename) |
---|
4029 | - |
---|
4030 | rmfile ./src/allmydata/storage/shares.py |
---|
4031 | hunk ./src/allmydata/test/common.py 20 |
---|
4032 | from allmydata.mutable.common import CorruptShareError |
---|
4033 | from allmydata.mutable.layout import unpack_header |
---|
4034 | from allmydata.mutable.publish import MutableData |
---|
4035 | -from allmydata.storage.mutable import MutableShareFile |
---|
4036 | +from allmydata.storage.backends.disk.mutable import MutableShareFile |
---|
4037 | from allmydata.util import hashutil, log, fileutil, pollmixin |
---|
4038 | from allmydata.util.assertutil import precondition |
---|
4039 | from allmydata.util.consumer import download_to_data |
---|
4040 | replace ./src/allmydata/test/common.py [A-Za-z_0-9] MutableShareFile MutableDiskShare |
---|
4041 | hunk ./src/allmydata/test/no_network.py 25 |
---|
4042 | from base64 import b32encode |
---|
4043 | from allmydata import uri as tahoe_uri |
---|
4044 | from allmydata.client import Client |
---|
4045 | -from allmydata.storage.server import StorageServer, storage_index_to_dir |
---|
4046 | +from allmydata.storage.server import StorageServer |
---|
4047 | from allmydata.util import fileutil, idlib, hashutil |
---|
4048 | from allmydata.util.hashutil import sha1 |
---|
4049 | from allmydata.test.common_web import HTTPClientGETFactory |
---|
4050 | hunk ./src/allmydata/test/no_network.py 152 |
---|
4051 | seed = server.get_permutation_seed() |
---|
4052 | return sha1(peer_selection_index + seed).digest() |
---|
4053 | return sorted(self.get_connected_servers(), key=_permuted) |
---|
4054 | + |
---|
4055 | def get_connected_servers(self): |
---|
4056 | return self.client._servers |
---|
4057 | hunk ./src/allmydata/test/no_network.py 155 |
---|
4058 | + |
---|
4059 | def get_nickname_for_serverid(self, serverid): |
---|
4060 | return None |
---|
4061 | |
---|
4062 | hunk ./src/allmydata/test/no_network.py 159 |
---|
4063 | + def get_known_servers(self): |
---|
4064 | + return self.get_connected_servers() |
---|
4065 | + |
---|
4066 | + def get_all_serverids(self): |
---|
4067 | + return self.client.get_all_serverids() |
---|
4068 | + |
---|
4069 | + |
---|
4070 | class NoNetworkClient(Client): |
---|
4071 | def create_tub(self): |
---|
4072 | pass |
---|
4073 | hunk ./src/allmydata/test/no_network.py 342 |
---|
4074 | def get_clientdir(self, i=0): |
---|
4075 | return self.g.clients[i].basedir |
---|
4076 | |
---|
4077 | + def get_server(self, i): |
---|
4078 | + return self.g.servers_by_number[i] |
---|
4079 | + |
---|
4080 | def get_serverdir(self, i): |
---|
4081 | hunk ./src/allmydata/test/no_network.py 346 |
---|
4082 | - return self.g.servers_by_number[i].storedir |
---|
4083 | + return self.g.servers_by_number[i].backend.storedir |
---|
4084 | |
---|
4085 | def iterate_servers(self): |
---|
4086 | for i in sorted(self.g.servers_by_number.keys()): |
---|
4087 | hunk ./src/allmydata/test/no_network.py 351 |
---|
4088 | ss = self.g.servers_by_number[i] |
---|
4089 | - yield (i, ss, ss.storedir) |
---|
4090 | + yield (i, ss, ss.backend.storedir) |
---|
4091 | |
---|
4092 | def find_uri_shares(self, uri): |
---|
4093 | si = tahoe_uri.from_string(uri).get_storage_index() |
---|
4094 | hunk ./src/allmydata/test/no_network.py 355 |
---|
4095 | - prefixdir = storage_index_to_dir(si) |
---|
4096 | shares = [] |
---|
4097 | for i,ss in self.g.servers_by_number.items(): |
---|
4098 | hunk ./src/allmydata/test/no_network.py 357 |
---|
4099 | - serverid = ss.my_nodeid |
---|
4100 | - basedir = os.path.join(ss.sharedir, prefixdir) |
---|
4101 | - if not os.path.exists(basedir): |
---|
4102 | - continue |
---|
4103 | - for f in os.listdir(basedir): |
---|
4104 | - try: |
---|
4105 | - shnum = int(f) |
---|
4106 | - shares.append((shnum, serverid, os.path.join(basedir, f))) |
---|
4107 | - except ValueError: |
---|
4108 | - pass |
---|
4109 | + for share in ss.backend.get_shareset(si).get_shares(): |
---|
4110 | + shares.append((share.get_shnum(), ss.get_serverid(), share._home)) |
---|
4111 | return sorted(shares) |
---|
4112 | |
---|
4113 | hunk ./src/allmydata/test/no_network.py 361 |
---|
4114 | + def count_leases(self, uri): |
---|
4115 | + """Return (filename, leasecount) pairs in arbitrary order.""" |
---|
4116 | + si = tahoe_uri.from_string(uri).get_storage_index() |
---|
4117 | + lease_counts = [] |
---|
4118 | + for i,ss in self.g.servers_by_number.items(): |
---|
4119 | + for share in ss.backend.get_shareset(si).get_shares(): |
---|
4120 | + num_leases = len(list(share.get_leases())) |
---|
4121 | + lease_counts.append( (share._home.path, num_leases) ) |
---|
4122 | + return lease_counts |
---|
4123 | + |
---|
4124 | def copy_shares(self, uri): |
---|
4125 | shares = {} |
---|
4126 | hunk ./src/allmydata/test/no_network.py 373 |
---|
4127 | - for (shnum, serverid, sharefile) in self.find_uri_shares(uri): |
---|
4128 | - shares[sharefile] = open(sharefile, "rb").read() |
---|
4129 | + for (shnum, serverid, sharefp) in self.find_uri_shares(uri): |
---|
4130 | + shares[sharefp.path] = sharefp.getContent() |
---|
4131 | return shares |
---|
4132 | |
---|
4133 | hunk ./src/allmydata/test/no_network.py 377 |
---|
4134 | + def copy_share(self, from_share, uri, to_server): |
---|
4135 | + si = uri.from_string(self.uri).get_storage_index() |
---|
4136 | + (i_shnum, i_serverid, i_sharefp) = from_share |
---|
4137 | + shares_dir = to_server.backend.get_shareset(si)._sharehomedir |
---|
4138 | + i_sharefp.copyTo(shares_dir.child(str(i_shnum))) |
---|
4139 | + |
---|
4140 | def restore_all_shares(self, shares): |
---|
4141 | hunk ./src/allmydata/test/no_network.py 384 |
---|
4142 | - for sharefile, data in shares.items(): |
---|
4143 | - open(sharefile, "wb").write(data) |
---|
4144 | + for share, data in shares.items(): |
---|
4145 | + share.home.setContent(data) |
---|
4146 | |
---|
4147 | hunk ./src/allmydata/test/no_network.py 387 |
---|
4148 | - def delete_share(self, (shnum, serverid, sharefile)): |
---|
4149 | - os.unlink(sharefile) |
---|
4150 | + def delete_share(self, (shnum, serverid, sharefp)): |
---|
4151 | + sharefp.remove() |
---|
4152 | |
---|
4153 | def delete_shares_numbered(self, uri, shnums): |
---|
4154 | hunk ./src/allmydata/test/no_network.py 391 |
---|
4155 | - for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): |
---|
4156 | + for (i_shnum, i_serverid, i_sharefp) in self.find_uri_shares(uri): |
---|
4157 | if i_shnum in shnums: |
---|
4158 | hunk ./src/allmydata/test/no_network.py 393 |
---|
4159 | - os.unlink(i_sharefile) |
---|
4160 | + i_sharefp.remove() |
---|
4161 | |
---|
4162 | hunk ./src/allmydata/test/no_network.py 395 |
---|
4163 | - def corrupt_share(self, (shnum, serverid, sharefile), corruptor_function): |
---|
4164 | - sharedata = open(sharefile, "rb").read() |
---|
4165 | - corruptdata = corruptor_function(sharedata) |
---|
4166 | - open(sharefile, "wb").write(corruptdata) |
---|
4167 | + def corrupt_share(self, (shnum, serverid, sharefp), corruptor_function, debug=False): |
---|
4168 | + sharedata = sharefp.getContent() |
---|
4169 | + corruptdata = corruptor_function(sharedata, debug=debug) |
---|
4170 | + sharefp.setContent(corruptdata) |
---|
4171 | |
---|
4172 | def corrupt_shares_numbered(self, uri, shnums, corruptor, debug=False): |
---|
4173 | hunk ./src/allmydata/test/no_network.py 401 |
---|
4174 | - for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): |
---|
4175 | + for (i_shnum, i_serverid, i_sharefp) in self.find_uri_shares(uri): |
---|
4176 | if i_shnum in shnums: |
---|
4177 | hunk ./src/allmydata/test/no_network.py 403 |
---|
4178 | - sharedata = open(i_sharefile, "rb").read() |
---|
4179 | - corruptdata = corruptor(sharedata, debug=debug) |
---|
4180 | - open(i_sharefile, "wb").write(corruptdata) |
---|
4181 | + self.corrupt_share((i_shnum, i_serverid, i_sharefp), corruptor, debug=debug) |
---|
4182 | |
---|
4183 | def corrupt_all_shares(self, uri, corruptor, debug=False): |
---|
4184 | hunk ./src/allmydata/test/no_network.py 406 |
---|
4185 | - for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri): |
---|
4186 | - sharedata = open(i_sharefile, "rb").read() |
---|
4187 | - corruptdata = corruptor(sharedata, debug=debug) |
---|
4188 | - open(i_sharefile, "wb").write(corruptdata) |
---|
4189 | + for (i_shnum, i_serverid, i_sharefp) in self.find_uri_shares(uri): |
---|
4190 | + self.corrupt_share((i_shnum, i_serverid, i_sharefp), corruptor, debug=debug) |
---|
4191 | |
---|
4192 | def GET(self, urlpath, followRedirect=False, return_response=False, |
---|
4193 | method="GET", clientnum=0, **kwargs): |
---|
4194 | addfile ./src/allmydata/test/test_backends.py |
---|
4195 | hunk ./src/allmydata/test/test_backends.py 1 |
---|
4196 | +import os, stat |
---|
4197 | +from twisted.trial import unittest |
---|
4198 | +from allmydata.util.log import msg |
---|
4199 | +from allmydata.test.common_util import ReallyEqualMixin |
---|
4200 | +import mock |
---|
4201 | + |
---|
4202 | +# This is the code that we're going to be testing. |
---|
4203 | +from allmydata.storage.server import StorageServer |
---|
4204 | +from allmydata.storage.backends.disk.disk_backend import DiskBackend, si_si2dir |
---|
4205 | +from allmydata.storage.backends.null.null_backend import NullBackend |
---|
4206 | + |
---|
4207 | +# The following share file content was generated with |
---|
4208 | +# storage.immutable.ShareFile from Tahoe-LAFS v1.8.2 |
---|
4209 | +# with share data == 'a'. The total size of this input |
---|
4210 | +# is 85 bytes. |
---|
4211 | +shareversionnumber = '\x00\x00\x00\x01' |
---|
4212 | +sharedatalength = '\x00\x00\x00\x01' |
---|
4213 | +numberofleases = '\x00\x00\x00\x01' |
---|
4214 | +shareinputdata = 'a' |
---|
4215 | +ownernumber = '\x00\x00\x00\x00' |
---|
4216 | +renewsecret = 'x'*32 |
---|
4217 | +cancelsecret = 'y'*32 |
---|
4218 | +expirationtime = '\x00(\xde\x80' |
---|
4219 | +nextlease = '' |
---|
4220 | +containerdata = shareversionnumber + sharedatalength + numberofleases |
---|
4221 | +client_data = shareinputdata + ownernumber + renewsecret + \ |
---|
4222 | + cancelsecret + expirationtime + nextlease |
---|
4223 | +share_data = containerdata + client_data |
---|
4224 | +testnodeid = 'testnodeidxxxxxxxxxx' |
---|
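| +# Layout recap for the 85 bytes assembled above: a 12-byte container |
---|
| +# header (version, data length, lease count; 4 bytes each), 1 byte of |
---|
| +# share data, then one 72-byte lease record (4-byte owner number, |
---|
| +# 32-byte renew secret, 32-byte cancel secret, 4-byte expiration time). |
---|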
4225 | + |
---|
4226 | + |
---|
4227 | +class MockFileSystem(unittest.TestCase): |
---|
4228 | + """ I simulate a filesystem that the code under test can use. I simulate |
---|
4229 | + just the parts of the filesystem that the current implementation of the |
---|
4230 | + disk backend needs. """ |
---|
4231 | + def setUp(self): |
---|
4232 | + # Make patcher, patch, and effects for disk-using functions. |
---|
4233 | + msg( "%s.setUp()" % (self,)) |
---|
4234 | + self.mockedfilepaths = {} |
---|
4235 | + # keys are pathnames, values are MockFilePath objects. This is necessary because |
---|
4236 | + # MockFilePath behavior sometimes depends on the filesystem. Where it does, |
---|
4237 | + # self.mockedfilepaths has the relevant information. |
---|
4238 | + self.storedir = MockFilePath('teststoredir', self.mockedfilepaths) |
---|
4239 | + self.basedir = self.storedir.child('shares') |
---|
4240 | + self.baseincdir = self.basedir.child('incoming') |
---|
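| + # The hardcoded components below follow the disk backend's share layout, |
---|
| + # shares/<2-char prefix>/<base32(storage index)>/<shnum>; |
---|
| + # 'orsxg5dtorxxeylhmvpws3temv4a' is base32('teststorage_index'). |
---|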
4241 | + self.sharedirfinalname = self.basedir.child('or').child('orsxg5dtorxxeylhmvpws3temv4a') |
---|
4242 | + self.sharedirincomingname = self.baseincdir.child('or').child('orsxg5dtorxxeylhmvpws3temv4a') |
---|
4243 | + self.shareincomingname = self.sharedirincomingname.child('0') |
---|
4244 | + self.sharefinalname = self.sharedirfinalname.child('0') |
---|
4245 | + |
---|
4246 | + self.FilePathFake = mock.patch('allmydata.storage.backends.disk.core.FilePath', new = MockFilePath) |
---|
4247 | + self.FilePathFake.__enter__() |
---|
4248 | + |
---|
4249 | + self.BCountingCrawler = mock.patch('allmydata.storage.backends.disk.core.BucketCountingCrawler') |
---|
4250 | + FakeBCC = self.BCountingCrawler.__enter__() |
---|
4251 | + FakeBCC.side_effect = self.call_FakeBCC |
---|
4252 | + |
---|
4253 | + self.LeaseCheckingCrawler = mock.patch('allmydata.storage.backends.disk.core.LeaseCheckingCrawler') |
---|
4254 | + FakeLCC = self.LeaseCheckingCrawler.__enter__() |
---|
4255 | + FakeLCC.side_effect = self.call_FakeLCC |
---|
4256 | + |
---|
4257 | + self.get_available_space = mock.patch('allmydata.util.fileutil.get_available_space') |
---|
4258 | + GetSpace = self.get_available_space.__enter__() |
---|
4259 | + GetSpace.side_effect = self.call_get_available_space |
---|
4260 | + |
---|
4261 | + self.statforsize = mock.patch('allmydata.storage.backends.disk.core.filepath.stat') |
---|
4262 | + getsize = self.statforsize.__enter__() |
---|
4263 | + getsize.side_effect = self.call_statforsize |
---|
4264 | + |
---|
4265 | + def call_FakeBCC(self, StateFile): |
---|
4266 | + return MockBCC() |
---|
4267 | + |
---|
4268 | + def call_FakeLCC(self, StateFile, HistoryFile, ExpirationPolicy): |
---|
4269 | + return MockLCC() |
---|
4270 | + |
---|
4271 | + def call_get_available_space(self, storedir, reservedspace): |
---|
4272 | + # The input vector has an input size of 85. |
---|
4273 | + return 85 - reservedspace |
---|
4274 | + |
---|
4275 | + def call_statforsize(self, fakefpname): |
---|
4276 | + return self.mockedfilepaths[fakefpname].fileobject.size() |
---|
4277 | + |
---|
4278 | + def tearDown(self): |
---|
4279 | + msg("%s.tearDown()" % (self,)) |
---|
4280 | + self.FilePathFake.__exit__() |
---|
4281 | + self.mockedfilepaths = {} |
---|
4282 | + |
---|
4283 | + |
---|
4284 | +class MockFilePath: |
---|
4285 | + def __init__(self, pathstring, ffpathsenvironment, existence=False): |
---|
4286 | + # I can't just make the values MockFileObjects because they may be directories. |
---|
4287 | + self.mockedfilepaths = ffpathsenvironment |
---|
4288 | + self.path = pathstring |
---|
4289 | + self.existence = existence |
---|
4290 | + if not self.mockedfilepaths.has_key(self.path): |
---|
4291 | + # The first MockFilePath object is special |
---|
4292 | + self.mockedfilepaths[self.path] = self |
---|
4293 | + self.fileobject = None |
---|
4294 | + else: |
---|
4295 | + self.fileobject = self.mockedfilepaths[self.path].fileobject |
---|
4296 | + self.spawn = {} |
---|
4297 | + self.antecedent = os.path.dirname(self.path) |
---|
4298 | + |
---|
4299 | + def setContent(self, contentstring): |
---|
4300 | + # This method rewrites the data in the file that corresponds to its path |
---|
4301 | + # name whether it preexisted or not. |
---|
4302 | + self.fileobject = MockFileObject(contentstring) |
---|
4303 | + self.existence = True |
---|
4304 | + self.mockedfilepaths[self.path].fileobject = self.fileobject |
---|
4305 | + self.mockedfilepaths[self.path].existence = self.existence |
---|
4306 | + self.setparents() |
---|
4307 | + |
---|
4308 | + def create(self): |
---|
4309 | + # This method chokes if there's a pre-existing file! |
---|
4310 | + if self.mockedfilepaths[self.path].fileobject: |
---|
4311 | + raise OSError |
---|
4312 | + else: |
---|
4313 | + self.existence = True |
---|
4314 | + self.mockedfilepaths[self.path].fileobject = self.fileobject |
---|
4315 | + self.mockedfilepaths[self.path].existence = self.existence |
---|
4316 | + self.setparents() |
---|
4317 | + |
---|
4318 | + def open(self, mode='r'): |
---|
4319 | + # XXX Makes no use of mode. |
---|
4320 | + if not self.mockedfilepaths[self.path].fileobject: |
---|
4321 | + # If there's no fileobject there already then make one and put it there. |
---|
4322 | + self.fileobject = MockFileObject() |
---|
4323 | + self.existence = True |
---|
4324 | + self.mockedfilepaths[self.path].fileobject = self.fileobject |
---|
4325 | + self.mockedfilepaths[self.path].existence = self.existence |
---|
4326 | + else: |
---|
4327 | + # Otherwise get a ref to it. |
---|
4328 | + self.fileobject = self.mockedfilepaths[self.path].fileobject |
---|
4329 | + self.existence = self.mockedfilepaths[self.path].existence |
---|
4330 | + return self.fileobject.open(mode) |
---|
4331 | + |
---|
4332 | + def child(self, childstring): |
---|
4333 | + arg2child = os.path.join(self.path, childstring) |
---|
4334 | + child = MockFilePath(arg2child, self.mockedfilepaths) |
---|
4335 | + return child |
---|
4336 | + |
---|
4337 | + def children(self): |
---|
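| + # NOTE: unlike a real FilePath, this returns every existing path that |
---|
| + # has self.path as a prefix (all descendants), not just direct children. |
---|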
4338 | + childrenfromffs = [ffp for ffp in self.mockedfilepaths.values() if ffp.path.startswith(self.path)] |
---|
4339 | + childrenfromffs = [ffp for ffp in childrenfromffs if not ffp.path.endswith(self.path)] |
---|
4340 | + childrenfromffs = [ffp for ffp in childrenfromffs if ffp.exists()] |
---|
4341 | + self.spawn = frozenset(childrenfromffs) |
---|
4342 | + return self.spawn |
---|
4343 | + |
---|
4344 | + def parent(self): |
---|
4345 | + if self.mockedfilepaths.has_key(self.antecedent): |
---|
4346 | + parent = self.mockedfilepaths[self.antecedent] |
---|
4347 | + else: |
---|
4348 | + parent = MockFilePath(self.antecedent, self.mockedfilepaths) |
---|
4349 | + return parent |
---|
4350 | + |
---|
4351 | + def parents(self): |
---|
4352 | + antecedents = [] |
---|
4353 | + def f(fps, antecedents): |
---|
4354 | + newfps = os.path.split(fps)[0] |
---|
4355 | + if newfps: |
---|
4356 | + antecedents.append(newfps) |
---|
4357 | + f(newfps, antecedents) |
---|
4358 | + f(self.path, antecedents) |
---|
4359 | + return antecedents |
---|
4360 | + |
---|
4361 | + def setparents(self): |
---|
4362 | + for fps in self.parents(): |
---|
4363 | + if not self.mockedfilepaths.has_key(fps): |
---|
4364 | + self.mockedfilepaths[fps] = MockFilePath(fps, self.mockedfilepaths, existence=True) |
---|
4365 | + |
---|
4366 | + def basename(self): |
---|
4367 | + return os.path.split(self.path)[1] |
---|
4368 | + |
---|
4369 | + def moveTo(self, newffp): |
---|
4370 | + # XXX Makes no distinction between file and directory arguments; this is a deviation from filepath.moveTo. |
---|
4371 | + if self.mockedfilepaths[newffp.path].exists(): |
---|
4372 | + raise OSError |
---|
4373 | + else: |
---|
4374 | + self.mockedfilepaths[newffp.path] = self |
---|
4375 | + self.path = newffp.path |
---|
4376 | + |
---|
4377 | + def getsize(self): |
---|
4378 | + return self.fileobject.getsize() |
---|
4379 | + |
---|
4380 | + def exists(self): |
---|
4381 | + return self.existence |
---|
4382 | + |
---|
4383 | + def isdir(self): |
---|
4384 | + return True |
---|
4385 | + |
---|
4386 | + def makedirs(self): |
---|
4387 | + # XXX These methods assume that fp_<FOO> functions in fileutil will be tested elsewhere! |
---|
4388 | + pass |
---|
4389 | + |
---|
4390 | + def remove(self): |
---|
4391 | + pass |
---|
4392 | + |
---|
4393 | + |
---|
4394 | +class MockFileObject: |
---|
4395 | + def __init__(self, contentstring=''): |
---|
4396 | + self.buffer = contentstring |
---|
4397 | + self.pos = 0 |
---|
4398 | + def open(self, mode='r'): |
---|
4399 | + return self |
---|
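| + # Pad with NULs if the write position is past the current end, then |
---|
| + # splice the new bytes into the buffer. |
---|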
4400 | + def write(self, instring): |
---|
4401 | + begin = self.pos |
---|
4402 | + padlen = begin - len(self.buffer) |
---|
4403 | + if padlen > 0: |
---|
4404 | + self.buffer += '\x00' * padlen |
---|
4405 | + end = self.pos + len(instring) |
---|
4406 | + self.buffer = self.buffer[:begin]+instring+self.buffer[end:] |
---|
4407 | + self.pos = end |
---|
4408 | + def close(self): |
---|
4409 | + self.pos = 0 |
---|
4410 | + def seek(self, pos): |
---|
4411 | + self.pos = pos |
---|
4412 | + def read(self, numberbytes): |
---|
4413 | + return self.buffer[self.pos:self.pos+numberbytes] |
---|
4414 | + def tell(self): |
---|
4415 | + return self.pos |
---|
4416 | + def size(self): |
---|
4417 | + # XXX This method A: Is not to be found in a real file B: Is part of a wild-mung-up of filepath.stat! |
---|
4418 | + # XXX Finally we shall hopefully use a getsize method soon, must consult first though. |
---|
4419 | + # Hmmm... perhaps we need to sometimes stat the address when there's not a mockfileobject present? |
---|
4420 | + return {stat.ST_SIZE:len(self.buffer)} |
---|
4421 | + def getsize(self): |
---|
4422 | + return len(self.buffer) |
---|
4423 | + |
---|
4424 | +class MockBCC: |
---|
4425 | + def setServiceParent(self, Parent): |
---|
4426 | + pass |
---|
4427 | + |
---|
4428 | + |
---|
4429 | +class MockLCC: |
---|
4430 | + def setServiceParent(self, Parent): |
---|
4431 | + pass |
---|
4432 | + |
---|
4433 | + |
---|
4434 | +class TestServerWithNullBackend(unittest.TestCase, ReallyEqualMixin): |
---|
4435 | + """ NullBackend is just for testing and executable documentation, so |
---|
4436 | + this test is actually a test of StorageServer in which we're using |
---|
4437 | + NullBackend as helper code for the test, rather than a test of |
---|
4438 | + NullBackend. """ |
---|
4439 | + def setUp(self): |
---|
4440 | + self.ss = StorageServer(testnodeid, NullBackend()) |
---|
4441 | + |
---|
4442 | + @mock.patch('os.mkdir') |
---|
4443 | + @mock.patch('__builtin__.open') |
---|
4444 | + @mock.patch('os.listdir') |
---|
4445 | + @mock.patch('os.path.isdir') |
---|
4446 | + def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir): |
---|
4447 | + """ |
---|
4448 | + Write a new share. This tests that StorageServer's remote_allocate_buckets |
---|
4449 | + generates the correct return types when given test-vector arguments. That |
---|
4450 | + bs is of the correct type is verified by attempting to invoke remote_write |
---|
4451 | + on bs[0]. |
---|
4452 | + """ |
---|
4453 | + alreadygot, bs = self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock()) |
---|
4454 | + bs[0].remote_write(0, 'a') |
---|
4455 | + self.failIf(mockisdir.called) |
---|
4456 | + self.failIf(mocklistdir.called) |
---|
4457 | + self.failIf(mockopen.called) |
---|
4458 | + self.failIf(mockmkdir.called) |
---|
4459 | + |
---|
4460 | + |
---|
4461 | +class TestServerConstruction(MockFileSystem, ReallyEqualMixin): |
---|
4462 | + def test_create_server_disk_backend(self): |
---|
4463 | + """ This tests whether a server instance can be constructed with a |
---|
4464 | + filesystem backend. To pass the test, it mustn't use the filesystem |
---|
4465 | + outside of its configured storedir. """ |
---|
4466 | + StorageServer(testnodeid, DiskBackend(self.storedir)) |
---|
4467 | + |
---|
4468 | + |
---|
4469 | +class TestServerAndDiskBackend(MockFileSystem, ReallyEqualMixin): |
---|
4470 | + """ This tests both the StorageServer and the Disk backend together. """ |
---|
4471 | + def setUp(self): |
---|
4472 | + MockFileSystem.setUp(self) |
---|
4473 | + try: |
---|
4474 | + self.backend = DiskBackend(self.storedir) |
---|
4475 | + self.ss = StorageServer(testnodeid, self.backend) |
---|
4476 | + |
---|
4477 | + self.backendwithreserve = DiskBackend(self.storedir, reserved_space=1) |
---|
4478 | + self.sswithreserve = StorageServer(testnodeid, self.backendwithreserve) |
---|
4479 | + except: |
---|
4480 | + MockFileSystem.tearDown(self) |
---|
4481 | + raise |
---|
4482 | + |
---|
4483 | + @mock.patch('time.time') |
---|
4484 | + @mock.patch('allmydata.util.fileutil.get_available_space') |
---|
4485 | + def test_out_of_space(self, mockget_available_space, mocktime): |
---|
4486 | + mocktime.return_value = 0 |
---|
4487 | + |
---|
4488 | + def call_get_available_space(dir, reserve): |
---|
4489 | + return 0 |
---|
4490 | + |
---|
4491 | + mockget_available_space.side_effect = call_get_available_space |
---|
4492 | + alreadygotc, bsc = self.sswithreserve.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock()) |
---|
4493 | + self.failUnlessReallyEqual(bsc, {}) |
---|
4494 | + |
---|
4495 | + @mock.patch('time.time') |
---|
4496 | + def test_write_and_read_share(self, mocktime): |
---|
4497 | + """ |
---|
4498 | + Write a new share, read it, and test the server's (and disk backend's) |
---|
4499 | + handling of simultaneous and successive attempts to write the same |
---|
4500 | + share. |
---|
4501 | + """ |
---|
4502 | + mocktime.return_value = 0 |
---|
4503 | + # Inspect incoming and fail unless it's empty. |
---|
4504 | + incomingset = self.ss.backend.get_incoming_shnums('teststorage_index') |
---|
4505 | + |
---|
4506 | + self.failUnlessReallyEqual(incomingset, frozenset()) |
---|
4507 | + |
---|
4508 | + # Populate incoming with the sharenum: 0. |
---|
4509 | + alreadygot, bs = self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, frozenset((0,)), 1, mock.Mock()) |
---|
4510 | + |
---|
4511 | + # This is a transparent-box test: Inspect incoming and fail unless the sharenum: 0 is listed there. |
---|
4512 | + self.failUnlessReallyEqual(self.ss.backend.get_incoming_shnums('teststorage_index'), frozenset((0,))) |
---|
4513 | + |
---|
4516 | + # Attempt to create a second share writer with the same sharenum. |
---|
4517 | + alreadygota, bsa = self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, frozenset((0,)), 1, mock.Mock()) |
---|
4518 | + |
---|
4519 | + # Show that no sharewriter results from a remote_allocate_buckets |
---|
4520 | + # with the same si and sharenum, until BucketWriter.remote_close() |
---|
4521 | + # has been called. |
---|
4522 | + self.failIf(bsa) |
---|
4523 | + |
---|
4524 | + # Test allocated size. |
---|
4525 | + spaceint = self.ss.allocated_size() |
---|
4526 | + self.failUnlessReallyEqual(spaceint, 1) |
---|
4527 | + |
---|
4528 | + # Write 'a' to shnum 0. Only tested together with close and read. |
---|
4529 | + bs[0].remote_write(0, 'a') |
---|
4530 | + |
---|
4531 | + # Preclose: Inspect final, failUnless nothing there. |
---|
4532 | + self.failUnlessReallyEqual(len(list(self.backend.get_shares('teststorage_index'))), 0) |
---|
4533 | + bs[0].remote_close() |
---|
4534 | + |
---|
4535 | + # Postclose: (Omnibus) failUnless written data is in final. |
---|
4536 | + sharesinfinal = list(self.backend.get_shares('teststorage_index')) |
---|
4537 | + self.failUnlessReallyEqual(len(sharesinfinal), 1) |
---|
4538 | + contents = sharesinfinal[0].read_share_data(0, 73) |
---|
4539 | + self.failUnlessReallyEqual(contents, client_data) |
---|
4540 | + |
---|
4541 | + # Exercise the case that the share we're asking to allocate is |
---|
4542 | + # already (completely) uploaded. |
---|
4543 | + self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock()) |
---|
4544 | + |
---|
4545 | + |
---|
4546 | + def test_read_old_share(self): |
---|
4547 | + """ This tests whether the code correctly finds and reads |
---|
4548 | + shares written out by old (Tahoe-LAFS <= v1.8.2) |
---|
4549 | + servers. There is a similar test in test_download, but that one |
---|
4550 | + is from the perspective of the client and exercises a deeper |
---|
4551 | + stack of code. This one is for exercising just the |
---|
4552 | + StorageServer object. """ |
---|
4553 | + # Construct a file with the appropriate contents in the mock filesystem. |
---|
4554 | + datalen = len(share_data) |
---|
4555 | + finalhome = si_si2dir(self.basedir, 'teststorage_index').child(str(0)) |
---|
4556 | + finalhome.setContent(share_data) |
---|
4557 | + |
---|
4558 | + # Now begin the test. |
---|
4559 | + bs = self.ss.remote_get_buckets('teststorage_index') |
---|
4560 | + |
---|
4561 | + self.failUnlessEqual(len(bs), 1) |
---|
4562 | + b = bs[0] |
---|
4563 | + # These should match by definition; the next two cases cover behaviors that are not completely unambiguous. |
---|
4564 | + self.failUnlessReallyEqual(b.remote_read(0, datalen), client_data) |
---|
4565 | + # If you try to read past the end, you get as much data as is there. |
---|
4566 | + self.failUnlessReallyEqual(b.remote_read(0, datalen+20), client_data) |
---|
4567 | + # If you start reading past the end of the file, you get the empty string. |
---|
4568 | + self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '') |
---|
4569 | hunk ./src/allmydata/test/test_download.py 6 |
---|
4570 | # a previous run. This asserts that the current code is capable of decoding |
---|
4571 | # shares from a previous version. |
---|
4572 | |
---|
4573 | -import os |
---|
4574 | from twisted.trial import unittest |
---|
4575 | from twisted.internet import defer, reactor |
---|
4576 | from allmydata import uri |
---|
4577 | hunk ./src/allmydata/test/test_download.py 9 |
---|
4578 | -from allmydata.storage.server import storage_index_to_dir |
---|
4579 | from allmydata.util import base32, fileutil, spans, log, hashutil |
---|
4580 | from allmydata.util.consumer import download_to_data, MemoryConsumer |
---|
4581 | from allmydata.immutable import upload, layout |
---|
4582 | hunk ./src/allmydata/test/test_download.py 85 |
---|
4583 | u = upload.Data(plaintext, None) |
---|
4584 | d = self.c0.upload(u) |
---|
4585 | f = open("stored_shares.py", "w") |
---|
4586 | - def _created_immutable(ur): |
---|
4587 | - # write the generated shares and URI to a file, which can then be |
---|
4588 | - # incorporated into this one next time. |
---|
4589 | - f.write('immutable_uri = "%s"\n' % ur.uri) |
---|
4590 | - f.write('immutable_shares = {\n') |
---|
4591 | - si = uri.from_string(ur.uri).get_storage_index() |
---|
4592 | - si_dir = storage_index_to_dir(si) |
---|
4593 | + |
---|
4594 | + def _write_py(uristring): |
---|
4595 | + si = uri.from_string(uristring).get_storage_index() |
---|
4596 | for (i,ss,ssdir) in self.iterate_servers(): |
---|
4597 | hunk ./src/allmydata/test/test_download.py 89 |
---|
4598 | - sharedir = os.path.join(ssdir, "shares", si_dir) |
---|
4599 | shares = {} |
---|
4600 | hunk ./src/allmydata/test/test_download.py 90 |
---|
4601 | - for fn in os.listdir(sharedir): |
---|
4602 | - shnum = int(fn) |
---|
4603 | - sharedata = open(os.path.join(sharedir, fn), "rb").read() |
---|
4604 | - shares[shnum] = sharedata |
---|
4605 | - fileutil.rm_dir(sharedir) |
---|
4606 | + shareset = ss.backend.get_shareset(si) |
---|
4607 | + for share in shareset.get_shares(): |
---|
4608 | + sharedata = share._home.getContent() |
---|
4609 | + shares[share.get_shnum()] = sharedata |
---|
4610 | + |
---|
4611 | + fileutil.fp_remove(shareset._sharehomedir) |
---|
4612 | if shares: |
---|
4613 | f.write(' %d: { # client[%d]\n' % (i, i)) |
---|
4614 | for shnum in sorted(shares.keys()): |
---|
4615 | hunk ./src/allmydata/test/test_download.py 103 |
---|
4616 | (shnum, base32.b2a(shares[shnum]))) |
---|
4617 | f.write(' },\n') |
---|
4618 | f.write('}\n') |
---|
4619 | - f.write('\n') |
---|
4620 | |
---|
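For orientation, the module that _write_py generates maps each server number to a dict of {shnum: raw share bytes}, with the bytes stored as base32 literals. A hedged round-trip sketch of that encoding (assuming allmydata is importable; the share bytes are illustrative):

    from allmydata.util import base32

    sharedata = "\x00" * 8 + "example share bytes"
    encoded = base32.b2a(sharedata)          # what _write_py writes out
    assert base32.a2b(encoded) == sharedata  # what load_shares reads back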
4621 | hunk ./src/allmydata/test/test_download.py 104 |
---|
4622 | + def _created_immutable(ur): |
---|
4623 | + # write the generated shares and URI to a file, which can then be |
---|
4624 | + # incorporated into this one next time. |
---|
4625 | + f.write('immutable_uri = "%s"\n' % ur.uri) |
---|
4626 | + f.write('immutable_shares = {\n') |
---|
4627 | + _write_py(ur.uri) |
---|
4628 | + f.write('\n') |
---|
4629 | d.addCallback(_created_immutable) |
---|
4630 | |
---|
4631 | d.addCallback(lambda ignored: |
---|
4632 | hunk ./src/allmydata/test/test_download.py 118 |
---|
4633 | def _created_mutable(n): |
---|
4634 | f.write('mutable_uri = "%s"\n' % n.get_uri()) |
---|
4635 | f.write('mutable_shares = {\n') |
---|
4636 | - si = uri.from_string(n.get_uri()).get_storage_index() |
---|
4637 | - si_dir = storage_index_to_dir(si) |
---|
4638 | - for (i,ss,ssdir) in self.iterate_servers(): |
---|
4639 | - sharedir = os.path.join(ssdir, "shares", si_dir) |
---|
4640 | - shares = {} |
---|
4641 | - for fn in os.listdir(sharedir): |
---|
4642 | - shnum = int(fn) |
---|
4643 | - sharedata = open(os.path.join(sharedir, fn), "rb").read() |
---|
4644 | - shares[shnum] = sharedata |
---|
4645 | - fileutil.rm_dir(sharedir) |
---|
4646 | - if shares: |
---|
4647 | - f.write(' %d: { # client[%d]\n' % (i, i)) |
---|
4648 | - for shnum in sorted(shares.keys()): |
---|
4649 | - f.write(' %d: base32.a2b("%s"),\n' % |
---|
4650 | - (shnum, base32.b2a(shares[shnum]))) |
---|
4651 | - f.write(' },\n') |
---|
4652 | - f.write('}\n') |
---|
4653 | - |
---|
4654 | - f.close() |
---|
4655 | + _write_py(n.get_uri()) |
---|
4656 | d.addCallback(_created_mutable) |
---|
4657 | |
---|
4658 | def _done(ignored): |
---|
4659 | hunk ./src/allmydata/test/test_download.py 123 |
---|
4660 | f.close() |
---|
4661 | - d.addCallback(_done) |
---|
4662 | + d.addBoth(_done) |
---|
4663 | |
---|
4664 | return d |
---|
4665 | |
---|
4666 | hunk ./src/allmydata/test/test_download.py 127 |
---|
4667 | + def _write_shares(self, u, shares): |
---|
4668 | + si = uri.from_string(u).get_storage_index() |
---|
4669 | + for i in shares: |
---|
4670 | + shares_for_server = shares[i] |
---|
4671 | + for shnum in shares_for_server: |
---|
4672 | + share_dir = self.get_server(i).backend.get_shareset(si)._sharehomedir |
---|
4673 | + fileutil.fp_make_dirs(share_dir) |
---|
4674 | + share_dir.child(str(shnum)).setContent(shares_for_server[shnum]) |
---|
4675 | + |
---|
4676 | def load_shares(self, ignored=None): |
---|
4677 | # this uses the data generated by create_shares() to populate the |
---|
4678 | # storage servers with pre-generated shares |
---|
4679 | hunk ./src/allmydata/test/test_download.py 139 |
---|
4680 | - si = uri.from_string(immutable_uri).get_storage_index() |
---|
4681 | - si_dir = storage_index_to_dir(si) |
---|
4682 | - for i in immutable_shares: |
---|
4683 | - shares = immutable_shares[i] |
---|
4684 | - for shnum in shares: |
---|
4685 | - dn = os.path.join(self.get_serverdir(i), "shares", si_dir) |
---|
4686 | - fileutil.make_dirs(dn) |
---|
4687 | - fn = os.path.join(dn, str(shnum)) |
---|
4688 | - f = open(fn, "wb") |
---|
4689 | - f.write(shares[shnum]) |
---|
4690 | - f.close() |
---|
4691 | - |
---|
4692 | - si = uri.from_string(mutable_uri).get_storage_index() |
---|
4693 | - si_dir = storage_index_to_dir(si) |
---|
4694 | - for i in mutable_shares: |
---|
4695 | - shares = mutable_shares[i] |
---|
4696 | - for shnum in shares: |
---|
4697 | - dn = os.path.join(self.get_serverdir(i), "shares", si_dir) |
---|
4698 | - fileutil.make_dirs(dn) |
---|
4699 | - fn = os.path.join(dn, str(shnum)) |
---|
4700 | - f = open(fn, "wb") |
---|
4701 | - f.write(shares[shnum]) |
---|
4702 | - f.close() |
---|
4703 | + self._write_shares(immutable_uri, immutable_shares) |
---|
4704 | + self._write_shares(mutable_uri, mutable_shares) |
---|
4705 | |
---|
4706 | def download_immutable(self, ignored=None): |
---|
4707 | n = self.c0.create_node_from_uri(immutable_uri) |
---|
4708 | hunk ./src/allmydata/test/test_download.py 183 |
---|
4709 | |
---|
4710 | self.load_shares() |
---|
4711 | si = uri.from_string(immutable_uri).get_storage_index() |
---|
4712 | - si_dir = storage_index_to_dir(si) |
---|
4713 | |
---|
4714 | n = self.c0.create_node_from_uri(immutable_uri) |
---|
4715 | d = download_to_data(n) |
---|
4716 | hunk ./src/allmydata/test/test_download.py 198 |
---|
4717 | for clientnum in immutable_shares: |
---|
4718 | for shnum in immutable_shares[clientnum]: |
---|
4719 | if s._shnum == shnum: |
---|
4720 | - fn = os.path.join(self.get_serverdir(clientnum), |
---|
4721 | - "shares", si_dir, str(shnum)) |
---|
4722 | - os.unlink(fn) |
---|
4723 | + share_dir = self.get_server(clientnum).backend.get_shareset(si)._sharehomedir |
---|
4724 | + share_dir.child(str(shnum)).remove() |
---|
4725 | d.addCallback(_clobber_some_shares) |
---|
4726 | d.addCallback(lambda ign: download_to_data(n)) |
---|
4727 | d.addCallback(_got_data) |
---|
4728 | hunk ./src/allmydata/test/test_download.py 212 |
---|
4729 | for shnum in immutable_shares[clientnum]: |
---|
4730 | if shnum == save_me: |
---|
4731 | continue |
---|
4732 | - fn = os.path.join(self.get_serverdir(clientnum), |
---|
4733 | - "shares", si_dir, str(shnum)) |
---|
4734 | - if os.path.exists(fn): |
---|
4735 | - os.unlink(fn) |
---|
4736 | + share_dir = self.get_server(clientnum).backend.get_shareset(si)._sharehomedir |
---|
4737 | + fileutil.fp_remove(share_dir.child(str(shnum))) |
---|
4738 | # now the download should fail with NotEnoughSharesError |
---|
4739 | return self.shouldFail(NotEnoughSharesError, "1shares", None, |
---|
4740 | download_to_data, n) |
---|
4741 | hunk ./src/allmydata/test/test_download.py 223 |
---|
4742 | # delete the last remaining share |
---|
4743 | for clientnum in immutable_shares: |
---|
4744 | for shnum in immutable_shares[clientnum]: |
---|
4745 | - fn = os.path.join(self.get_serverdir(clientnum), |
---|
4746 | - "shares", si_dir, str(shnum)) |
---|
4747 | - if os.path.exists(fn): |
---|
4748 | - os.unlink(fn) |
---|
4749 | + share_dir = self.get_server(clientnum).backend.get_shareset(si)._sharehomedir |
---|
4750 | + share_dir.child(str(shnum)).remove() |
---|
4751 | # now a new download should fail with NoSharesError. We want a |
---|
4752 | # new ImmutableFileNode so it will forget about the old shares. |
---|
4753 | # If we merely called create_node_from_uri() without first |
---|
4754 | hunk ./src/allmydata/test/test_download.py 801 |
---|
4755 | # will report two shares, and the ShareFinder will handle the |
---|
4756 | # duplicate by attaching both to the same CommonShare instance. |
---|
4757 | si = uri.from_string(immutable_uri).get_storage_index() |
---|
4758 | - si_dir = storage_index_to_dir(si) |
---|
4759 | - sh0_file = [sharefile |
---|
4760 | - for (shnum, serverid, sharefile) |
---|
4761 | - in self.find_uri_shares(immutable_uri) |
---|
4762 | - if shnum == 0][0] |
---|
4763 | - sh0_data = open(sh0_file, "rb").read() |
---|
4764 | + sh0_fp = [sharefp for (shnum, serverid, sharefp) |
---|
4765 | + in self.find_uri_shares(immutable_uri) |
---|
4766 | + if shnum == 0][0] |
---|
4767 | + sh0_data = sh0_fp.getContent() |
---|
4768 | for clientnum in immutable_shares: |
---|
4769 | if 0 in immutable_shares[clientnum]: |
---|
4770 | continue |
---|
4771 | hunk ./src/allmydata/test/test_download.py 808 |
---|
4772 | - cdir = self.get_serverdir(clientnum) |
---|
4773 | - target = os.path.join(cdir, "shares", si_dir, "0") |
---|
4774 | - outf = open(target, "wb") |
---|
4775 | - outf.write(sh0_data) |
---|
4776 | - outf.close() |
---|
4777 | + cdir = self.get_server(clientnum).backend.get_shareset(si)._sharehomedir |
---|
4778 | + fileutil.fp_make_dirs(cdir) |
---|
4779 | + cdir.child("0").setContent(sh0_data) |
---|
4780 | |
---|
4781 | d = self.download_immutable() |
---|
4782 | return d |
---|
4783 | hunk ./src/allmydata/test/test_encode.py 134 |
---|
4784 | d.addCallback(_try) |
---|
4785 | return d |
---|
4786 | |
---|
4787 | - def get_share_hashes(self, at_least_these=()): |
---|
4788 | + def get_share_hashes(self): |
---|
4789 | d = self._start() |
---|
4790 | def _try(unused=None): |
---|
4791 | if self.mode == "bad sharehash": |
---|
4792 | hunk ./src/allmydata/test/test_hung_server.py 3 |
---|
4793 | # -*- coding: utf-8 -*- |
---|
4794 | |
---|
4795 | -import os, shutil |
---|
4796 | from twisted.trial import unittest |
---|
4797 | from twisted.internet import defer |
---|
4798 | hunk ./src/allmydata/test/test_hung_server.py 5 |
---|
4799 | -from allmydata import uri |
---|
4800 | + |
---|
4801 | from allmydata.util.consumer import download_to_data |
---|
4802 | from allmydata.immutable import upload |
---|
4803 | from allmydata.mutable.common import UnrecoverableFileError |
---|
4804 | hunk ./src/allmydata/test/test_hung_server.py 10 |
---|
4805 | from allmydata.mutable.publish import MutableData |
---|
4806 | -from allmydata.storage.common import storage_index_to_dir |
---|
4807 | from allmydata.test.no_network import GridTestMixin |
---|
4808 | from allmydata.test.common import ShouldFailMixin |
---|
4809 | from allmydata.util.pollmixin import PollMixin |
---|
4810 | hunk ./src/allmydata/test/test_hung_server.py 18 |
---|
4811 | immutable_plaintext = "data" * 10000 |
---|
4812 | mutable_plaintext = "muta" * 10000 |
---|
4813 | |
---|
4814 | + |
---|
4815 | class HungServerDownloadTest(GridTestMixin, ShouldFailMixin, PollMixin, |
---|
4816 | unittest.TestCase): |
---|
4817 | # Many of these tests take around 60 seconds on François's ARM buildslave: |
---|
4818 | hunk ./src/allmydata/test/test_hung_server.py 31 |
---|
4819 | timeout = 240 |
---|
4820 | |
---|
4821 | def _break(self, servers): |
---|
4822 | - for (id, ss) in servers: |
---|
4823 | - self.g.break_server(id) |
---|
4824 | + for ss in servers: |
---|
4825 | + self.g.break_server(ss.get_serverid()) |
---|
4826 | |
---|
4827 | def _hang(self, servers, **kwargs): |
---|
4828 | hunk ./src/allmydata/test/test_hung_server.py 35 |
---|
4829 | - for (id, ss) in servers: |
---|
4830 | - self.g.hang_server(id, **kwargs) |
---|
4831 | + for ss in servers: |
---|
4832 | + self.g.hang_server(ss.get_serverid(), **kwargs) |
---|
4833 | |
---|
4834 | def _unhang(self, servers, **kwargs): |
---|
4835 | hunk ./src/allmydata/test/test_hung_server.py 39 |
---|
4836 | - for (id, ss) in servers: |
---|
4837 | - self.g.unhang_server(id, **kwargs) |
---|
4838 | + for ss in servers: |
---|
4839 | + self.g.unhang_server(ss.get_serverid(), **kwargs) |
---|
4840 | |
---|
4841 | def _hang_shares(self, shnums, **kwargs): |
---|
4842 | # hang all servers who are holding the given shares |
---|
4843 | hunk ./src/allmydata/test/test_hung_server.py 52 |
---|
4844 | hung_serverids.add(i_serverid) |
---|
4845 | |
---|
4846 | def _delete_all_shares_from(self, servers): |
---|
4847 | - serverids = [id for (id, ss) in servers] |
---|
4848 | - for (i_shnum, i_serverid, i_sharefile) in self.shares: |
---|
4849 | + serverids = [ss.get_serverid() for ss in servers] |
---|
4850 | + for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
4851 | if i_serverid in serverids: |
---|
4852 | hunk ./src/allmydata/test/test_hung_server.py 55 |
---|
4853 | - os.unlink(i_sharefile) |
---|
4854 | + i_sharefp.remove() |
---|
4855 | |
---|
4856 | def _corrupt_all_shares_in(self, servers, corruptor_func): |
---|
4857 | hunk ./src/allmydata/test/test_hung_server.py 58 |
---|
4858 | - serverids = [id for (id, ss) in servers] |
---|
4859 | - for (i_shnum, i_serverid, i_sharefile) in self.shares: |
---|
4860 | + serverids = [ss.get_serverid() for ss in servers] |
---|
4861 | + for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
4862 | if i_serverid in serverids: |
---|
4863 | hunk ./src/allmydata/test/test_hung_server.py 61 |
---|
4864 | - self._corrupt_share((i_shnum, i_sharefile), corruptor_func) |
---|
4865 | + self.corrupt_share((i_shnum, i_serverid, i_sharefp), corruptor_func) |
---|
4866 | |
---|
4867 | def _copy_all_shares_from(self, from_servers, to_server): |
---|
4868 | hunk ./src/allmydata/test/test_hung_server.py 64 |
---|
4869 | - serverids = [id for (id, ss) in from_servers] |
---|
4870 | - for (i_shnum, i_serverid, i_sharefile) in self.shares: |
---|
4871 | + serverids = [ss.get_serverid() for ss in from_servers] |
---|
4872 | + for (i_shnum, i_serverid, i_sharefp) in self.shares: |
---|
4873 | if i_serverid in serverids: |
---|
4874 | hunk ./src/allmydata/test/test_hung_server.py 67 |
---|
4875 | - self._copy_share((i_shnum, i_sharefile), to_server) |
---|
4876 | + self.copy_share((i_shnum, i_serverid, i_sharefp), self.uri, to_server) |
---|
4877 | |
---|
4878 | hunk ./src/allmydata/test/test_hung_server.py 69 |
---|
4879 | - def _copy_share(self, share, to_server): |
---|
4880 | - (sharenum, sharefile) = share |
---|
4881 | - (id, ss) = to_server |
---|
4882 | - shares_dir = os.path.join(ss.original.storedir, "shares") |
---|
4883 | - si = uri.from_string(self.uri).get_storage_index() |
---|
4884 | - si_dir = os.path.join(shares_dir, storage_index_to_dir(si)) |
---|
4885 | - if not os.path.exists(si_dir): |
---|
4886 | - os.makedirs(si_dir) |
---|
4887 | - new_sharefile = os.path.join(si_dir, str(sharenum)) |
---|
4888 | - shutil.copy(sharefile, new_sharefile) |
---|
4889 | self.shares = self.find_uri_shares(self.uri) |
---|
4890 | hunk ./src/allmydata/test/test_hung_server.py 70 |
---|
4891 | - # Make sure that the storage server has the share. |
---|
4892 | - self.failUnless((sharenum, ss.original.my_nodeid, new_sharefile) |
---|
4893 | - in self.shares) |
---|
4894 | - |
---|
4895 | - def _corrupt_share(self, share, corruptor_func): |
---|
4896 | - (sharenum, sharefile) = share |
---|
4897 | - data = open(sharefile, "rb").read() |
---|
4898 | - newdata = corruptor_func(data) |
---|
4899 | - os.unlink(sharefile) |
---|
4900 | - wf = open(sharefile, "wb") |
---|
4901 | - wf.write(newdata) |
---|
4902 | - wf.close() |
---|
4903 | |
---|
4904 | def _set_up(self, mutable, testdir, num_clients=1, num_servers=10): |
---|
4905 | self.mutable = mutable |
---|
4906 | hunk ./src/allmydata/test/test_hung_server.py 82 |
---|
4907 | |
---|
4908 | self.c0 = self.g.clients[0] |
---|
4909 | nm = self.c0.nodemaker |
---|
4910 | - self.servers = sorted([(s.get_serverid(), s.get_rref()) |
---|
4911 | - for s in nm.storage_broker.get_connected_servers()]) |
---|
4912 | + unsorted = [(s.get_serverid(), s.get_rref()) for s in nm.storage_broker.get_connected_servers()] |
---|
4913 | + self.servers = [ss for (id, ss) in sorted(unsorted)] |
---|
4914 | self.servers = self.servers[5:] + self.servers[:5] |
---|
4915 | |
---|
4916 | if mutable: |
---|
4917 | hunk ./src/allmydata/test/test_hung_server.py 244 |
---|
4918 | # stuck-but-not-overdue, and 4 live requests. All 4 live requests |
---|
4919 | # will retire before the download is complete and the ShareFinder |
---|
4920 | # is shut off. That will leave 4 OVERDUE and 1 |
---|
4921 | - # stuck-but-not-overdue, for a total of 5 requests in in |
---|
4922 | + # stuck-but-not-overdue, for a total of 5 requests in |
---|
4923 | # _sf.pending_requests |
---|
4924 | for t in self._sf.overdue_timers.values()[:4]: |
---|
4925 | t.reset(-1.0) |
---|
4926 | hunk ./src/allmydata/test/test_mutable.py 21 |
---|
4927 | from foolscap.api import eventually, fireEventually |
---|
4928 | from foolscap.logging import log |
---|
4929 | from allmydata.storage_client import StorageFarmBroker |
---|
4930 | -from allmydata.storage.common import storage_index_to_dir |
---|
4931 | from allmydata.scripts import debug |
---|
4932 | |
---|
4933 | from allmydata.mutable.filenode import MutableFileNode, BackoffAgent |
---|
4934 | hunk ./src/allmydata/test/test_mutable.py 3662 |
---|
4935 | # Now execute each assignment by writing the storage. |
---|
4936 | for (share, servernum) in assignments: |
---|
4937 | sharedata = base64.b64decode(self.sdmf_old_shares[share]) |
---|
4938 | - storedir = self.get_serverdir(servernum) |
---|
4939 | - storage_path = os.path.join(storedir, "shares", |
---|
4940 | - storage_index_to_dir(si)) |
---|
4941 | - fileutil.make_dirs(storage_path) |
---|
4942 | - fileutil.write(os.path.join(storage_path, "%d" % share), |
---|
4943 | - sharedata) |
---|
4944 | + storage_dir = self.get_server(servernum).backend.get_shareset(si).sharehomedir |
---|
4945 | + fileutil.fp_make_dirs(storage_dir) |
---|
4946 | + storage_dir.child("%d" % share).setContent(sharedata) |
---|
4947 | # ...and verify that the shares are there. |
---|
4948 | shares = self.find_uri_shares(self.sdmf_old_cap) |
---|
4949 | assert len(shares) == 10 |
---|
4950 | replace ./src/allmydata/test/test_mutable.py [A-Za-z_0-9] MutableShareFile MutableDiskShare |
---|
4951 | replace ./src/allmydata/test/test_provisioning.py [A-Za-z_0-9] MyRequest MockRequest |
---|
4952 | hunk ./src/allmydata/test/test_storage.py 14 |
---|
4953 | from allmydata import interfaces |
---|
4954 | from allmydata.util import fileutil, hashutil, base32, pollmixin, time_format |
---|
4955 | from allmydata.storage.server import StorageServer |
---|
4956 | -from allmydata.storage.mutable import MutableShareFile |
---|
4957 | -from allmydata.storage.immutable import BucketWriter, BucketReader |
---|
4958 | -from allmydata.storage.common import DataTooLargeError, storage_index_to_dir, \ |
---|
4959 | +from allmydata.storage.backends.disk.mutable import MutableShareFile |
---|
4960 | +from allmydata.storage.bucket import BucketWriter, BucketReader |
---|
4961 | +from allmydata.storage.common import DataTooLargeError, \ |
---|
4962 | UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError |
---|
4963 | from allmydata.storage.lease import LeaseInfo |
---|
4964 | from allmydata.storage.crawler import BucketCountingCrawler |
---|
4965 | hunk ./src/allmydata/test/test_storage.py 474 |
---|
4966 | w[0].remote_write(0, "\xff"*10) |
---|
4967 | w[0].remote_close() |
---|
4968 | |
---|
4969 | - fn = os.path.join(ss.sharedir, storage_index_to_dir("si1"), "0") |
---|
4970 | - f = open(fn, "rb+") |
---|
4971 | + fp = ss.backend.get_shareset("si1").sharehomedir.child("0") |
---|
4972 | + f = fp.open("rb+") |
---|
4973 | f.seek(0) |
---|
4974 | f.write(struct.pack(">L", 0)) # this is invalid: minimum used is v1 |
---|
4975 | f.close() |
---|
4976 | hunk ./src/allmydata/test/test_storage.py 814 |
---|
4977 | def test_bad_magic(self): |
---|
4978 | ss = self.create("test_bad_magic") |
---|
4979 | self.allocate(ss, "si1", "we1", self._lease_secret.next(), set([0]), 10) |
---|
4980 | - fn = os.path.join(ss.sharedir, storage_index_to_dir("si1"), "0") |
---|
4981 | - f = open(fn, "rb+") |
---|
4982 | + fp = ss.backend.get_shareset("si1").sharehomedir.child("0") |
---|
4983 | + f = fp.open("rb+") |
---|
4984 | f.seek(0) |
---|
4985 | f.write("BAD MAGIC") |
---|
4986 | f.close() |
---|
4987 | hunk ./src/allmydata/test/test_storage.py 1229 |
---|
4988 | |
---|
4989 | # create a random non-numeric file in the bucket directory, to |
---|
4990 | # exercise the code that's supposed to ignore those. |
---|
4991 | - bucket_dir = os.path.join(self.workdir("test_leases"), |
---|
4992 | - "shares", storage_index_to_dir("si1")) |
---|
4993 | - f = open(os.path.join(bucket_dir, "ignore_me.txt"), "w") |
---|
4994 | - f.write("you ought to be ignoring me\n") |
---|
4995 | - f.close() |
---|
4996 | + bucket_dir = ss.backend.get_shareset("si1").sharehomedir |
---|
4997 | + bucket_dir.child("ignore_me.txt").setContent("you ought to be ignoring me\n") |
---|
4998 | |
---|
4999 | s0 = MutableShareFile(os.path.join(bucket_dir, "0")) |
---|
5000 | self.failUnlessEqual(len(list(s0.get_leases())), 1) |
---|
5001 | hunk ./src/allmydata/test/test_storage.py 3118 |
---|
5002 | [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis |
---|
5003 | |
---|
5004 | # add a non-sharefile to exercise another code path |
---|
5005 | - fn = os.path.join(ss.sharedir, |
---|
5006 | - storage_index_to_dir(immutable_si_0), |
---|
5007 | - "not-a-share") |
---|
5008 | - f = open(fn, "wb") |
---|
5009 | - f.write("I am not a share.\n") |
---|
5010 | - f.close() |
---|
5011 | + fp = ss.backend.get_shareset(immutable_si_0).sharehomedir.child("not-a-share") |
---|
5012 | + fp.setContent("I am not a share.\n") |
---|
5013 | |
---|
5014 | # this is before the crawl has started, so we're not in a cycle yet |
---|
5015 | initial_state = lc.get_state() |
---|
5016 | hunk ./src/allmydata/test/test_storage.py 3282 |
---|
5017 | def test_expire_age(self): |
---|
5018 | basedir = "storage/LeaseCrawler/expire_age" |
---|
5019 | fileutil.make_dirs(basedir) |
---|
5020 | - # setting expiration_time to 2000 means that any lease which is more |
---|
5021 | - # than 2000s old will be expired. |
---|
5022 | - ss = InstrumentedStorageServer(basedir, "\x00" * 20, |
---|
5023 | - expiration_enabled=True, |
---|
5024 | - expiration_mode="age", |
---|
5025 | - expiration_override_lease_duration=2000) |
---|
5026 | + # setting 'override_lease_duration' to 2000 means that any lease that |
---|
5027 | + # is more than 2000 seconds old will be expired. |
---|
5028 | + expiration_policy = { |
---|
5029 | + 'enabled': True, |
---|
5030 | + 'mode': 'age', |
---|
5031 | + 'override_lease_duration': 2000, |
---|
5032 | + 'sharetypes': ('mutable', 'immutable'), |
---|
5033 | + } |
---|
5034 | + ss = InstrumentedStorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5035 | # make it start sooner than usual. |
---|
5036 | lc = ss.lease_checker |
---|
5037 | lc.slow_start = 0 |
---|
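As this hunk shows, the separate expiration_* keyword arguments are collapsed into one expiration_policy dict. A minimal construction sketch (key names as in the tests above; the basedir is illustrative):

    from allmydata.util import fileutil
    from allmydata.storage.server import StorageServer

    basedir = "storage/example"
    fileutil.make_dirs(basedir)
    expiration_policy = {
        'enabled': True,                  # turn the expirer on
        'mode': 'age',                    # or 'cutoff-date'
        'override_lease_duration': 2000,  # seconds; consulted when mode == 'age'
        'sharetypes': ('mutable', 'immutable'),
    }
    ss = StorageServer(basedir, "\x00" * 20, expiration_policy)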
5038 | hunk ./src/allmydata/test/test_storage.py 3423 |
---|
5039 | def test_expire_cutoff_date(self): |
---|
5040 | basedir = "storage/LeaseCrawler/expire_cutoff_date" |
---|
5041 | fileutil.make_dirs(basedir) |
---|
5042 | - # setting cutoff-date to 2000 seconds ago means that any lease which |
---|
5043 | - # is more than 2000s old will be expired. |
---|
5044 | + # setting 'cutoff_date' to 2000 seconds ago means that any lease that |
---|
5045 | + # is more than 2000 seconds old will be expired. |
---|
5046 | now = time.time() |
---|
5047 | then = int(now - 2000) |
---|
5048 | hunk ./src/allmydata/test/test_storage.py 3427 |
---|
5049 | - ss = InstrumentedStorageServer(basedir, "\x00" * 20, |
---|
5050 | - expiration_enabled=True, |
---|
5051 | - expiration_mode="cutoff-date", |
---|
5052 | - expiration_cutoff_date=then) |
---|
5053 | + expiration_policy = { |
---|
5054 | + 'enabled': True, |
---|
5055 | + 'mode': 'cutoff-date', |
---|
5056 | + 'cutoff_date': then, |
---|
5057 | + 'sharetypes': ('mutable', 'immutable'), |
---|
5058 | + } |
---|
5059 | + ss = InstrumentedStorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5060 | # make it start sooner than usual. |
---|
5061 | lc = ss.lease_checker |
---|
5062 | lc.slow_start = 0 |
---|
5063 | hunk ./src/allmydata/test/test_storage.py 3575 |
---|
5064 | def test_only_immutable(self): |
---|
5065 | basedir = "storage/LeaseCrawler/only_immutable" |
---|
5066 | fileutil.make_dirs(basedir) |
---|
5067 | + # setting 'cutoff_date' to 2000 seconds ago means that any lease that |
---|
5068 | + # is more than 2000 seconds old will be expired. |
---|
5069 | now = time.time() |
---|
5070 | then = int(now - 2000) |
---|
5071 | hunk ./src/allmydata/test/test_storage.py 3579 |
---|
5072 | - ss = StorageServer(basedir, "\x00" * 20, |
---|
5073 | - expiration_enabled=True, |
---|
5074 | - expiration_mode="cutoff-date", |
---|
5075 | - expiration_cutoff_date=then, |
---|
5076 | - expiration_sharetypes=("immutable",)) |
---|
5077 | + expiration_policy = { |
---|
5078 | + 'enabled': True, |
---|
5079 | + 'mode': 'cutoff-date', |
---|
5080 | + 'cutoff_date': then, |
---|
5081 | + 'sharetypes': ('immutable',), |
---|
5082 | + } |
---|
5083 | + ss = StorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5084 | lc = ss.lease_checker |
---|
5085 | lc.slow_start = 0 |
---|
5086 | webstatus = StorageStatus(ss) |
---|
5087 | hunk ./src/allmydata/test/test_storage.py 3636 |
---|
5088 | def test_only_mutable(self): |
---|
5089 | basedir = "storage/LeaseCrawler/only_mutable" |
---|
5090 | fileutil.make_dirs(basedir) |
---|
5091 | + # setting 'cutoff_date' to 2000 seconds ago means that any lease that |
---|
5092 | + # is more than 2000 seconds old will be expired. |
---|
5093 | now = time.time() |
---|
5094 | then = int(now - 2000) |
---|
5095 | hunk ./src/allmydata/test/test_storage.py 3640 |
---|
5096 | - ss = StorageServer(basedir, "\x00" * 20, |
---|
5097 | - expiration_enabled=True, |
---|
5098 | - expiration_mode="cutoff-date", |
---|
5099 | - expiration_cutoff_date=then, |
---|
5100 | - expiration_sharetypes=("mutable",)) |
---|
5101 | + expiration_policy = { |
---|
5102 | + 'enabled': True, |
---|
5103 | + 'mode': 'cutoff-date', |
---|
5104 | + 'cutoff_date': then, |
---|
5105 | + 'sharetypes': ('mutable',), |
---|
5106 | + } |
---|
5107 | + ss = StorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5108 | lc = ss.lease_checker |
---|
5109 | lc.slow_start = 0 |
---|
5110 | webstatus = StorageStatus(ss) |
---|
5111 | hunk ./src/allmydata/test/test_storage.py 3819 |
---|
5112 | def test_no_st_blocks(self): |
---|
5113 | basedir = "storage/LeaseCrawler/no_st_blocks" |
---|
5114 | fileutil.make_dirs(basedir) |
---|
5115 | - ss = No_ST_BLOCKS_StorageServer(basedir, "\x00" * 20, |
---|
5116 | - expiration_mode="age", |
---|
5117 | - expiration_override_lease_duration=-1000) |
---|
5118 | - # a negative expiration_time= means the "configured-" |
---|
5119 | + # A negative 'override_lease_duration' means that the "configured-" |
---|
5120 | # space-recovered counts will be non-zero, since all shares will have |
---|
5121 | hunk ./src/allmydata/test/test_storage.py 3821 |
---|
5122 | - # expired by then |
---|
5123 | + # expired by then. |
---|
5124 | + expiration_policy = { |
---|
5125 | + 'enabled': True, |
---|
5126 | + 'mode': 'age', |
---|
5127 | + 'override_lease_duration': -1000, |
---|
5128 | + 'sharetypes': ('mutable', 'immutable'), |
---|
5129 | + } |
---|
5130 | + ss = No_ST_BLOCKS_StorageServer(basedir, "\x00" * 20, expiration_policy) |
---|
5131 | |
---|
5132 | # make it start sooner than usual. |
---|
5133 | lc = ss.lease_checker |
---|
5134 | hunk ./src/allmydata/test/test_storage.py 3877 |
---|
5135 | [immutable_si_0, immutable_si_1, mutable_si_2, mutable_si_3] = self.sis |
---|
5136 | first = min(self.sis) |
---|
5137 | first_b32 = base32.b2a(first) |
---|
5138 | - fn = os.path.join(ss.sharedir, storage_index_to_dir(first), "0") |
---|
5139 | - f = open(fn, "rb+") |
---|
5140 | + fp = ss.backend.get_shareset(first).sharehomedir.child("0") |
---|
5141 | + f = fp.open("rb+") |
---|
5142 | f.seek(0) |
---|
5143 | f.write("BAD MAGIC") |
---|
5144 | f.close() |
---|
5145 | hunk ./src/allmydata/test/test_storage.py 3890 |
---|
5146 | |
---|
5147 | # also create an empty bucket |
---|
5148 | empty_si = base32.b2a("\x04"*16) |
---|
5149 | - empty_bucket_dir = os.path.join(ss.sharedir, |
---|
5150 | - storage_index_to_dir(empty_si)) |
---|
5151 | - fileutil.make_dirs(empty_bucket_dir) |
---|
5152 | + empty_bucket_dir = ss.backend.get_shareset(empty_si).sharehomedir |
---|
5153 | + fileutil.fp_make_dirs(empty_bucket_dir) |
---|
5154 | |
---|
5155 | ss.setServiceParent(self.s) |
---|
5156 | |
---|
5157 | replace ./src/allmydata/test/test_storage.py [A-Za-z_0-9] MutableShareFile MutableDiskShare |
---|
5158 | hunk ./src/allmydata/test/test_system.py 10 |
---|
5159 | |
---|
5160 | import allmydata |
---|
5161 | from allmydata import uri |
---|
5162 | -from allmydata.storage.mutable import MutableShareFile |
---|
5163 | +from allmydata.storage.backends.disk.mutable import MutableShareFile |
---|
5164 | from allmydata.storage.server import si_a2b |
---|
5165 | from allmydata.immutable import offloaded, upload |
---|
5166 | from allmydata.immutable.literal import LiteralFileNode |
---|
5167 | replace ./src/allmydata/test/test_system.py [A-Za-z_0-9] MutableShareFile MutableDiskShare |
---|
5168 | hunk ./src/allmydata/test/test_upload.py 22 |
---|
5169 | from allmydata.util.happinessutil import servers_of_happiness, \ |
---|
5170 | shares_by_server, merge_servers |
---|
5171 | from allmydata.storage_client import StorageFarmBroker |
---|
5172 | -from allmydata.storage.server import storage_index_to_dir |
---|
5173 | |
---|
5174 | MiB = 1024*1024 |
---|
5175 | |
---|
5176 | hunk ./src/allmydata/test/test_upload.py 821 |
---|
5177 | |
---|
5178 | def _copy_share_to_server(self, share_number, server_number): |
---|
5179 | ss = self.g.servers_by_number[server_number] |
---|
5180 | - # Copy share i from the directory associated with the first |
---|
5181 | - # storage server to the directory associated with this one. |
---|
5182 | - assert self.g, "I tried to find a grid at self.g, but failed" |
---|
5183 | - assert self.shares, "I tried to find shares at self.shares, but failed" |
---|
5184 | - old_share_location = self.shares[share_number][2] |
---|
5185 | - new_share_location = os.path.join(ss.storedir, "shares") |
---|
5186 | - si = uri.from_string(self.uri).get_storage_index() |
---|
5187 | - new_share_location = os.path.join(new_share_location, |
---|
5188 | - storage_index_to_dir(si)) |
---|
5189 | - if not os.path.exists(new_share_location): |
---|
5190 | - os.makedirs(new_share_location) |
---|
5191 | - new_share_location = os.path.join(new_share_location, |
---|
5192 | - str(share_number)) |
---|
5193 | - if old_share_location != new_share_location: |
---|
5194 | - shutil.copy(old_share_location, new_share_location) |
---|
5195 | - shares = self.find_uri_shares(self.uri) |
---|
5196 | - # Make sure that the storage server has the share. |
---|
5197 | - self.failUnless((share_number, ss.my_nodeid, new_share_location) |
---|
5198 | - in shares) |
---|
5199 | + self.copy_share(self.shares[share_number], ss) |
---|
5200 | |
---|
5201 | def _setup_grid(self): |
---|
5202 | """ |
---|
5203 | hunk ./src/allmydata/test/test_web.py 12 |
---|
5204 | from twisted.python import failure, log |
---|
5205 | from nevow import rend |
---|
5206 | from allmydata import interfaces, uri, webish, dirnode |
---|
5207 | -from allmydata.storage.shares import get_share_file |
---|
5208 | from allmydata.storage_client import StorageFarmBroker |
---|
5209 | from allmydata.immutable import upload |
---|
5210 | from allmydata.immutable.downloader.status import DownloadStatus |
---|
5211 | hunk ./src/allmydata/test/test_web.py 4111 |
---|
5212 | good_shares = self.find_uri_shares(self.uris["good"]) |
---|
5213 | self.failUnlessReallyEqual(len(good_shares), 10) |
---|
5214 | sick_shares = self.find_uri_shares(self.uris["sick"]) |
---|
5215 | - os.unlink(sick_shares[0][2]) |
---|
5216 | + sick_shares[0][2].remove() |
---|
5217 | dead_shares = self.find_uri_shares(self.uris["dead"]) |
---|
5218 | for i in range(1, 10): |
---|
5219 | hunk ./src/allmydata/test/test_web.py 4114 |
---|
5220 | - os.unlink(dead_shares[i][2]) |
---|
5221 | + dead_shares[i][2].remove() |
---|
5222 | c_shares = self.find_uri_shares(self.uris["corrupt"]) |
---|
5223 | cso = CorruptShareOptions() |
---|
5224 | cso.stdout = StringIO() |
---|
5225 | hunk ./src/allmydata/test/test_web.py 4118 |
---|
5226 | - cso.parseOptions([c_shares[0][2]]) |
---|
5227 | + cso.parseOptions([c_shares[0][2].path]) |
---|
5228 | corrupt_share(cso) |
---|
5229 | d.addCallback(_clobber_shares) |
---|
5230 | |
---|
5231 | hunk ./src/allmydata/test/test_web.py 4253 |
---|
5232 | good_shares = self.find_uri_shares(self.uris["good"]) |
---|
5233 | self.failUnlessReallyEqual(len(good_shares), 10) |
---|
5234 | sick_shares = self.find_uri_shares(self.uris["sick"]) |
---|
5235 | - os.unlink(sick_shares[0][2]) |
---|
5236 | + sick_shares[0][2].remove() |
---|
5237 | dead_shares = self.find_uri_shares(self.uris["dead"]) |
---|
5238 | for i in range(1, 10): |
---|
5239 | hunk ./src/allmydata/test/test_web.py 4256 |
---|
5240 | - os.unlink(dead_shares[i][2]) |
---|
5241 | + dead_shares[i][2].remove() |
---|
5242 | c_shares = self.find_uri_shares(self.uris["corrupt"]) |
---|
5243 | cso = CorruptShareOptions() |
---|
5244 | cso.stdout = StringIO() |
---|
5245 | hunk ./src/allmydata/test/test_web.py 4260 |
---|
5246 | - cso.parseOptions([c_shares[0][2]]) |
---|
5247 | + cso.parseOptions([c_shares[0][2].path]) |
---|
5248 | corrupt_share(cso) |
---|
5249 | d.addCallback(_clobber_shares) |
---|
5250 | |
---|
5251 | hunk ./src/allmydata/test/test_web.py 4319 |
---|
5252 | |
---|
5253 | def _clobber_shares(ignored): |
---|
5254 | sick_shares = self.find_uri_shares(self.uris["sick"]) |
---|
5255 | - os.unlink(sick_shares[0][2]) |
---|
5256 | + sick_shares[0][2].remove() |
---|
5257 | d.addCallback(_clobber_shares) |
---|
5258 | |
---|
5259 | d.addCallback(self.CHECK, "sick", "t=check&repair=true&output=json") |
---|
5260 | hunk ./src/allmydata/test/test_web.py 4811 |
---|
5261 | good_shares = self.find_uri_shares(self.uris["good"]) |
---|
5262 | self.failUnlessReallyEqual(len(good_shares), 10) |
---|
5263 | sick_shares = self.find_uri_shares(self.uris["sick"]) |
---|
5264 | - os.unlink(sick_shares[0][2]) |
---|
5265 | + sick_shares[0][2].remove() |
---|
5266 | #dead_shares = self.find_uri_shares(self.uris["dead"]) |
---|
5267 | #for i in range(1, 10): |
---|
5268 | hunk ./src/allmydata/test/test_web.py 4814 |
---|
5269 | - # os.unlink(dead_shares[i][2]) |
---|
5270 | + # dead_shares[i][2].remove() |
---|
5271 | |
---|
5272 | #c_shares = self.find_uri_shares(self.uris["corrupt"]) |
---|
5273 | #cso = CorruptShareOptions() |
---|
5274 | hunk ./src/allmydata/test/test_web.py 4819 |
---|
5275 | #cso.stdout = StringIO() |
---|
5276 | - #cso.parseOptions([c_shares[0][2]]) |
---|
5277 | + #cso.parseOptions([c_shares[0][2].path]) |
---|
5278 | #corrupt_share(cso) |
---|
5279 | d.addCallback(_clobber_shares) |
---|
5280 | |
---|
5281 | hunk ./src/allmydata/test/test_web.py 4870 |
---|
5282 | d.addErrback(self.explain_web_error) |
---|
5283 | return d |
---|
5284 | |
---|
5285 | - def _count_leases(self, ignored, which): |
---|
5286 | - u = self.uris[which] |
---|
5287 | - shares = self.find_uri_shares(u) |
---|
5288 | - lease_counts = [] |
---|
5289 | - for shnum, serverid, fn in shares: |
---|
5290 | - sf = get_share_file(fn) |
---|
5291 | - num_leases = len(list(sf.get_leases())) |
---|
5292 | - lease_counts.append( (fn, num_leases) ) |
---|
5293 | - return lease_counts |
---|
5294 | - |
---|
5295 | - def _assert_leasecount(self, lease_counts, expected): |
---|
5296 | + def _assert_leasecount(self, ignored, which, expected): |
---|
5297 | + lease_counts = self.count_leases(self.uris[which]) |
---|
5298 | for (fn, num_leases) in lease_counts: |
---|
5299 | if num_leases != expected: |
---|
5300 | self.fail("expected %d leases, have %d, on %s" % |
---|
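The rewrite above works because Deferred.addCallback passes any extra positional arguments through to the callback after the result, which is what lets _count_leases and _assert_leasecount collapse into one function. A self-contained sketch of the idiom:

    from twisted.internet import defer

    def _assert_leasecount(ignored, which, expected):
        # 'ignored' is the previous callback's result; 'which' and
        # 'expected' arrive via the extra addCallback arguments.
        print "checking %s, expecting %d lease(s)" % (which, expected)

    d = defer.succeed(None)
    d.addCallback(_assert_leasecount, "one", 1)
    d.addCallback(_assert_leasecount, "mutable", 2)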
5301 | hunk ./src/allmydata/test/test_web.py 4903 |
---|
5302 | self.fileurls[which] = "uri/" + urllib.quote(self.uris[which]) |
---|
5303 | d.addCallback(_compute_fileurls) |
---|
5304 | |
---|
5305 | - d.addCallback(self._count_leases, "one") |
---|
5306 | - d.addCallback(self._assert_leasecount, 1) |
---|
5307 | - d.addCallback(self._count_leases, "two") |
---|
5308 | - d.addCallback(self._assert_leasecount, 1) |
---|
5309 | - d.addCallback(self._count_leases, "mutable") |
---|
5310 | - d.addCallback(self._assert_leasecount, 1) |
---|
5311 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
5312 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
5313 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
5314 | |
---|
5315 | d.addCallback(self.CHECK, "one", "t=check") # no add-lease |
---|
5316 | def _got_html_good(res): |
---|
5317 | hunk ./src/allmydata/test/test_web.py 4913 |
---|
5318 | self.failIf("Not Healthy" in res, res) |
---|
5319 | d.addCallback(_got_html_good) |
---|
5320 | |
---|
5321 | - d.addCallback(self._count_leases, "one") |
---|
5322 | - d.addCallback(self._assert_leasecount, 1) |
---|
5323 | - d.addCallback(self._count_leases, "two") |
---|
5324 | - d.addCallback(self._assert_leasecount, 1) |
---|
5325 | - d.addCallback(self._count_leases, "mutable") |
---|
5326 | - d.addCallback(self._assert_leasecount, 1) |
---|
5327 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
5328 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
5329 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
5330 | |
---|
5331 | # this CHECK uses the original client, which uses the same |
---|
5332 | # lease-secrets, so it will just renew the original lease |
---|
5333 | hunk ./src/allmydata/test/test_web.py 4922 |
---|
5334 | d.addCallback(self.CHECK, "one", "t=check&add-lease=true") |
---|
5335 | d.addCallback(_got_html_good) |
---|
5336 | |
---|
5337 | - d.addCallback(self._count_leases, "one") |
---|
5338 | - d.addCallback(self._assert_leasecount, 1) |
---|
5339 | - d.addCallback(self._count_leases, "two") |
---|
5340 | - d.addCallback(self._assert_leasecount, 1) |
---|
5341 | - d.addCallback(self._count_leases, "mutable") |
---|
5342 | - d.addCallback(self._assert_leasecount, 1) |
---|
5343 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
5344 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
5345 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
5346 | |
---|
5347 | # this CHECK uses an alternate client, which adds a second lease |
---|
5348 | d.addCallback(self.CHECK, "one", "t=check&add-lease=true", clientnum=1) |
---|
5349 | hunk ./src/allmydata/test/test_web.py 4930 |
---|
5350 | d.addCallback(_got_html_good) |
---|
5351 | |
---|
5352 | - d.addCallback(self._count_leases, "one") |
---|
5353 | - d.addCallback(self._assert_leasecount, 2) |
---|
5354 | - d.addCallback(self._count_leases, "two") |
---|
5355 | - d.addCallback(self._assert_leasecount, 1) |
---|
5356 | - d.addCallback(self._count_leases, "mutable") |
---|
5357 | - d.addCallback(self._assert_leasecount, 1) |
---|
5358 | + d.addCallback(self._assert_leasecount, "one", 2) |
---|
5359 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
5360 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
5361 | |
---|
5362 | d.addCallback(self.CHECK, "mutable", "t=check&add-lease=true") |
---|
5363 | d.addCallback(_got_html_good) |
---|
5364 | hunk ./src/allmydata/test/test_web.py 4937 |
---|
5365 | |
---|
5366 | - d.addCallback(self._count_leases, "one") |
---|
5367 | - d.addCallback(self._assert_leasecount, 2) |
---|
5368 | - d.addCallback(self._count_leases, "two") |
---|
5369 | - d.addCallback(self._assert_leasecount, 1) |
---|
5370 | - d.addCallback(self._count_leases, "mutable") |
---|
5371 | - d.addCallback(self._assert_leasecount, 1) |
---|
5372 | + d.addCallback(self._assert_leasecount, "one", 2) |
---|
5373 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
5374 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
5375 | |
---|
5376 | d.addCallback(self.CHECK, "mutable", "t=check&add-lease=true", |
---|
5377 | clientnum=1) |
---|
5378 | hunk ./src/allmydata/test/test_web.py 4945 |
---|
5379 | d.addCallback(_got_html_good) |
---|
5380 | |
---|
5381 | - d.addCallback(self._count_leases, "one") |
---|
5382 | - d.addCallback(self._assert_leasecount, 2) |
---|
5383 | - d.addCallback(self._count_leases, "two") |
---|
5384 | - d.addCallback(self._assert_leasecount, 1) |
---|
5385 | - d.addCallback(self._count_leases, "mutable") |
---|
5386 | - d.addCallback(self._assert_leasecount, 2) |
---|
5387 | + d.addCallback(self._assert_leasecount, "one", 2) |
---|
5388 | + d.addCallback(self._assert_leasecount, "two", 1) |
---|
5389 | + d.addCallback(self._assert_leasecount, "mutable", 2) |
---|
5390 | |
---|
5391 | d.addErrback(self.explain_web_error) |
---|
5392 | return d |
---|
5393 | hunk ./src/allmydata/test/test_web.py 4989 |
---|
5394 | self.failUnlessReallyEqual(len(units), 4+1) |
---|
5395 | d.addCallback(_done) |
---|
5396 | |
---|
5397 | - d.addCallback(self._count_leases, "root") |
---|
5398 | - d.addCallback(self._assert_leasecount, 1) |
---|
5399 | - d.addCallback(self._count_leases, "one") |
---|
5400 | - d.addCallback(self._assert_leasecount, 1) |
---|
5401 | - d.addCallback(self._count_leases, "mutable") |
---|
5402 | - d.addCallback(self._assert_leasecount, 1) |
---|
5403 | + d.addCallback(self._assert_leasecount, "root", 1) |
---|
5404 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
5405 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
5406 | |
---|
5407 | d.addCallback(self.CHECK, "root", "t=stream-deep-check&add-lease=true") |
---|
5408 | d.addCallback(_done) |
---|
5409 | hunk ./src/allmydata/test/test_web.py 4996 |
---|
5410 | |
---|
5411 | - d.addCallback(self._count_leases, "root") |
---|
5412 | - d.addCallback(self._assert_leasecount, 1) |
---|
5413 | - d.addCallback(self._count_leases, "one") |
---|
5414 | - d.addCallback(self._assert_leasecount, 1) |
---|
5415 | - d.addCallback(self._count_leases, "mutable") |
---|
5416 | - d.addCallback(self._assert_leasecount, 1) |
---|
5417 | + d.addCallback(self._assert_leasecount, "root", 1) |
---|
5418 | + d.addCallback(self._assert_leasecount, "one", 1) |
---|
5419 | + d.addCallback(self._assert_leasecount, "mutable", 1) |
---|
5420 | |
---|
5421 | d.addCallback(self.CHECK, "root", "t=stream-deep-check&add-lease=true", |
---|
5422 | clientnum=1) |
---|
5423 | hunk ./src/allmydata/test/test_web.py 5004 |
---|
5424 | d.addCallback(_done) |
---|
5425 | |
---|
5426 | - d.addCallback(self._count_leases, "root") |
---|
5427 | - d.addCallback(self._assert_leasecount, 2) |
---|
5428 | - d.addCallback(self._count_leases, "one") |
---|
5429 | - d.addCallback(self._assert_leasecount, 2) |
---|
5430 | - d.addCallback(self._count_leases, "mutable") |
---|
5431 | - d.addCallback(self._assert_leasecount, 2) |
---|
5432 | + d.addCallback(self._assert_leasecount, "root", 2) |
---|
5433 | + d.addCallback(self._assert_leasecount, "one", 2) |
---|
5434 | + d.addCallback(self._assert_leasecount, "mutable", 2) |
---|
5435 | |
---|
5436 | d.addErrback(self.explain_web_error) |
---|
5437 | return d |
---|
5438 | hunk ./src/allmydata/util/encodingutil.py 221 |
---|
5439 | def quote_path(path, quotemarks=True): |
---|
5440 | return quote_output("/".join(map(to_str, path)), quotemarks=quotemarks) |
---|
5441 | |
---|
5442 | +def quote_filepath(fp, quotemarks=True, encoding=None): |
---|
5443 | + path = fp.path |
---|
5444 | + if isinstance(path, str): |
---|
5445 | + try: |
---|
5446 | + path = path.decode(filesystem_encoding) |
---|
5447 | + except UnicodeDecodeError: |
---|
5448 | + return 'b"%s"' % (ESCAPABLE_8BIT.sub(_str_escape, path),) |
---|
5449 | + |
---|
5450 | + return quote_output(path, quotemarks=quotemarks, encoding=encoding) |
---|
5451 | + |
---|
5452 | |
---|
5453 | def unicode_platform(): |
---|
5454 | """ |
---|
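A short usage sketch of the quote_filepath helper defined above (the path is illustrative):

    from twisted.python.filepath import FilePath
    from allmydata.util.encodingutil import quote_filepath

    fp = FilePath("/tmp/example dir/share 0")
    # Quotes fp.path for display; byte paths that fail to decode in the
    # filesystem encoding fall back to a b"..." escaped form.
    print quote_filepath(fp)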
5455 | hunk ./src/allmydata/util/fileutil.py 5 |
---|
5456 | Futz with files like a pro. |
---|
5457 | """ |
---|
5458 | |
---|
5459 | -import sys, exceptions, os, stat, tempfile, time, binascii |
---|
5460 | +import errno, sys, exceptions, os, stat, tempfile, time, binascii |
---|
5461 | + |
---|
5462 | +from allmydata.util.assertutil import precondition |
---|
5463 | |
---|
5464 | from twisted.python import log |
---|
5465 | hunk ./src/allmydata/util/fileutil.py 10 |
---|
5466 | +from twisted.python.filepath import FilePath, UnlistableError |
---|
5467 | |
---|
5468 | from pycryptopp.cipher.aes import AES |
---|
5469 | |
---|
5470 | hunk ./src/allmydata/util/fileutil.py 189 |
---|
5471 | raise tx |
---|
5472 | raise exceptions.IOError, "unknown error prevented creation of directory, or deleted the directory immediately after creation: %s" % dirname # careful not to construct an IOError with a 2-tuple, as that has a special meaning... |
---|
5473 | |
---|
5474 | -def rm_dir(dirname): |
---|
5475 | +def fp_make_dirs(dirfp): |
---|
5476 | + """ |
---|
5477 | + An idempotent version of FilePath.makedirs(). If the dir already |
---|
5478 | + exists, do nothing and return without raising an exception. If this |
---|
5479 | + call creates the dir, return without raising an exception. If there is |
---|
5480 | + an error that prevents creation or if the directory gets deleted after |
---|
5481 | + fp_make_dirs() creates it and before fp_make_dirs() checks that it |
---|
5482 | + exists, raise an exception. |
---|
5483 | + """ |
---|
5485 | + tx = None |
---|
5486 | + try: |
---|
5487 | + dirfp.makedirs() |
---|
5488 | + except OSError, x: |
---|
5489 | + tx = x |
---|
5490 | + |
---|
5491 | + if not dirfp.isdir(): |
---|
5492 | + if tx: |
---|
5493 | + raise tx |
---|
5494 | + raise exceptions.IOError, "unknown error prevented creation of directory, or deleted the directory immediately after creation: %s" % dirfp # careful not to construct an IOError with a 2-tuple, as that has a special meaning... |
---|
5495 | + |
---|
5496 | +def fp_rmdir_if_empty(dirfp): |
---|
5497 | + """ Remove the directory if it is empty. """ |
---|
5498 | + try: |
---|
5499 | + os.rmdir(dirfp.path) |
---|
5500 | + except OSError, e: |
---|
5501 | + if e.errno != errno.ENOTEMPTY: |
---|
5502 | + raise |
---|
5503 | + else: |
---|
5504 | + dirfp.changed() |
---|
5505 | + |
---|
5506 | +def rmtree(dirname): |
---|
5507 | """ |
---|
5508 | A threadsafe and idempotent version of shutil.rmtree(). If the dir is |
---|
5509 | already gone, do nothing and return without raising an exception. If this |
---|
5510 | hunk ./src/allmydata/util/fileutil.py 239 |
---|
5511 | else: |
---|
5512 | remove(fullname) |
---|
5513 | os.rmdir(dirname) |
---|
5514 | - except Exception, le: |
---|
5515 | - # Ignore "No such file or directory" |
---|
5516 | - if (not isinstance(le, OSError)) or le.args[0] != 2: |
---|
5517 | + except EnvironmentError, le: |
---|
5518 | + # Ignore "No such file or directory", collect any other exception. |
---|
5519 | + if le.args[0] not in (2, 3) and le.args[0] != errno.ENOENT: |
---|
5520 | excs.append(le) |
---|
5521 | hunk ./src/allmydata/util/fileutil.py 243 |
---|
5522 | + except Exception, le: |
---|
5523 | + excs.append(le) |
---|
5524 | |
---|
5525 | # Okay, now we've recursively removed everything, ignoring any "No |
---|
5526 | # such file or directory" errors, and collecting any other errors. |
---|
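For reference, the corrected condition above implements the usual "ignore only file-not-found" idiom; a standalone sketch (remove_if_present is a hypothetical helper, and 2/3 are the Windows file/path-not-found codes that can show up in args[0]):

    import errno, os

    def remove_if_present(path):
        # Ignore "No such file or directory"; re-raise everything else.
        try:
            os.remove(path)
        except EnvironmentError, e:
            if e.args[0] not in (2, 3) and e.errno != errno.ENOENT:
                raise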
5527 | hunk ./src/allmydata/util/fileutil.py 256 |
---|
5528 | raise OSError, "Failed to remove dir for unknown reason." |
---|
5529 | raise OSError, excs |
---|
5530 | |
---|
5531 | +def fp_remove(fp): |
---|
5532 | + """ |
---|
5533 | + An idempotent version of shutil.rmtree(). If the file/dir is already |
---|
5534 | + gone, do nothing and return without raising an exception. If this call |
---|
5535 | + removes the file/dir, return without raising an exception. If there is |
---|
5536 | + an error that prevents removal, or if a file or directory at the same |
---|
5537 | + path gets created again by someone else after this deletes it and before |
---|
5538 | + this checks that it is gone, raise an exception. |
---|
5539 | + """ |
---|
5540 | + try: |
---|
5541 | + fp.remove() |
---|
5542 | + except UnlistableError, e: |
---|
5543 | + if e.originalException.errno != errno.ENOENT: |
---|
5544 | + raise |
---|
5545 | + except OSError, e: |
---|
5546 | + if e.errno != errno.ENOENT: |
---|
5547 | + raise |
---|
5548 | + |
---|
5549 | +def rm_dir(dirname): |
---|
5551 | + # rmtree() is the renamed form (like shutil.rmtree, unlike os.rmdir); rm_dir() remains as an alias. |
---|
5551 | + return rmtree(dirname) |
---|
5552 | |
---|
5553 | def remove_if_possible(f): |
---|
5554 | try: |
---|
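A usage sketch of the idempotent FilePath helpers added above (fp_make_dirs, fp_remove, fp_rmdir_if_empty); the directory path is illustrative:

    from twisted.python.filepath import FilePath
    from allmydata.util import fileutil

    d = FilePath("/tmp/example-backend/shares")
    fileutil.fp_make_dirs(d)           # creates it
    fileutil.fp_make_dirs(d)           # second call is a no-op

    fileutil.fp_remove(d.child("0"))   # ok even if the child never existed
    fileutil.fp_rmdir_if_empty(d)      # removes d only if it is empty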
5555 | hunk ./src/allmydata/util/fileutil.py 387 |
---|
5556 | import traceback |
---|
5557 | traceback.print_exc() |
---|
5558 | |
---|
5559 | -def get_disk_stats(whichdir, reserved_space=0): |
---|
5560 | +def get_disk_stats(whichdirfp, reserved_space=0): |
---|
5561 | """Return disk statistics for the storage disk, in the form of a dict |
---|
5562 | with the following fields. |
---|
5563 | total: total bytes on disk |
---|
5564 | hunk ./src/allmydata/util/fileutil.py 408 |
---|
5565 | you can pass how many bytes you would like to leave unused on this |
---|
5566 | filesystem as reserved_space. |
---|
5567 | """ |
---|
5568 | + precondition(isinstance(whichdirfp, FilePath), whichdirfp) |
---|
5569 | |
---|
5570 | if have_GetDiskFreeSpaceExW: |
---|
5571 | # If this is a Windows system and GetDiskFreeSpaceExW is available, use it. |
---|
5572 | hunk ./src/allmydata/util/fileutil.py 419 |
---|
5573 | n_free_for_nonroot = c_ulonglong(0) |
---|
5574 | n_total = c_ulonglong(0) |
---|
5575 | n_free_for_root = c_ulonglong(0) |
---|
5576 | - retval = GetDiskFreeSpaceExW(whichdir, byref(n_free_for_nonroot), |
---|
5577 | + retval = GetDiskFreeSpaceExW(whichdirfp.path, byref(n_free_for_nonroot), |
---|
5578 | byref(n_total), |
---|
5579 | byref(n_free_for_root)) |
---|
5580 | if retval == 0: |
---|
5581 | hunk ./src/allmydata/util/fileutil.py 424 |
---|
5582 | raise OSError("Windows error %d attempting to get disk statistics for %r" |
---|
5583 | - % (GetLastError(), whichdir)) |
---|
5584 | + % (GetLastError(), whichdirfp.path)) |
---|
5585 | free_for_nonroot = n_free_for_nonroot.value |
---|
5586 | total = n_total.value |
---|
5587 | free_for_root = n_free_for_root.value |
---|
5588 | hunk ./src/allmydata/util/fileutil.py 433 |
---|
5589 | # <http://docs.python.org/library/os.html#os.statvfs> |
---|
5590 | # <http://opengroup.org/onlinepubs/7990989799/xsh/fstatvfs.html> |
---|
5591 | # <http://opengroup.org/onlinepubs/7990989799/xsh/sysstatvfs.h.html> |
---|
5592 | - s = os.statvfs(whichdir) |
---|
5593 | + s = os.statvfs(whichdirfp.path) |
---|
5594 | |
---|
5595 | # on my mac laptop: |
---|
5596 | # statvfs(2) is a wrapper around statfs(2). |
---|
5597 | hunk ./src/allmydata/util/fileutil.py 460 |
---|
5598 | 'avail': avail, |
---|
5599 | } |
---|
5600 | |
---|
5601 | -def get_available_space(whichdir, reserved_space): |
---|
5602 | +def get_available_space(whichdirfp, reserved_space): |
---|
5603 | """Returns available space for share storage in bytes, or None if no |
---|
5604 | API to get this information is available. |
---|
5605 | |
---|
5606 | hunk ./src/allmydata/util/fileutil.py 472 |
---|
5607 | you can pass how many bytes you would like to leave unused on this |
---|
5608 | filesystem as reserved_space. |
---|
5609 | """ |
---|
5610 | + precondition(isinstance(whichdirfp, FilePath), whichdirfp) |
---|
5611 | try: |
---|
5612 | hunk ./src/allmydata/util/fileutil.py 474 |
---|
5613 | - return get_disk_stats(whichdir, reserved_space)['avail'] |
---|
5614 | + return get_disk_stats(whichdirfp, reserved_space)['avail'] |
---|
5615 | except AttributeError: |
---|
5616 | return None |
---|
5617 | hunk ./src/allmydata/util/fileutil.py 477 |
---|
5618 | - except EnvironmentError: |
---|
5619 | - log.msg("OS call to get disk statistics failed") |
---|
5620 | + |
---|
5621 | + |
---|
5622 | +def get_used_space(fp): |
---|
5623 | + if fp is None: |
---|
5624 | return 0 |
---|
5625 | hunk ./src/allmydata/util/fileutil.py 482 |
---|
5626 | + try: |
---|
5627 | + s = os.stat(fp.path) |
---|
5628 | + except EnvironmentError: |
---|
5629 | + if not fp.exists(): |
---|
5630 | + return 0 |
---|
5631 | + raise |
---|
5632 | + else: |
---|
5633 | + # POSIX defines st_blocks (originally a BSDism): |
---|
5634 | + # <http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/stat.h.html> |
---|
5635 | + # but does not require stat() to give it a "meaningful value" |
---|
5636 | + # <http://pubs.opengroup.org/onlinepubs/009695399/functions/stat.html> |
---|
5637 | + # and says: |
---|
5638 | + # "The unit for the st_blocks member of the stat structure is not defined |
---|
5639 | + # within IEEE Std 1003.1-2001. In some implementations it is 512 bytes. |
---|
5640 | + # It may differ on a file system basis. There is no correlation between |
---|
5641 | + # values of the st_blocks and st_blksize, and the f_bsize (from <sys/statvfs.h>) |
---|
5642 | + # structure members." |
---|
5643 | + # |
---|
5644 | + # The Linux docs define it as "the number of blocks allocated to the file, |
---|
5645 | + # [in] 512-byte units." It is also defined that way on MacOS X. Python does |
---|
5646 | + # not set the attribute on Windows. |
---|
5647 | + # |
---|
5648 | + # We consider platforms that define st_blocks but give it a wrong value, or |
---|
5649 | + # measure it in a unit other than 512 bytes, to be broken. See also |
---|
5650 | + # <http://bugs.python.org/issue12350>. |
---|
5651 | + |
---|
5652 | + if hasattr(s, 'st_blocks'): |
---|
5653 | + return s.st_blocks * 512 |
---|
5654 | + else: |
---|
5655 | + return s.st_size |
---|
5656 | } |
---|
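Taken together, the fileutil changes in this patch move the space-accounting helpers onto FilePath. A hedged sketch of how they combine (paths illustrative; st_blocks fallback as documented above):

    from twisted.python.filepath import FilePath
    from allmydata.util import fileutil

    storedir = FilePath("/tmp/example-storage")
    fileutil.fp_make_dirs(storedir)

    # get_available_space now takes a FilePath plus a reserved-space figure.
    avail = fileutil.get_available_space(storedir, reserved_space=2**20)

    # get_used_space returns st_blocks * 512 where the platform defines
    # st_blocks meaningfully, falling back to st_size (e.g. on Windows).
    used = sum(fileutil.get_used_space(fp) for fp in storedir.children())
    print "available: %r, used: %r" % (avail, used)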
5657 | |
---|
5658 | Context: |
---|
5659 | |
---|
5660 | [misc/coding_tools/check_interfaces.py: report all violations rather than only one for a given class, by including a forked version of verifyClass. refs #1474 |
---|
5661 | david-sarah@jacaranda.org**20110916223450 |
---|
5662 | Ignore-this: 927efeecf4d12588316826a4b3479aa9 |
---|
5663 | ] |
---|
[misc/coding_tools/check_interfaces.py: use os.walk instead of FilePath, since this script shouldn't really depend on Twisted. refs #1474
david-sarah@jacaranda.org**20110916212633
Ignore-this: 46eeb4236b34375227dac71ef53f5428
]
[misc/coding_tools/check-interfaces.py: reduce false-positives by adding Dummy* to the set of excluded classnames, and bench-* to the set of excluded basenames. refs #1474
david-sarah@jacaranda.org**20110916212624
Ignore-this: 4e78f6e6fe6c0e9be9df826a0e206804
]
[Make platform-detection code tolerate linux-3.0, patch by zooko.
Brian Warner <warner@lothar.com>**20110915202620
Ignore-this: af63cf9177ae531984dea7a1cad03762

Otherwise address-autodetection can't find ifconfig. refs #1536
]
[test_web.py: fix a bug in _count_leases that was causing us to check only the lease count of one share file, not of all share files as intended.
david-sarah@jacaranda.org**20110915185126
Ignore-this: d96632bc48d770b9b577cda1bbd8ff94
]
[Add a script 'misc/coding_tools/check-interfaces.py' that checks whether zope interfaces are enforced. Also add 'check-interfaces', 'version-and-path', and 'code-checks' targets to the Makefile. fixes #1474
david-sarah@jacaranda.org**20110915161532
Ignore-this: 32d9bdc5bc4a86d21e927724560ad4b4
]
[interfaces.py: 'which -> that' grammar cleanup.
david-sarah@jacaranda.org**20110825003217
Ignore-this: a3e15f3676de1b346ad78aabdfb8cac6
]
[Fix interfaces related to MDMF. refs #393
david-sarah@jacaranda.org**20110825013046
Ignore-this: ee510c7261f8b328f0db218d71208ca3
]
[tests: bump up the timeout in this test that fails on FreeStorm's CentOS in order to see if it is just very slow
zooko@zooko.com**20110913024255
Ignore-this: 6a86d691e878cec583722faad06fb8e4
]
[interfaces: document that the 'fills-holes-with-zero-bytes' key should be used to detect whether a storage server has that behavior. refs #1528
david-sarah@jacaranda.org**20110913002843
Ignore-this: 1a00a6029d40f6792af48c5578c1fd69
]
[CREDITS: more CREDITS for Kevan and David-Sarah
zooko@zooko.com**20110912223357
Ignore-this: 4ea8f0d6f2918171d2f5359c25ad1ada
]
[merge NEWS about the mutable file bounds fixes with NEWS about work-in-progress
zooko@zooko.com**20110913205521
Ignore-this: 4289a4225f848d6ae6860dd39bc92fa8
]
[doc: add NEWS item about fixes to potential palimpsest issues in mutable files
zooko@zooko.com**20110912223329
Ignore-this: 9d63c95ddf95c7d5453c94a1ba4d406a
ref. #1528
]
[merge the NEWS about the security fix (#1528) with the work-in-progress NEWS
zooko@zooko.com**20110913205153
Ignore-this: 88e88a2ad140238c62010cf7c66953fc
]
[doc: add NEWS entry about the issue which allows unauthorized deletion of shares
zooko@zooko.com**20110912223246
Ignore-this: 77e06d09103d2ef6bb51ea3e5d6e80b0
ref. #1528
]
[doc: add entry in known_issues.rst about the issue which allows unauthorized deletion of shares
zooko@zooko.com**20110912223135
Ignore-this: b26c6ea96b6c8740b93da1f602b5a4cd
ref. #1528
]
[storage: more paranoid handling of bounds and palimpsests in mutable share files
zooko@zooko.com**20110912222655
Ignore-this: a20782fa423779ee851ea086901e1507
* storage server ignores requests to extend shares by sending a new_length
* storage server fills exposed holes (created by sending a write vector whose offset begins after the end of the current data) with 0 to avoid "palimpsest" exposure of previous contents
* storage server zeroes out lease info at the old location when moving it to a new location
ref. #1528
]
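
To make the hole-filling rule above concrete, here is a minimal illustrative sketch; it is not the actual server code, and the file object and the data_end bookkeeping are assumed names:

    def write_with_zero_fill(f, data_end, offset, data):
        # If the write starts past the current end of the data region,
        # pad the gap with zero bytes so that stale bytes previously
        # stored there (a "palimpsest") can never be read back.
        if offset > data_end:
            f.seek(data_end)
            f.write(b'\x00' * (offset - data_end))
        f.seek(offset)
        f.write(data)
        return max(data_end, offset + len(data))
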
[storage: test that the storage server ignores requests to extend shares by sending a new_length, and that the storage server fills exposed holes with 0 to avoid "palimpsest" exposure of previous contents
zooko@zooko.com**20110912222554
Ignore-this: 61ebd7b11250963efdf5b1734a35271
ref. #1528
]
[immutable: prevent clients from reading past the end of share data, which would allow them to learn the cancellation secret
zooko@zooko.com**20110912222458
Ignore-this: da1ebd31433ea052087b75b2e3480c25
Declare explicitly that we prevent this problem in the server's version dict.
fixes #1528 (there are two patches that are each a sufficient fix to #1528 and this is one of them)
]
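
The read-side counterpart is a bounds clamp: never return bytes beyond the share's data region, since the bytes after it hold lease info (including the cancellation secret). A hedged sketch with assumed variable names, not the real share-file code:

    def read_share_data(f, data_start, data_length, offset, length):
        # Clamp the requested range to [0, data_length) so a too-large
        # offset or length cannot expose lease info stored after the data.
        start = min(offset, data_length)
        end = min(offset + length, data_length)
        f.seek(data_start + start)
        return f.read(end - start)
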
[storage: remove the storage server's "remote_cancel_lease" function
zooko@zooko.com**20110912222331
Ignore-this: 1c32dee50e0981408576daffad648c50
We're removing this function because it is currently unused, because it is dangerous, and because the bug described in #1528 leaks the cancellation secret, which allows anyone who knows a file's storage index to abuse this function to delete shares of that file.
fixes #1528 (there are two patches that are each a sufficient fix to #1528 and this is one of them)
]
[storage: test that the storage server does *not* have a "remote_cancel_lease" function
zooko@zooko.com**20110912222324
Ignore-this: 21c652009704652d35f34651f98dd403
We're removing this function because it is currently unused, because it is dangerous, and because the bug described in #1528 leaks the cancellation secret, which allows anyone who knows a file's storage index to abuse this function to delete shares of that file.
ref. #1528
]
[immutable: test whether the server allows clients to read past the end of share data, which would allow them to learn the cancellation secret
zooko@zooko.com**20110912221201
Ignore-this: 376e47b346c713d37096531491176349
Also test whether the server explicitly declares that it prevents this problem.
ref #1528
]
[Retrieve._activate_enough_peers: rewrite Verify logic
Brian Warner <warner@lothar.com>**20110909181150
Ignore-this: 9367c11e1eacbf025f75ce034030d717
]
[Retrieve: implement/test stopProducing
Brian Warner <warner@lothar.com>**20110909181150
Ignore-this: 47b2c3df7dc69835e0a066ca12e3c178
]
[move DownloadStopped from download.common to interfaces
Brian Warner <warner@lothar.com>**20110909181150
Ignore-this: 8572acd3bb16e50341dbed8eb1d90a50
]
[retrieve.py: remove vestigial self._validated_readers
Brian Warner <warner@lothar.com>**20110909181150
Ignore-this: faab2ec14e314a53a2ffb714de626e2d
]
[Retrieve: rewrite flow-control: use a top-level loop() to catch all errors
Brian Warner <warner@lothar.com>**20110909181150
Ignore-this: e162d2cd53b3d3144fc6bc757e2c7714

This ought to close the potential for dropped errors and hanging downloads.
Verify needs to be examined, I may have broken it, although all tests pass.
]
[Retrieve: merge _validate_active_prefixes into _add_active_peers
Brian Warner <warner@lothar.com>**20110909181150
Ignore-this: d3ead31e17e69394ae7058eeb5beaf4c
]
[Retrieve: remove the initial prefix-is-still-good check
Brian Warner <warner@lothar.com>**20110909181150
Ignore-this: da66ee51c894eaa4e862e2dffb458acc

This check needs to be done with each fetch from the storage server, to
detect when someone has changed the share (i.e. our servermap goes stale).
Doing it just once at the beginning of retrieve isn't enough: a write might
occur after the first segment but before the second, etc.

_try_to_validate_prefix() was not removed: it will be used by the future
check-with-each-fetch code.

test_mutable.Roundtrip.test_corrupt_all_seqnum_late was disabled, since it
fails until this check is brought back. (the corruption it applies only
touches the prefix, not the block data, so the check-less retrieve actually
tolerates it). Don't forget to re-enable it once the check is brought back.
]
[MDMFSlotReadProxy: remove the queue
Brian Warner <warner@lothar.com>**20110909181150
Ignore-this: 96673cb8dda7a87a423de2f4897d66d2

This is a neat trick to reduce Foolscap overhead, but the need for an
explicit flush() complicates the Retrieve path and makes it prone to
lost-progress bugs.

Also change test_mutable.FakeStorageServer to tolerate multiple reads of the
same share in a row, a limitation exposed by turning off the queue.
]
[rearrange Retrieve: first step, shouldn't change order of execution
Brian Warner <warner@lothar.com>**20110909181149
Ignore-this: e3006368bfd2802b82ea45c52409e8d6
]
[CLI: test_cli.py -- remove an unnecessary call in test_mkdir_mutable_type. refs #1527
david-sarah@jacaranda.org**20110906183730
Ignore-this: 122e2ffbee84861c32eda766a57759cf
]
[CLI: improve test for 'tahoe mkdir --mutable-type='. refs #1527
david-sarah@jacaranda.org**20110906183020
Ignore-this: f1d4598e6c536f0a2b15050b3bc0ef9d
]
[CLI: make the --mutable-type option value for 'tahoe put' and 'tahoe mkdir' case-insensitive, and change --help for these commands accordingly. fixes #1527
david-sarah@jacaranda.org**20110905020922
Ignore-this: 75a6df0a2df9c467d8c010579e9a024e
]
[cli: make --mutable-type imply --mutable in 'tahoe put'
Kevan Carstensen <kevan@isnotajoke.com>**20110903190920
Ignore-this: 23336d3c43b2a9554e40c2a11c675e93
]
[SFTP: add a comment about a subtle interaction between OverwriteableFileConsumer and GeneralSFTPFile, and test the case it is commenting on.
david-sarah@jacaranda.org**20110903222304
Ignore-this: 980c61d4dd0119337f1463a69aeebaf0
]
[improve the storage/mutable.py asserts even more
warner@lothar.com**20110901160543
Ignore-this: 5b2b13c49bc4034f96e6e3aaaa9a9946
]
[storage/mutable.py: special characters in struct.foo arguments indicate standard as opposed to native sizes; we should be using these characters in these asserts
wilcoxjg@gmail.com**20110901084144
Ignore-this: 28ace2b2678642e4d7269ddab8c67f30
]
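
The distinction referred to here: in Python's struct module, a format string that starts with '<', '>', '!' or '=' uses standard sizes (and no padding), while a bare format uses the platform's native sizes and alignment. For example:

    import struct

    # Standard sizes: '>L' is always 4 bytes, '>Q' always 8,
    # on every platform.
    assert struct.calcsize(">L") == 4
    assert struct.calcsize(">Q") == 8

    # Native sizes: 'L' may be 4 or 8 bytes depending on the platform
    # (typically 8 on LP64 systems), which is exactly the ambiguity
    # the asserts should avoid.
    native_L = struct.calcsize("L")
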
[docs/write_coordination.rst: fix formatting and add more specific warning about access via sshfs.
david-sarah@jacaranda.org**20110831232148
Ignore-this: cd9c851d3eb4e0a1e088f337c291586c
]
[test_mutable.Version: consolidate some tests, reduce runtime from 19s to 15s
warner@lothar.com**20110831050451
Ignore-this: 64815284d9e536f8f3798b5f44cf580c
]
[mutable/retrieve: handle the case where self._read_length is 0.
Kevan Carstensen <kevan@isnotajoke.com>**20110830210141
Ignore-this: fceafbe485851ca53f2774e5a4fd8d30

Note that the downloader will still fetch a segment for a zero-length
read, which is wasteful. Fixing that isn't specifically required to fix
#1512, but it should probably be fixed before 1.9.
]
[NEWS: added summary of all changes since 1.8.2. Needs editing.
Brian Warner <warner@lothar.com>**20110830163205
Ignore-this: 273899b37a899fc6919b74572454b8b2
]
[test_mutable.Update: only upload the files needed for each test. refs #1500
Brian Warner <warner@lothar.com>**20110829072717
Ignore-this: 4d2ab4c7523af9054af7ecca9c3d9dc7

This first step shaves 15% off the runtime: from 139s to 119s on my laptop.
It also fixes a couple of places where a Deferred was being dropped, which
would cause two tests to run in parallel and also confuse error reporting.
]
[Let Uploader retain History instead of passing it into upload(). Fixes #1079.
Brian Warner <warner@lothar.com>**20110829063246
Ignore-this: 3902c58ec12bd4b2d876806248e19f17

This consistently records all immutable uploads in the Recent Uploads And
Downloads page, regardless of code path. Previously, certain webapi upload
operations (like PUT /uri/$DIRCAP/newchildname) failed to pass the History
object and were left out.
]
[Fix mutable publish/retrieve timing status displays. Fixes #1505.
Brian Warner <warner@lothar.com>**20110828232221
Ignore-this: 4080ce065cf481b2180fd711c9772dd6

publish:
* encrypt and encode times are cumulative, not just current-segment

retrieve:
* same for decrypt and decode times
* update "current status" to include segment number
* set status to Finished/Failed when download is complete
* set progress to 1.0 when complete

More improvements to consider:
* progress is currently 0% or 100%: should calculate how many segments are
  involved (remembering retrieve can be less than the whole file) and set it
  to a fraction
* "fetch" time is fuzzy: what we want is to know how much of the delay is not
  our own fault, but since we do decode/decrypt work while waiting for more
  shares, it's not straightforward
]
[Teach 'tahoe debug catalog-shares' about MDMF. Closes #1507.
Brian Warner <warner@lothar.com>**20110828080931
Ignore-this: 56ef2951db1a648353d7daac6a04c7d1
]
[debug.py: remove some dead comments
Brian Warner <warner@lothar.com>**20110828074556
Ignore-this: 40e74040dd4d14fd2f4e4baaae506b31
]
[hush pyflakes
Brian Warner <warner@lothar.com>**20110828074254
Ignore-this: bef9d537a969fa82fe4decc4ba2acb09
]
[MutableFileNode.set_downloader_hints: never depend upon order of dict.values()
Brian Warner <warner@lothar.com>**20110828074103
Ignore-this: caaf1aa518dbdde4d797b7f335230faa

The old code was calculating the "extension parameters" (a list) from the
downloader hints (a dictionary) with hints.values(), which is not stable, and
would result in corrupted filecaps (with the 'k' and 'segsize' hints
occasionally swapped). The new code always uses [k,segsize].
]
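
The underlying pitfall generalizes: the ordering of dict.values() is an implementation detail, so any wire or cap format built from it can silently change. A hedged illustration, with the hint keys taken from the description above:

    hints = {"k": 3, "segsize": 131072}

    # Fragile: values() order is not guaranteed, so 'k' and 'segsize'
    # could come out swapped and corrupt the encoded filecap.
    params_fragile = list(hints.values())

    # Robust: always emit the fields in one fixed, documented order.
    params_stable = [hints["k"], hints["segsize"]]
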
[layout.py: fix MDMF share layout documentation
Brian Warner <warner@lothar.com>**20110828073921
Ignore-this: 3f13366fed75b5e31b51ae895450a225
]
[teach 'tahoe debug dump-share' about MDMF and offsets. refs #1507
Brian Warner <warner@lothar.com>**20110828073834
Ignore-this: 3a9d2ef9c47a72bf1506ba41199a1dea
]
[test_mutable.Version.test_debug: use splitlines() to fix buildslaves
Brian Warner <warner@lothar.com>**20110828064728
Ignore-this: c7f6245426fc80b9d1ae901d5218246a

Any slave running in a directory with spaces in the name was miscounting
shares, causing the test to fail.
]
[test_mutable.Version: exercise 'tahoe debug find-shares' on MDMF. refs #1507
Brian Warner <warner@lothar.com>**20110828005542
Ignore-this: cb20bea1c28bfa50a72317d70e109672

Also changes NoNetworkGrid to put shares in storage/shares/ .
]
[test_mutable.py: oops, missed a .todo
Brian Warner <warner@lothar.com>**20110828002118
Ignore-this: fda09ae86481352b7a627c278d2a3940
]
[test_mutable: merge davidsarah's patch with my Version refactorings
warner@lothar.com**20110827235707
Ignore-this: b5aaf481c90d99e33827273b5d118fd0
]
[Make the immutable/read-only constraint checking for MDMF URIs identical to that for SSK URIs. refs #393
david-sarah@jacaranda.org**20110823012720
Ignore-this: e1f59d7ff2007c81dbef2aeb14abd721
]
[Additional tests for MDMF URIs and for zero-length files. refs #393
david-sarah@jacaranda.org**20110823011532
Ignore-this: a7cc0c09d1d2d72413f9cd227c47a9d5
]
[Additional tests for zero-length partial reads and updates to mutable versions. refs #393
david-sarah@jacaranda.org**20110822014111
Ignore-this: 5fc6f4d06e11910124e4a277ec8a43ea
]
[test_mutable.Version: factor out some expensive uploads, save 25% runtime
Brian Warner <warner@lothar.com>**20110827232737
Ignore-this: ea37383eb85ea0894b254fe4dfb45544
]
[SDMF: update filenode with correct k/N after Retrieve. Fixes #1510.
Brian Warner <warner@lothar.com>**20110827225031
Ignore-this: b50ae6e1045818c400079f118b4ef48

Without this, we get a regression when modifying a mutable file that was
created with more shares (larger N) than our current tahoe.cfg . The
modification attempt creates new versions of the (0,1,..,newN-1) shares, but
leaves the old versions of the (newN,..,oldN-1) shares alone (and throws an
assertion error in SDMFSlotWriteProxy.finish_publishing in the process).

The mixed versions that result (some shares with e.g. N=10, some with N=20,
such that both versions are recoverable) cause problems for the Publish code,
even before MDMF landed. Might be related to refs #1390 and refs #1042.
]
[layout.py: annotate assertion to figure out 'tahoe backup' failure
Brian Warner <warner@lothar.com>**20110827195253
Ignore-this: 9b92b954e3ed0d0f80154fff1ff674e5
]
[Add 'tahoe debug dump-cap' support for MDMF, DIR2-CHK, DIR2-MDMF. refs #1507.
Brian Warner <warner@lothar.com>**20110827195048
Ignore-this: 61c6af5e33fc88e0251e697a50addb2c

This also adds tests for all those cases, and fixes an omission in uri.py
that broke parsing of DIR2-MDMF-Verifier and DIR2-CHK-Verifier.
]
[MDMF: more writable/writeable consistentifications
warner@lothar.com**20110827190602
Ignore-this: 22492a9e20c1819ddb12091062888b55
]
[MDMF: s/Writable/Writeable/g, for consistency with existing SDMF code
warner@lothar.com**20110827183357
Ignore-this: 9dd312acedbdb2fc2f7bef0d0fb17c0b
]
[setup.cfg: remove no-longer-supported test_mac_diskimage alias. refs #1479
david-sarah@jacaranda.org**20110826230345
Ignore-this: 40e908b8937322a290fb8012bfcad02a
]
[test_mutable.Update: increase timeout from 120s to 400s, slaves are failing
Brian Warner <warner@lothar.com>**20110825230140
Ignore-this: 101b1924a30cdbda9b2e419e95ca15ec
]
[tests: fix check_memory test
zooko@zooko.com**20110825201116
Ignore-this: 4d66299fa8cb61d2ca04b3f45344d835
fixes #1503
]
[TAG allmydata-tahoe-1.9.0a1
warner@lothar.com**20110825161122
Ignore-this: 3cbf49f00dbda58189f893c427f65605
]
[touch NEWS to trigger buildslaves
warner@lothar.com**20110825161026
Ignore-this: 3d444737d005a9051780d15604166401
]
[test_mutable.Update: remove .timeout overrides, otherwise tests ERROR
Brian Warner <warner@lothar.com>**20110825022455
Ignore-this: 140ea1f7207ffd68be40e112f6e3d310
]
[blacklist.py: add read() method too, for completeness
warner@lothar.com**20110825021902
Ignore-this: c79a429f311b01732eba2a71119e84
]
[Implementation, tests and docs for blacklists. This version allows listing directories containing a blacklisted child. Inclusion of blacklist.py fixed. fixes #1425
david-sarah@jacaranda.org**20110824155928
Ignore-this: a306f36bb6640eaf046e66dc4beeb11c
]
[mutable/layout.py: fix unused import. refs #393
david-sarah@jacaranda.org**20110816225043
Ignore-this: 7c9d6d91521ceb9a7abd14b2c60c0604
]
[mutable/retrieve.py: cosmetics and remove a stale comment. refs #393
david-sarah@jacaranda.org**20110816214612
Ignore-this: 916e60c9dff1ef85595822e609ff34b7
]
[mutable/filenode.py: don't fetch more segments than necessary to update the file
Kevan Carstensen <kevan@isnotajoke.com>**20110813210005
Ignore-this: 2b0ad0533baa6f19f18851317dfc9f15
]
[test/test_mutable: test for incorrect div_ceil equations
Kevan Carstensen <kevan@isnotajoke.com>**20110813183936
Ignore-this: 74e6061ab2ec5e706a1235611f87d5d6
]
[mutable/retrieve.py: use floor division to calculate segment boundaries, don't fetch more segments than necessary
Kevan Carstensen <kevan@isnotajoke.com>**20110813183833
Ignore-this: 3e272249107afd3fbc1dd30c6a4f1e31
]
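
To see why floor division is the right tool here (an illustrative sketch, not the actual retrieve.py code): the first and last segments touched by a byte range are the segments containing its first and last bytes, and div_ceil-style rounding overshoots by one segment at exact boundaries.

    def segments_for_range(offset, read_length, segment_size):
        # Indices of the segments holding the first and last requested
        # bytes; read_length is assumed to be > 0.
        first = offset // segment_size
        last = (offset + read_length - 1) // segment_size
        return (first, last)

    # Reading exactly one 128 KiB segment touches only segment 0;
    # rounding up would also fetch the (unneeded) segment 1.
    assert segments_for_range(0, 131072, 131072) == (0, 0)
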
[mdmf: clean up boolean expressions, correct typos, remove self._paused, and don't unconditionally initialize block hash trees, all as suggested by davidsarah's review comments
Kevan Carstensen <kevan@isnotajoke.com>**20110813183710
Ignore-this: cc6ad9f98b64f379151aa58b77b6c4e5
]
[now that tests pass with full-size keys, return test-keys to normal (522bit)
warner@lothar.com**20110811175418
Ignore-this: dbce8a6699ba9a90d91cffbc8aa87900
]
[fix SHARE_HASH_CHAIN_SIZE computation
warner@lothar.com**20110811175350
Ignore-this: 4508359d2207c8c1b7552b546697264
]
[More idiomatic resolution of the conflict between ticket393-MDMF-2 and trunk. refs #393
david-sarah@jacaranda.org**20110810202942
Ignore-this: 7fc54a30ab0bc6ce75b7d819800c1182
]
[Replace the hard-coded 522-bit RSA key size used for tests with a TEST_RSA_KEY_SIZE constant defined in test/common.py (part 2). refs #393
david-sarah@jacaranda.org**20110810202310
Ignore-this: 7fbd4d004279599bbcb10f7b31fb010f
]
[Replace the hard-coded 522-bit RSA key size used for tests with a TEST_RSA_KEY_SIZE constant defined in test/common.py (part 1). refs #393
david-sarah@jacaranda.org**20110810202243
Ignore-this: c58d8130a2f383ff4421c632499b027b
]
[merge some minor conflicts in test code from the 393-2 branch and trunk
zooko@zooko.com**20110810172139
Ignore-this: 4a16f13eeae585c7c1dbe18c67072c90
]
[doc: eliminate the phrase "rootcap" from doc/frontends/FTP-and-SFTP.rst
zooko@zooko.com**20110809132601
Ignore-this: f7e1dd212daa65c81fb57977bce24304
Two different people have asked me for help, saying they couldn't figure out what a "rootcap" is. Hopefully just calling it a "cap" will make it easier for them to find out from the other docs what it is.
]
[test_web.py: fix a test failure dependent on whether simplejson.loads returns a unicode or str object.
david-sarah@jacaranda.org**20110808213925
Ignore-this: f7b267be8be56fcabc968e3c89999490
]
[immutable/filenode: fix pyflakes warnings
Kevan Carstensen <kevan@isnotajoke.com>**20110807004514
Ignore-this: e8d875bf8b1c5571e31b0eff42ecf64c
]
[test: fix assorted tests broken by MDMF changes
Kevan Carstensen <kevan@isnotajoke.com>**20110807004459
Ignore-this: 9a0dc7e5c74bfe840a9fce278619a103
]
[uri: add MDMF and MDMF directory caps, add extension hint support
Kevan Carstensen <kevan@isnotajoke.com>**20110807004436
Ignore-this: 6486b7d4dc0e849c6b1e9cdfb6318eac
]
[test/test_mutable: tests for MDMF
Kevan Carstensen <kevan@isnotajoke.com>**20110807004414
Ignore-this: 29f9c3a806d67df0ed09c4f0d857d347

These are their own patch because they cut across a lot of the changes
I've made in implementing MDMF in such a way as to make it difficult to
split them up into the other patches.
]
[webapi changes for MDMF
Kevan Carstensen <kevan@isnotajoke.com>**20110807004348
Ignore-this: d6d4dac680baa4c99b05882b3828796c

- Learn how to create MDMF files and directories through the
  mutable-type argument.
- Operate with the interface changes associated with MDMF and #993.
- Learn how to do partial updates of mutable files.
]
[mutable/servermap: Rework the servermap to work with MDMF mutable files
Kevan Carstensen <kevan@isnotajoke.com>**20110807004259
Ignore-this: 154b987fa0af716c41185b88ff7ee2e1
]
[dirnode: teach dirnode to make MDMF directories
Kevan Carstensen <kevan@isnotajoke.com>**20110807004224
Ignore-this: 765ccd6a07ff752bf6057a3dab9e5abd
]
[Fix some test failures caused by #393 patch.
david-sarah@jacaranda.org**20110802032810
Ignore-this: 7f65e5adb5c859af289cea7011216fef
]
[docs: amend configuration, webapi documentation to talk about MDMF
Kevan Carstensen <kevan@isnotajoke.com>**20110802022056
Ignore-this: 4cab9b7e4ab79cc1efdabe2d457f27a6
]
[cli: teach CLI how to create MDMF mutable files
Kevan Carstensen <kevan@isnotajoke.com>**20110802021613
Ignore-this: 18d0ff98e75be231eed3c53319e76936

Specifically, 'tahoe mkdir' and 'tahoe put' now take a --mutable-type
argument.
]
[frontends/sftpd: Resolve incompatibilities between SFTP frontend and MDMF changes
Kevan Carstensen <kevan@isnotajoke.com>**20110802021207
Ignore-this: 5e0f6e961048f71d4eed6d30210ffd2e
]
[mutable/layout: Define MDMF share format, write tools for working with MDMF share format
Kevan Carstensen <kevan@isnotajoke.com>**20110802021120
Ignore-this: fa76ef4800939e19ba3cbc22a2eab4e

The changes in layout.py are mostly concerned with the MDMF share
format. In particular, we define read and write proxy objects used by
retrieval, publishing, and other code to write and read the MDMF share
format. We create equivalent proxies for SDMF objects so that these
objects can be suitably general.
]
[immutable/filenode: implement unified filenode interface
Kevan Carstensen <kevan@isnotajoke.com>**20110802020905
Ignore-this: d9a442fc285157f134f5d1b4607c6a48
]
[immutable/literal.py: Implement interface changes in literal nodes.
Kevan Carstensen <kevan@isnotajoke.com>**20110802020814
Ignore-this: 4371e71a50e65ce2607c4d67d3a32171
]
[test/common: Alter common test code to work with MDMF.
Kevan Carstensen <kevan@isnotajoke.com>**20110802015643
Ignore-this: e564403182d0030439b168dd9f8726fa

This mostly has to do with making the test code implement the new
unified filenode interfaces.
]
[mutable: train checker and repairer to work with MDMF mutable files
Kevan Carstensen <kevan@isnotajoke.com>**20110802015140
Ignore-this: 8b1928925bed63708b71ab0de8d4306f
]
[nodemaker: teach nodemaker about MDMF caps
Kevan Carstensen <kevan@isnotajoke.com>**20110802014926
Ignore-this: 430c73121b6883b99626cfd652fc65c4
]
[client: teach client how to create and work with MDMF files
Kevan Carstensen <kevan@isnotajoke.com>**20110802014811
Ignore-this: d72fbc4c2ca63f00d9ab9dc2919098ff
]
[mutable/filenode: Modify mutable filenodes for use with MDMF
Kevan Carstensen <kevan@isnotajoke.com>**20110802014501
Ignore-this: 3c230bb0ebe60a94c667b0ee0c3b28e0

In particular:
- Break MutableFileNode and MutableFileVersion into distinct classes.
- Implement the interface modifications made for MDMF.
- Be aware of MDMF caps.
- Learn how to create and work with MDMF files.
]
[nodemaker: teach nodemaker how to create MDMF mutable files
Kevan Carstensen <kevan@isnotajoke.com>**20110802014258
Ignore-this: 2bf1fd4f8c1d1ad0e855c678347b76c2
]
[interfaces: change interfaces to work with MDMF
Kevan Carstensen <kevan@isnotajoke.com>**20110802014119
Ignore-this: 2f441022cf888c044bc9e6dd609db139

A lot of this work concerns #993, in that it unifies (to an extent) the
interfaces of mutable and immutable files.
]
[mutable/publish: teach the publisher how to publish MDMF mutable files
Kevan Carstensen <kevan@isnotajoke.com>**20110802013931
Ignore-this: 115217ec2b289452ec774cb725da8a86

Like the downloader, the publisher needs some substantial changes to handle multiple segment mutable files.
]
[mutable/retrieve: rework the mutable downloader to handle multiple-segment files
Kevan Carstensen <kevan@isnotajoke.com>**20110802013524
Ignore-this: 398d11b5cb993b50e5e4fa6e7a3856dc

The downloader needs substantial reworking to handle multiple segment
mutable files, which it needs to handle for MDMF.
]
[Fix repeated 'the' in license text.
david-sarah@jacaranda.org**20110819204836
Ignore-this: b3bd4e9ec22029fe15533ad2a60003ad
]
[Remove Non-Profit Open Software License from the set of 'added permission' licenses. Although it actually does qualify as an Open Source license (because it allows relicensing under plain OSL), its wording is unclear and could easily be misunderstood, and it contributes to incompatible license proliferation.
david-sarah@jacaranda.org**20110819204742
Ignore-this: 7373819a6b5367581356728ea62cabb1
]
[docs: change links that pointed to COPYING.TGPPL.html to point to COPYING.TGPPL.rst instead
zooko@zooko.com**20110819060142
Ignore-this: 301652554fd7ab4bfa5aa8f8a2863a9e
]
[docs: formatting: reflow to fill-column 77
zooko@zooko.com**20110819060110
Ignore-this: ed1317c126f07c63b944bd2fa6aa2d21
]
[docs: formatting: M-x whitespace-cleanup
zooko@zooko.com**20110819060041
Ignore-this: 8554b16a25067094d0dc4dc71e1b3950
]
[licensing: add to the list of licenses that we grant the added permission for
zooko@zooko.com**20110819054656
Ignore-this: eb1490416ac6b7414a27f150a8a8a047
Added: most of the ones listed on the FSF's "List of Free Software, GPL Incompatible Licenses", plus the Non-Profit Open Software License.
]
[docs: reflow the added text at the top of COPYING.GPL to fill-column 77
zooko@zooko.com**20110819053059
Ignore-this: e994ed6ffbcc12656406f11cb862ce99
]
[docs: reformat COPYING.TGPPL.html to COPYING.TGPPL.rst
zooko@zooko.com**20110819052753
Ignore-this: 34ddf623e0a6de008ba859ca9c92b2fd
]
[docs: reflow docs/logging.rst to fill-column 77
zooko@zooko.com**20110819044103
Ignore-this: a6901f2244995f278ddf8d75d29410bf
]
[doc: fix formatting error in docs/logging.rst
zooko@zooko.com**20110819043946
Ignore-this: fa182dbbe7f4fda15e0a8bfcf7f00051
]
[Cleanups for suppression of UserWarnings. refs #1435
david-sarah@jacaranda.org**20110818040749
Ignore-this: 3863ef399c1c382a1365d51f000d314c
]
[suppress warning emitted by newer zope.interface with Nevow 0.10
zooko@zooko.com**20110817203134
Ignore-this: b86d4ce0ed1c0da76d1f9eaf8d08d9c4
refs #1435
]
[doc: formatting: reflow to fill-column=77
zooko@zooko.com**20110809132510
Ignore-this: 2d6d2e203d52925968b4451f36364792
]
[_auto_deps.py: change the requirement for zope.interface to <= 3.6.2, >= 3.6.6. fixes #1435
david-sarah@jacaranda.org**20110815025347
Ignore-this: 17a88c0f6573f044fbcd6b666667bd37
]
[allmydata/__init__.py, test_version.py: make version parsing understand '<=', with test. refs #1435
david-sarah@jacaranda.org**20110815035153
Ignore-this: 8c3a75f4a2b42b56bac48b5053c5e9c2
]
[Makefile and setup.py: remove setup.py commands that we no longer need, and their uses in the Makefile. Delete a stale and incorrect comment about updating _version.py. Also fix some coding style checks in the Makefile to operate on all source files.
david-sarah@jacaranda.org**20110801031952
Ignore-this: 80a435dee3bc6e29058d4b37ff579922
]
[remove misc/debian[_helpers], rely upon official packaging instead. fixes #1454
warner@lothar.com**20110811182705
Ignore-this: 79673cafc7c108db49b5ab908d7b4668
]
[Makefile: remove targets that used misc/debian[_helpers] which no longer exist. Also change docs/debian.rst to reflect the fact that we no longer support building .debs using those targets. refs #1454
david-sarah@jacaranda.org**20110801031857
Ignore-this: 347cbeff45757db630ce34d0cfb84f92
]
[replace tabs with spaces in the #1441 'tahoe debug' synopsis
warner@lothar.com**20110811173704
Ignore-this: 513fbfb18a3dd93119ea3700118df7ee
]
[Correct the information printed by '/usr/bin/tahoe debug --help' on Debian/Ubuntu. fixes #1441
david-sarah@jacaranda.org**20110724162530
Ignore-this: 30d4b8c20e420e9a9d1b73eba1113ae
]
[doc: edit the explanation of K-of-N tradeoffs
zooko@zooko.com**20110804193409
Ignore-this: ab6f4e35a995c2099340b5c9c5d30f40
]
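
For context, the tradeoff being documented is simple arithmetic: K-of-N erasure coding expands stored data by a factor of N/K and tolerates the loss of any N-K shares. An illustrative calculation, assuming the shipped 3-of-10 defaults:

    def kofn_tradeoff(k, n, filesize):
        expansion = float(n) / k      # stored bytes per original byte
        tolerated_failures = n - k    # shares that may be lost
        return (filesize * expansion, tolerated_failures)

    # A 1 MB file under 3-of-10 uses ~3.33 MB of grid storage and
    # survives the loss of any 7 of its 10 shares.
    stored, tolerated = kofn_tradeoff(3, 10, 1000000)
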
[doc: clean up formatting of doc/configuration.rst
zooko@zooko.com**20110804192722
Ignore-this: 7a98a3a8afb7e5441ff1f534211199ba
reflow to 77 chars line width, M-x whitespace-cleanup, blank line between name and definition
]
[Add test for webopen. fixes #1149
david-sarah@jacaranda.org**20110724211659
Ignore-this: 1e22853f7eb05e24c3141d56a513f661
]
[test_client.py: relax a check in test_create_drop_uploader so that it should pass on Python 2.4.x. refs #1429
david-sarah@jacaranda.org**20110810052504
Ignore-this: 1380749ceaf33c30e26c50d57476616c
]
[test/common_util.py: correct fix to mkdir_nonascii. refs #1472
david-sarah@jacaranda.org**20110810051906
Ignore-this: 93c0c33370bc47d95c26c4cce8e05290
]
[test/common_util.py: fix a typo. refs #1472
david-sarah@jacaranda.org**20110810044235
Ignore-this: f88643d7c82cb3577686d77bbff9e2bc
]
[test_client.py, test_drop_upload.py: fix pyflakes warnings.
david-sarah@jacaranda.org**20110810034505
Ignore-this: 1e2d71bf2f43d63cbb423d32a6f96793
]
[Factor out methods dealing with non-ASCII directories and filenames from test_drop_upload.py into common_util.py. refs #1429, #1472
david-sarah@jacaranda.org**20110810031558
Ignore-this: 3de8f945fa7a58fc318a1184bad0fd1a
]
[test_client.py: add a test that the drop-uploader is initialized correctly by client.py. Also give the DropUploader service a name, which is necessary for the test. refs #1429
david-sarah@jacaranda.org**20110810030538
Ignore-this: 13d511ea9bbe9da2dcffe4a91ce94eae
]
[drop-upload: rename 'start' method to 'startService', which is what you're supposed to use to start a Service. refs #1429
david-sarah@jacaranda.org**20110810030345
Ignore-this: d1f5e5c63937ea37be37324e2f1ae99d
]
[test_drop_upload.py: add comment explaining why we don't use FilePath.setContent. refs #1429
david-sarah@jacaranda.org**20110810025942
Ignore-this: b95358030b63cb467d1d7f1b9a9b6978
]
[test_drop_upload.py: fix some grammatical and spelling nits. refs #1429
david-sarah@jacaranda.org**20110809221231
Ignore-this: fd331acddd9f754173f274a34fe62f03
]
[drop-upload: report the configured local directory being absent differently from it being a file
zooko@zooko.com**20110809220930
Ignore-this: a08879100f5f20e609be3f0ffa3b25cc
refs #1429
]
[drop-upload: rename the 'upload.uri' parameter to 'upload.dircap', and a couple of cleanups to error messages. refs #1429
zooko@zooko.com**20110809220508
Ignore-this: 4846368cbe331e8653bdce1f314e276b
I rerecorded this patch, originally by David-Sarah, to use "darcs replace" instead of editing to do the renames. This uncovered one missed rename in Client.init_drop_uploader. (Which also means that code isn't exercised by the current unit tests.)
refs #1429
]
[drop-upload test for non-existent local dir separately from test for non-directory local dir
zooko@zooko.com**20110809220115
Ignore-this: cd85f345c02f5cb71b1c1527bd4ebddc
A candidate patch for #1429 has a bug when it is using FilePath.is_dir() to detect whether the configured local dir exists and is a directory. FilePath.is_dir() raises an exception, instead of returning False, if the thing doesn't exist. This test is to make sure that DropUploader.__init__ raises different exceptions for those two cases.
refs #1429
]
[drop-upload: unit tests for the configuration options being named "cap" instead of "uri"
zooko@zooko.com**20110809215913
Ignore-this: 958c78fffb3d76b3e4817647f824e7f9
This is a subset of a patch that David-Sarah attached to #1429. This is just the unit-tests part of that patch, and uses darcs record instead of hunks to change the names.
refs #1429
]
[src/allmydata/storage/server.py: use the filesystem of storage/shares/, rather than storage/, to calculate remaining space. fixes #1384
david-sarah@jacaranda.org**20110719022752
Ignore-this: a4781043cfd453dbb66ae4f108d80bea
]
[test_storage.py: test that we are using the filesystem of storage/shares/, rather than storage/, to calculate remaining space, and that the HTML status output reflects the values returned by fileutil.get_disk_stats. This version works with older versions of the mock library. refs #1384
david-sarah@jacaranda.org**20110809190722
Ignore-this: db447caca37a459ca49563efa58db58c
]
[Work around ref #1472 by having test_drop_upload delete the non-ASCII directories it creates.
david-sarah@jacaranda.org**20110809012334
Ignore-this: 5881fd5db419ba8ad12e0b2a82f6c4f0
]
[Remove all trailing whitespace from .py files.
david-sarah@jacaranda.org**20110809001117
Ignore-this: d2658b5ce44af70cc606ae4d3085b7cc
]
[test_drop_upload.py: fix unused imports. refs #1429
david-sarah@jacaranda.org**20110808235422
Ignore-this: 834f6b946bfea699d7d8c743edd66671
]
[Documentation for drop-upload frontend. refs #1429
david-sarah@jacaranda.org**20110808182146
Ignore-this: b33110834e586c0b784d1736c2af5779
]
[Drop-upload frontend, rerecorded for 1.9 beta (and correcting a minor mistake). Includes some fixes for Windows but not the Windows inotify implementation. fixes #1429
david-sarah@jacaranda.org**20110808234049
Ignore-this: 67f824c7f554e9a3a85f9fd2e1123d97
]
[node.py: ensure that client and introducer nodes record their port number and use that port on the next restart, fixing a regression caused by #1385. fixes #1469.
david-sarah@jacaranda.org**20110806221934
Ignore-this: 1aa9d340b6570320ab2f9edc89c9e0a8
]
[test_runner.py: fix a race condition in the test when NODE_URL_FILE is written before PORTNUM_FILE. refs #1469
david-sarah@jacaranda.org**20110806231842
Ignore-this: ab01ae7cec3a073e29eec473e64052a0
]
[test_runner.py: cleanups of HOTLINE_FILE writing and removal.
david-sarah@jacaranda.org**20110806231652
Ignore-this: 25f5c5d6f5d8faebb26a4ce80110a335
]
[test_runner.py: remove an unused constant.
david-sarah@jacaranda.org**20110806221416
Ignore-this: eade2695cbabbea9cafeaa8debe410bb
]
[node.py: fix the error path for a missing config option so that it works for a Unicode base directory.
david-sarah@jacaranda.org**20110806221007
Ignore-this: 4eb9cc04b2ce05182a274a0d69dafaf3
]
[test_runner.py: test that client and introducer nodes record their port number and use that port on the next restart. This tests for a regression caused by ref #1385.
david-sarah@jacaranda.org**20110806220635
Ignore-this: 40a0c040b142dbddd47e69b3c3712f5
]
[test_runner.py: fix a bug in CreateNode.do_create introduced in changeset [5114] when the tahoe.cfg file has been written with CRLF line endings. refs #1385
david-sarah@jacaranda.org**20110804003032
Ignore-this: 7b7afdcf99da6671afac2d42828883eb
]
[test_client.py: repair Basic.test_error_on_old_config_files. refs #1385
david-sarah@jacaranda.org**20110803235036
Ignore-this: 31e2a9c3febe55948de7e144353663e
]
[test_checker.py: increase timeout for TooParallel.test_immutable again. The ARM buildslave took 38 seconds, so 40 seconds is too close to the edge; make it 80.
david-sarah@jacaranda.org**20110803214042
Ignore-this: 2d8026a6b25534e01738f78d6c7495cb
]
[test_runner.py: fix RunNode.test_introducer to not rely on the mtime of introducer.furl to detect when the node has restarted. Instead we detect when node.url has been written. refs #1385
david-sarah@jacaranda.org**20110803180917
Ignore-this: 11ddc43b107beca42cb78af88c5c394c
]
[Further improve error message about old config files. refs #1385
david-sarah@jacaranda.org**20110803174546
Ignore-this: 9d6cc3c288d9863dce58faafb3855917
]
[Slightly improve error message about old config files (avoid unnecessary Unicode escaping). refs #1385
david-sarah@jacaranda.org**20110803163848
Ignore-this: a3e3930fba7ccf90b8db3d2ed5829df4
]
[test_checker.py: increase timeout for TooParallel.test_immutable (was consistently failing on ARM buildslave).
david-sarah@jacaranda.org**20110803163213
Ignore-this: d0efceaf12628e8791862b80c85b5d56
]
[Fix the bug that prevents an introducer from starting when introducer.furl already exists. Also remove some dead code that used to read old config files, and rename 'warn_about_old_config_files' to reflect that it's not a warning. refs #1385
david-sarah@jacaranda.org**20110803013212
Ignore-this: 2d6cd14bd06a7493b26f2027aff78f4d
]
[test_runner.py: modify RunNode.test_introducer to test that starting an introducer works when the introducer.furl file already exists. refs #1385
david-sarah@jacaranda.org**20110803012704
Ignore-this: 8cf7f27ac4bfbb5ad8ca4a974106d437
]
[verifier: correct a bug introduced in changeset [5106] that caused us to only verify the first block of a file. refs #1395
david-sarah@jacaranda.org**20110802172437
Ignore-this: 87fb77854a839ff217dce73544775b11
]
[test_repairer: add a deterministic test of share data corruption that always flips the bits of the last byte of the share data. refs #1395
david-sarah@jacaranda.org**20110802175841
Ignore-this: 72f54603785007e88220c8d979e08be7
]
[verifier: serialize the fetching of blocks within a share so that we don't use too much RAM
zooko@zooko.com**20110802063703
Ignore-this: debd9bac07dcbb6803f835a9e2eabaa1

Shares are still verified in parallel, but within a share, don't request a
block until the previous block has been verified and the memory we used to hold
it has been freed up.

Patch originally due to Brian. This version has a mockery-patchery-style test
which is "low tech" (it implements the patching inline in the test code instead
of using an extension of the mock.patch() function from the mock library) and
which unpatches in case of exception.

fixes #1395
]
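
The serialization described here is the standard Twisted idiom of chaining one Deferred per block rather than issuing all fetches at once. A hedged sketch with invented method names, not the real verifier code:

    from twisted.internet import defer

    def verify_share_serially(reader, num_blocks):
        d = defer.succeed(None)
        for blocknum in range(num_blocks):
            # Each fetch starts only after the previous block has been
            # verified and its memory released, rather than requesting
            # every block up front.
            d.addCallback(lambda ign, b=blocknum: reader.get_block(b))
            d.addCallback(reader.verify_block)
        return d
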
[add docs about timing-channel attacks
Brian Warner <warner@lothar.com>**20110802044541
Ignore-this: 73114d5f5ed9ce252597b707dba3a194
]
['test-coverage' now needs PYTHONPATH=. to find TOP/twisted/plugins/
Brian Warner <warner@lothar.com>**20110802041952
Ignore-this: d40f1f4cb426ea1c362fc961baedde2
]
[remove nodeid from WriteBucketProxy classes and customers
warner@lothar.com**20110801224317
Ignore-this: e55334bb0095de11711eeb3af827e8e8
refs #1363
]
[remove get_serverid() from ReadBucketProxy and customers, including Checker
warner@lothar.com**20110801224307
Ignore-this: 837aba457bc853e4fd413ab1a94519cb
and debug.py dump-share commands
refs #1363
]
[reject old-style (pre-Tahoe-LAFS-v1.3) configuration files
zooko@zooko.com**20110801232423
Ignore-this: b58218fcc064cc75ad8f05ed0c38902b
Check for the existence of any of them and if any are found, raise an exception which will abort the startup of the node.
This is a backwards-incompatible change for anyone who is still using old-style configuration files.
fixes #1385
]
[whitespace-cleanup
zooko@zooko.com**20110725015546
Ignore-this: 442970d0545183b97adc7bd66657876c
]
[tests: use fileutil.write() instead of open() to ensure timely close even without CPython-style reference counting
zooko@zooko.com**20110331145427
Ignore-this: 75aae4ab8e5fa0ad698f998aaa1888ce
Some of these already had an explicit close() but I went ahead and replaced them with fileutil.write() as well for the sake of uniformity.
]
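
The point of the helper is deterministic closing: on interpreters without CPython-style reference counting (e.g. PyPy or Jython), an unclosed file object may linger until a garbage-collection pass. A minimal sketch of the shape of such a helper; the real allmydata.util.fileutil.write() may differ in signature and details:

    def write(path, data, mode="wb"):
        f = open(path, mode)
        try:
            f.write(data)
        finally:
            # Close deterministically so the data is flushed promptly,
            # even without CPython-style reference counting.
            f.close()
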
[Address Kevan's comment in #776 about Options classes missed when adding 'self.command_name'. refs #776, #1359
david-sarah@jacaranda.org**20110801221317
Ignore-this: 8881d42cf7e6a1d15468291b0cb8fab9
]
[docs/frontends/webapi.rst: change some more instances of 'delete' or 'remove' to 'unlink', change some section titles, and use two blank lines between all sections. refs #776, #1104
david-sarah@jacaranda.org**20110801220919
Ignore-this: 572327591137bb05c24c44812d4b163f
]
[cleanup: implement rm as a synonym for unlink rather than vice-versa. refs #776
david-sarah@jacaranda.org**20110801220108
Ignore-this: 598dcbed870f4f6bb9df62de9111b343
]
[docs/webapi.rst: address Kevan's comments about use of 'delete' on ref #1104
david-sarah@jacaranda.org**20110801205356
Ignore-this: 4fbf03864934753c951ddeff64392491
]
[docs: some changes of 'delete' or 'rm' to 'unlink'. refs #1104
david-sarah@jacaranda.org**20110713002722
Ignore-this: 304d2a330d5e6e77d5f1feed7814b21c
]
[WUI: change the label of the button to unlink a file from 'del' to 'unlink'. Also change some internal names to 'unlink', and allow 't=unlink' as a synonym for 't=delete' in the web-API interface. Incidentally, improve a test to check for the rename button as well as the unlink button. fixes #1104
david-sarah@jacaranda.org**20110713001218
Ignore-this: 3eef6b3f81b94a9c0020a38eb20aa069
]
[src/allmydata/web/filenode.py: delete a stale comment that was made incorrect by changeset [3133].
david-sarah@jacaranda.org**20110801203009
Ignore-this: b3912e95a874647027efdc97822dd10e
]
[fix typo introduced during rebasing of 'remove get_serverid from
Brian Warner <warner@lothar.com>**20110801200341
Ignore-this: 4235b0f585c0533892193941dbbd89a8
DownloadStatus.add_dyhb_request and customers' patch, to fix test failure.
]
[remove get_serverid from DownloadStatus.add_dyhb_request and customers
zooko@zooko.com**20110801185401
Ignore-this: db188c18566d2d0ab39a80c9dc8f6be6
This patch is a rebase of a patch originally written by Brian. I didn't change any of the intent of Brian's patch, just ported it to current trunk.
refs #1363
]
[remove get_serverid from DownloadStatus.add_block_request and customers
zooko@zooko.com**20110801185344
Ignore-this: 8bfa8201d6147f69b0fbe31beea9c1e
This is a rebase of a patch Brian originally wrote. I haven't changed the intent of that patch, just ported it to trunk.
refs #1363
]
[apply zooko's advice: storage_client get_known_servers() returns a frozenset, caller sorts
warner@lothar.com**20110801174452
Ignore-this: 2aa13ea6cbed4e9084bd604bf8633692
refs #1363
]
[test_immutable.Test: rewrite to use NoNetworkGrid, now takes 2.7s not 97s
warner@lothar.com**20110801174444
Ignore-this: 54f30b5d7461d2b3514e2a0172f3a98c
remove now-unused ShareManglingMixin
refs #1363
]
[DownloadStatus.add_known_share wants to be used by Finder, web.status
warner@lothar.com**20110801174436
Ignore-this: 1433bcd73099a579abe449f697f35f9
refs #1363
]
[replace IServer.name() with get_name(), and get_longname()
warner@lothar.com**20110801174428
Ignore-this: e5a6f7f6687fd7732ddf41cfdd7c491b

This patch was originally written by Brian, but was re-recorded by Zooko to use
darcs replace instead of hunks for any file in which it would result in fewer
total hunks.
refs #1363
]
[upload.py: apply David-Sarah's advice: rename (un)contacted(2) trackers to first_pass/second_pass/next_pass
zooko@zooko.com**20110801174143
Ignore-this: e36e1420bba0620a0107bd90032a5198
This patch was written by Brian but was re-recorded by Zooko (with David-Sarah looking on) to use darcs replace instead of editing to rename the three variables to their new names.
refs #1363
]
[Coalesce multiple Share.loop() calls, make downloads faster. Closes #1268.
Brian Warner <warner@lothar.com>**20110801151834
Ignore-this: 48530fce36c01c0ff708f61c2de7e67a
]
[src/allmydata/_auto_deps.py: 'i686' is another way of spelling x86.
david-sarah@jacaranda.org**20110801034035
Ignore-this: 6971e0621db2fba794d86395b4d51038
]
[tahoe_rm.py: better error message when there is no path. refs #1292
david-sarah@jacaranda.org**20110122064212
Ignore-this: ff3bb2c9f376250e5fd77eb009e09018
]
[test_cli.py: Test for error message when 'tahoe rm' is invoked without a path. refs #1292
david-sarah@jacaranda.org**20110104105108
Ignore-this: 29ec2f2e0251e446db96db002ad5dd7d
]
[src/allmydata/__init__.py: suppress a spurious warning from 'bin/tahoe --version[-and-path]' about twisted-web and twisted-core packages.
david-sarah@jacaranda.org**20110801005209
Ignore-this: 50e7cd53cca57b1870d9df0361c7c709
]
[test_cli.py: use to_str on fields loaded using simplejson.loads in new tests. refs #1304
david-sarah@jacaranda.org**20110730032521
Ignore-this: d1d6dfaefd1b4e733181bf127c79c00b
]
[cli: make 'tahoe cp' overwrite mutable files in-place
Kevan Carstensen <kevan@isnotajoke.com>**20110729202039
Ignore-this: b2ad21a19439722f05c49bfd35b01855
]
[SFTP: write an error message to standard error for unrecognized shell commands. Change the existing message for shell sessions to be written to standard error, and refactor some duplicated code. Also change the lines of the error messages to end in CRLF, and take into account Kevan's review comments. fixes #1442, #1446
david-sarah@jacaranda.org**20110729233102
Ignore-this: d2f2bb4664f25007d1602bf7333e2cdd
]
[src/allmydata/scripts/cli.py: fix pyflakes warning.
david-sarah@jacaranda.org**20110728021402
Ignore-this: 94050140ddb99865295973f49927c509
]
[Fix the help synopses of CLI commands to include [options] in the right place. fixes #1359, fixes #636
david-sarah@jacaranda.org**20110724225440
Ignore-this: 2a8e488a5f63dabfa9db9efd83768a5
]
[encodingutil: argv and output encodings are always the same on all platforms. Lose the unnecessary generality of them being different. fixes #1120
david-sarah@jacaranda.org**20110629185356
Ignore-this: 5ebacbe6903dfa83ffd3ff8436a97787
]
[docs/man/tahoe.1: add man page. fixes #1420
david-sarah@jacaranda.org**20110724171728
Ignore-this: fc7601ec7f25494288d6141d0ae0004c
]
[Update the dependency on zope.interface to fix an incompatibility between Nevow and zope.interface 3.6.4. fixes #1435
david-sarah@jacaranda.org**20110721234941
Ignore-this: 2ff3fcfc030fca1a4d4c7f1fed0f2aa9
]
[frontends/ftpd.py: remove the check for IWriteFile.close since we're now guaranteed to be using Twisted >= 10.1 which has it.
david-sarah@jacaranda.org**20110722000320
Ignore-this: 55cd558b791526113db3f83c00ec328a
]
[Update the dependency on Twisted to >= 10.1. This allows us to simplify some documentation: it's no longer necessary to install pywin32 on Windows, or apply a patch to Twisted in order to use the FTP frontend. fixes #1274, #1438. refs #1429
david-sarah@jacaranda.org**20110721233658
Ignore-this: 81b41745477163c9b39c0b59db91cc62
]
[misc/build_helpers/run_trial.py: undo change to block pywin32 (it didn't work because run_trial.py is no longer used). refs #1334
david-sarah@jacaranda.org**20110722035402
Ignore-this: 5d03f544c4154f088e26c7107494bf39
]
[misc/build_helpers/run_trial.py: ensure that pywin32 is not on the sys.path when running the test suite. Includes some temporary debugging printouts that will be removed. refs #1334
david-sarah@jacaranda.org**20110722024907
Ignore-this: 5141a9f83a4085ed4ca21f0bbb20bb9c
]
[docs/running.rst: use 'tahoe run ~/.tahoe' instead of 'tahoe run' (the default is the current directory, unlike 'tahoe start').
david-sarah@jacaranda.org**20110718005949
Ignore-this: 81837fbce073e93d88a3e7ae3122458c
]
[docs/running.rst: say to put the introducer.furl in tahoe.cfg.
david-sarah@jacaranda.org**20110717194315
Ignore-this: 954cc4c08e413e8c62685d58ff3e11f3
]
[README.txt: say that quickstart.rst is in the docs directory.
david-sarah@jacaranda.org**20110717192400
Ignore-this: bc6d35a85c496b77dbef7570677ea42a
]
[setup: remove the dependency on foolscap's "secure_connections" extra, add a dependency on pyOpenSSL
zooko@zooko.com**20110717114226
Ignore-this: df222120d41447ce4102616921626c82
fixes #1383
]
[test_sftp.py cleanup: remove a redundant definition of failUnlessReallyEqual.
david-sarah@jacaranda.org**20110716181813
Ignore-this: 50113380b368c573f07ac6fe2eb1e97f
]
[docs: add missing link in NEWS.rst
zooko@zooko.com**20110712153307
Ignore-this: be7b7eb81c03700b739daa1027d72b35
]
[contrib: remove the contributed fuse modules and the entire contrib/ directory, which is now empty
zooko@zooko.com**20110712153229
Ignore-this: 723c4f9e2211027c79d711715d972c5
Also remove a couple of vestigial references to figleaf, which is long gone.
fixes #1409 (remove contrib/fuse)
]
[add Protovis.js-based download-status timeline visualization
Brian Warner <warner@lothar.com>**20110629222606
Ignore-this: 477ccef5c51b30e246f5b6e04ab4a127

provide status overlap info on the webapi t=json output, add decode/decrypt
rate tooltips, add zoomin/zoomout buttons
]
[add more download-status data, fix tests
Brian Warner <warner@lothar.com>**20110629222555
Ignore-this: e9e0b7e0163f1e95858aa646b9b17b8c
]
[prepare for viz: improve DownloadStatus events
Brian Warner <warner@lothar.com>**20110629222542
Ignore-this: 16d0bde6b734bb501aa6f1174b2b57be

consolidate IDownloadStatusHandlingConsumer stuff into DownloadNode
]
[docs: fix error in crypto specification that was noticed by Taylor R Campbell <campbell+tahoe@mumble.net>
zooko@zooko.com**20110629185711
Ignore-this: b921ed60c1c8ba3c390737fbcbe47a67
]
[setup.py: don't make bin/tahoe.pyscript executable. fixes #1347
david-sarah@jacaranda.org**20110130235809
Ignore-this: 3454c8b5d9c2c77ace03de3ef2d9398a
]
[Makefile: remove targets relating to 'setup.py check_auto_deps' which no longer exists. fixes #1345
david-sarah@jacaranda.org**20110626054124
Ignore-this: abb864427a1b91bd10d5132b4589fd90
]
[Makefile: add 'make check' as an alias for 'make test'. Also remove an unnecessary dependency of 'test' on 'build' and 'src/allmydata/_version.py'. fixes #1344
david-sarah@jacaranda.org**20110623205528
Ignore-this: c63e23146c39195de52fb17c7c49b2da
]
[Rename test_package_initialization.py to (much shorter) test_import.py .
Brian Warner <warner@lothar.com>**20110611190234
Ignore-this: 3eb3dbac73600eeff5cfa6b65d65822

The former name was making my 'ls' listings hard to read, by forcing them
down to just two columns.
]
[tests: fix tests to accommodate [20110611153758-92b7f-0ba5e4726fb6318dac28fb762a6512a003f4c430]
zooko@zooko.com**20110611163741
Ignore-this: 64073a5f39e7937e8e5e1314c1a302d1
Apparently none of the two authors (stercor, terrell), the three reviewers (warner, davidsarah, terrell), or the one committer (me) actually ran the tests. This is presumably due to #20.
fixes #1412
]
[wui: right-align the size column in the WUI
zooko@zooko.com**20110611153758
Ignore-this: 492bdaf4373c96f59f90581c7daf7cd7
Thanks to Ted "stercor" Rolle Jr. and Terrell Russell.
fixes #1412
]
[docs: three minor fixes
zooko@zooko.com**20110610121656
Ignore-this: fec96579eb95aceb2ad5fc01a814c8a2
CREDITS for arc for stats tweak
fix link to .zip file in quickstart.rst (thanks to ChosenOne for noticing)
English usage tweak
]
[docs/running.rst: fix stray HTML (not .rst) link noticed by ChosenOne.
david-sarah@jacaranda.org**20110609223719
Ignore-this: fc50ac9c94792dcac6f1067df8ac0d4a
]
[server.py: get_latencies now reports percentiles _only_ if there are sufficient observations for the interpretation of the percentile to be unambiguous.
wilcoxjg@gmail.com**20110527120135
Ignore-this: 2e7029764bffc60e26f471d7c2b6611e
interfaces.py: modified the return type of RIStatsProvider.get_stats to allow for None as a return value
NEWS.rst, stats.py: documentation of change to get_latencies
stats.rst: now documents percentile modification in get_latencies
test_storage.py: test_latencies now expects None in output categories that contain too few samples for the associated percentile to be unambiguously reported.
fixes #1392
]
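The rule described in the entry above (report a percentile only when there are enough observations) can be illustrated with a short sketch. This is illustrative only: the function name and the exact threshold are assumptions, not the actual server.py code.

    def percentile_or_none(samples, fraction):
        # Return the requested percentile of `samples`, or None when there
        # are too few observations for that percentile to be unambiguous.
        # e.g. a 0.99 (99th) percentile needs at least 100 samples before
        # the sample at that rank is a distinct observation.
        needed = int(round(1.0 / (1.0 - fraction)))
        if len(samples) < needed:
            return None
        ordered = sorted(samples)
        return ordered[int(len(ordered) * fraction)]

    # usage: percentile_or_none(latencies, 0.99) returns a float, or None
    # if fewer than 100 latency observations have been recorded
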
[docs: revert link in relnotes.txt from NEWS.rst to NEWS, since the former did not exist at revision 5000.
david-sarah@jacaranda.org**20110517011214
Ignore-this: 6a5be6e70241e3ec0575641f64343df7
]
[docs: convert NEWS to NEWS.rst and change all references to it.
david-sarah@jacaranda.org**20110517010255
Ignore-this: a820b93ea10577c77e9c8206dbfe770d
]
[docs: remove out-of-date docs/testgrid/introducer.furl and containing directory. fixes #1404
david-sarah@jacaranda.org**20110512140559
Ignore-this: 784548fc5367fac5450df1c46890876d
]
[scripts/common.py: don't assume that the default alias is always 'tahoe' (it is, but the API of get_alias doesn't say so). refs #1342
david-sarah@jacaranda.org**20110130164923
Ignore-this: a271e77ce81d84bb4c43645b891d92eb
]
[setup: don't catch all Exception from check_requirement(), but only PackagingError and ImportError
zooko@zooko.com**20110128142006
Ignore-this: 57d4bc9298b711e4bc9dc832c75295de
I noticed this because I had accidentally inserted a bug which caused AssertionError to be raised from check_requirement().
]
[M-x whitespace-cleanup
zooko@zooko.com**20110510193653
Ignore-this: dea02f831298c0f65ad096960e7df5c7
]
[docs: fix typo in running.rst, thanks to arch_o_median
zooko@zooko.com**20110510193633
Ignore-this: ca06de166a46abbc61140513918e79e8
]
[relnotes.txt: don't claim to work on Cygwin (which has been untested for some time). refs #1342
david-sarah@jacaranda.org**20110204204902
Ignore-this: 85ef118a48453d93fa4cddc32d65b25b
]
[relnotes.txt: forseeable -> foreseeable. refs #1342
david-sarah@jacaranda.org**20110204204116
Ignore-this: 746debc4d82f4031ebf75ab4031b3a9
]
[replace remaining .html docs with .rst docs
zooko@zooko.com**20110510191650
Ignore-this: d557d960a986d4ac8216d1677d236399
Remove install.html (long since deprecated).
Also replace some obsolete references to install.html with references to quickstart.rst.
Fix some broken internal references within docs/historical/historical_known_issues.txt.
Thanks to Ravi Pinjala and Patrick McDonald.
refs #1227
]
[docs: FTP-and-SFTP.rst: fix a minor error and update the information about which version of Twisted fixes #1297
zooko@zooko.com**20110428055232
Ignore-this: b63cfb4ebdbe32fb3b5f885255db4d39
]
[munin tahoe_files plugin: fix incorrect file count
francois@ctrlaltdel.ch**20110428055312
Ignore-this: 334ba49a0bbd93b4a7b06a25697aba34
fixes #1391
]
[corrected "k must never be smaller than N" to "k must never be greater than N"
secorp@allmydata.org**20110425010308
Ignore-this: 233129505d6c70860087f22541805eac
]
[Fix a test failure in test_package_initialization on Python 2.4.x due to exceptions being stringified differently than in later versions of Python. refs #1389
david-sarah@jacaranda.org**20110411190738
Ignore-this: 7847d26bc117c328c679f08a7baee519
]
[tests: add test for including the ImportError message and traceback entry in the summary of errors from importing dependencies. refs #1389
david-sarah@jacaranda.org**20110410155844
Ignore-this: fbecdbeb0d06a0f875fe8d4030aabafa
]
[allmydata/__init__.py: preserve the message and last traceback entry (file, line number, function, and source line) of ImportErrors in the package versions string. fixes #1389
david-sarah@jacaranda.org**20110410155705
Ignore-this: 2f87b8b327906cf8bfca9440a0904900
]
[remove unused variable detected by pyflakes
zooko@zooko.com**20110407172231
Ignore-this: 7344652d5e0720af822070d91f03daf9
]
[allmydata/__init__.py: Nicer reporting of unparseable version numbers in dependencies. fixes #1388
david-sarah@jacaranda.org**20110401202750
Ignore-this: 9c6bd599259d2405e1caadbb3e0d8c7f
]
[update FTP-and-SFTP.rst: the necessary patch is included in Twisted-10.1
Brian Warner <warner@lothar.com>**20110325232511
Ignore-this: d5307faa6900f143193bfbe14e0f01a
]
[control.py: remove all uses of s.get_serverid()
warner@lothar.com**20110227011203
Ignore-this: f80a787953bd7fa3d40e828bde00e855
]
[web: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011159
Ignore-this: a9347d9cf6436537a47edc6efde9f8be
]
[immutable/downloader/fetcher.py: remove all get_serverid() calls
warner@lothar.com**20110227011156
Ignore-this: fb5ef018ade1749348b546ec24f7f09a
]
[immutable/downloader/fetcher.py: fix diversity bug in server-response handling
warner@lothar.com**20110227011153
Ignore-this: bcd62232c9159371ae8a16ff63d22c1b

When blocks terminate (either COMPLETE or CORRUPT/DEAD/BADSEGNUM), the
_shares_from_server dict was being popped incorrectly (using shnum as the
index instead of serverid). I'm still thinking through the consequences of
this bug. It was probably benign and really hard to detect. I think it would
cause us to incorrectly believe that we're pulling too many shares from a
server, and thus prefer a different server rather than asking for a second
share from the first server. The diversity code is intended to spread out the
number of shares simultaneously being requested from each server, but with
this bug, it might be spreading out the total number of shares requested at
all, not just simultaneously. (note that SegmentFetcher is scoped to a single
segment, so the effect doesn't last very long).
]
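The bookkeeping mistake described in the entry above is easy to picture with a small sketch. The names here are illustrative stand-ins, not the actual SegmentFetcher code; the point is that a dict keyed by serverid was being popped with a shnum, so the intended entry was never removed:

    # _shares_from_server maps serverid -> set of share numbers in flight
    _shares_from_server = {}

    def request_started(serverid, shnum):
        _shares_from_server.setdefault(serverid, set()).add(shnum)

    def block_terminated_buggy(serverid, shnum):
        # BUG: shnum (an int) is used as the key into a serverid-keyed
        # dict, so this almost always removes nothing.
        _shares_from_server.pop(shnum, None)

    def block_terminated_fixed(serverid, shnum):
        # index by serverid, then drop the finished share number
        shnums = _shares_from_server.get(serverid)
        if shnums is not None:
            shnums.discard(shnum)
            if not shnums:
                del _shares_from_server[serverid]

With the buggy version the per-server count only grows, which matches the description above: the fetcher would overestimate how many shares it was already pulling from a server and prefer a different one.
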
[immutable/downloader/share.py: reduce get_serverid(), one left, update ext deps
warner@lothar.com**20110227011150
Ignore-this: d8d56dd8e7b280792b40105e13664554

test_download.py: create+check MyShare instances better, make sure they share
Server objects, now that finder.py cares
]
[immutable/downloader/finder.py: reduce use of get_serverid(), one left
warner@lothar.com**20110227011146
Ignore-this: 5785be173b491ae8a78faf5142892020
]
[immutable/offloaded.py: reduce use of get_serverid() a bit more
warner@lothar.com**20110227011142
Ignore-this: b48acc1b2ae1b311da7f3ba4ffba38f
]
[immutable/upload.py: reduce use of get_serverid()
warner@lothar.com**20110227011138
Ignore-this: ffdd7ff32bca890782119a6e9f1495f6
]
[immutable/checker.py: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011134
Ignore-this: e480a37efa9e94e8016d826c492f626e
]
[add remaining get_* methods to storage_client.Server, NoNetworkServer, and
warner@lothar.com**20110227011132
Ignore-this: 6078279ddf42b179996a4b53bee8c421
MockIServer stubs
]
[upload.py: rearrange _make_trackers a bit, no behavior changes
warner@lothar.com**20110227011128
Ignore-this: 296d4819e2af452b107177aef6ebb40f
]
[happinessutil.py: finally rename merge_peers to merge_servers
warner@lothar.com**20110227011124
Ignore-this: c8cd381fea1dd888899cb71e4f86de6e
]
[test_upload.py: factor out FakeServerTracker
warner@lothar.com**20110227011120
Ignore-this: 6c182cba90e908221099472cc159325b
]
[test_upload.py: server-vs-tracker cleanup
warner@lothar.com**20110227011115
Ignore-this: 2915133be1a3ba456e8603885437e03
]
[happinessutil.py: server-vs-tracker cleanup
warner@lothar.com**20110227011111
Ignore-this: b856c84033562d7d718cae7cb01085a9
]
[upload.py: more tracker-vs-server cleanup
warner@lothar.com**20110227011107
Ignore-this: bb75ed2afef55e47c085b35def2de315
]
[upload.py: fix var names to avoid confusion between 'trackers' and 'servers'
warner@lothar.com**20110227011103
Ignore-this: 5d5e3415b7d2732d92f42413c25d205d
]
[refactor: s/peer/server/ in immutable/upload, happinessutil.py, test_upload
warner@lothar.com**20110227011100
Ignore-this: 7ea858755cbe5896ac212a925840fe68

No behavioral changes, just updating variable/method names and log messages.
The effects outside these three files should be minimal: some exception
messages changed (to say "server" instead of "peer"), and some internal class
names were changed. A few things still use "peer" to minimize external
changes, like UploadResults.timings["peer_selection"] and
happinessutil.merge_peers, which can be changed later.
]
[storage_client.py: clean up test_add_server/test_add_descriptor, remove .test_servers
warner@lothar.com**20110227011056
Ignore-this: efad933e78179d3d5fdcd6d1ef2b19cc
]
[test_client.py, upload.py: remove KiB/MiB/etc constants, and other dead code
warner@lothar.com**20110227011051
Ignore-this: dc83c5794c2afc4f81e592f689c0dc2d
]
[test: increase timeout on a network test because Francois's ARM machine hit that timeout
zooko@zooko.com**20110317165909
Ignore-this: 380c345cdcbd196268ca5b65664ac85b
I'm skeptical that the test was proceeding correctly but ran out of time. It seems more likely that it had gotten hung. But if we raise the timeout to an even more extravagant number then we can be even more certain that the test was never going to finish.
]
[docs/configuration.rst: add a "Frontend Configuration" section
Brian Warner <warner@lothar.com>**20110222014323
Ignore-this: 657018aa501fe4f0efef9851628444ca

this points to docs/frontends/*.rst, which were previously underlinked
]
[web/filenode.py: avoid calling req.finish() on closed HTTP connections. Closes #1366
"Brian Warner <warner@lothar.com>"**20110221061544
Ignore-this: 799d4de19933f2309b3c0c19a63bb888
]
[Add unit tests for cross_check_pkg_resources_versus_import, and a regression test for ref #1355. This requires a little refactoring to make it testable.
david-sarah@jacaranda.org**20110221015817
Ignore-this: 51d181698f8c20d3aca58b057e9c475a
]
[allmydata/__init__.py: .name was used in place of the correct .__name__ when printing an exception. Also, robustify string formatting by using %r instead of %s in some places. fixes #1355.
david-sarah@jacaranda.org**20110221020125
Ignore-this: b0744ed58f161bf188e037bad077fc48
]
[Refactor StorageFarmBroker handling of servers
Brian Warner <warner@lothar.com>**20110221015804
Ignore-this: 842144ed92f5717699b8f580eab32a51

Pass around IServer instance instead of (peerid, rref) tuple. Replace
"descriptor" with "server". Other replacements:

get_all_servers -> get_connected_servers/get_known_servers
get_servers_for_index -> get_servers_for_psi (now returns IServers)

This change still needs to be pushed further down: lots of code is now
getting the IServer and then distributing (peerid, rref) internally.
Instead, it ought to distribute the IServer internally and delay
extracting a serverid or rref until the last moment.

no_network.py was updated to retain parallelism.
]
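The shape of that refactoring can be sketched briefly. The class below is a hypothetical stand-in for the IServer instances the entry describes, not the actual storage_client.py code:

    class ServerHandle:
        # wraps what used to travel through the code as a (peerid, rref)
        # tuple, so callers can pass one object around instead
        def __init__(self, serverid, rref):
            self._serverid = serverid
            self._rref = rref

        def get_serverid(self):
            return self._serverid

        def get_rref(self):
            return self._rref

    # before: upload(peerid, rref, ...)  -- every caller unpacks the tuple
    # after:  upload(server, ...)        -- callers pass the handle through
    #         and extract the serverid or rref only at the last moment
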
[TAG allmydata-tahoe-1.8.2
warner@lothar.com**20110131020101]
Patch bundle hash:
560431b78d817c16e2332809410a03a184cf6910