Wed Aug 3 00:48:07 MDT 2011 wilcoxjg@gmail.com
* the file test_backends has been added.

Wed Aug 3 14:30:27 MDT 2011 wilcoxjg@gmail.com
* Added directories and new modules for the null backend.

Wed Aug 3 14:50:28 MDT 2011 wilcoxjg@gmail.com
* update common goal to pass nullserver test

Wed Aug 3 15:02:32 MDT 2011 wilcoxjg@gmail.com
* the backend-pluggable version of storage/server.py

Wed Aug 3 15:08:39 MDT 2011 wilcoxjg@gmail.com
* add interfaces to null/core.py

Wed Aug 3 15:10:53 MDT 2011 wilcoxjg@gmail.com
* changes to immutable.py include removal of ImmutableShare, which now lives in the backend-specific core.py module.

New patches:

[the file test_backends has been added.
wilcoxjg@gmail.com**20110803064807
Ignore-this: d25e943f69da4f224acd809fc6e23c3b
] {
addfile ./src/allmydata/test/test_backends.py
hunk ./src/allmydata/test/test_backends.py 1
+import os, stat
+from twisted.trial import unittest
+from allmydata.util.log import msg
+from allmydata.test.common_util import ReallyEqualMixin
+import mock
+# This is the code that we're going to be testing.
+from allmydata.storage.server import StorageServer
+from allmydata.storage.backends.das.core import DASCore
+from allmydata.storage.backends.null.core import NullCore
+from allmydata.storage.common import si_si2dir
+# The following share file content was generated with
+# storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
+# with share data == 'a'. The total size of this input
+# is 85 bytes.
+shareversionnumber = '\x00\x00\x00\x01'
+sharedatalength = '\x00\x00\x00\x01'
+numberofleases = '\x00\x00\x00\x01'
+shareinputdata = 'a'
+ownernumber = '\x00\x00\x00\x00'
+renewsecret = 'x'*32
+cancelsecret = 'y'*32
+expirationtime = '\x00(\xde\x80'
+nextlease = ''
+containerdata = shareversionnumber + sharedatalength + numberofleases
+client_data = shareinputdata + ownernumber + renewsecret + \
+              cancelsecret + expirationtime + nextlease
+share_data = containerdata + client_data
+testnodeid = 'testnodeidxxxxxxxxxx'
+expiration_policy = {'enabled' : False,
+                     'mode' : 'age',
+                     'override_lease_duration' : None,
+                     'cutoff_date' : None,
+                     'sharetypes' : None}
+
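+# Illustrative sanity check of the layout documented above: the container
+# header is 4+4+4 = 12 bytes and the client data is 1+4+32+32+4+0 = 73
+# bytes, which gives the 85-byte total mentioned in the comment.
+assert len(share_data) == 85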
+
+class MockFileSystem(unittest.TestCase):
+    """ I simulate a filesystem that the code under test can use. I simulate
+    just the parts of the filesystem that the current implementation of the
+    DAS backend needs. """
+    def setUp(self):
+        # Create the patchers and the side effects that fake the filesystem.
+        msg("%s.setUp()" % (self,))
+        self.mockedfilepaths = {}
+        # Keys are pathnames, values are MockFilePath objects. This is necessary because
+        # MockFilePath behavior sometimes depends on the filesystem. Where it does,
+        # self.mockedfilepaths has the relevant info.
+        self.storedir = MockFilePath('teststoredir', self.mockedfilepaths)
+        self.basedir = self.storedir.child('shares')
+        self.baseincdir = self.basedir.child('incoming')
+        self.sharedirfinalname = self.basedir.child('or').child('orsxg5dtorxxeylhmvpws3temv4a')
+        self.sharedirincomingname = self.baseincdir.child('or').child('orsxg5dtorxxeylhmvpws3temv4a')
+        self.shareincomingname = self.sharedirincomingname.child('0')
+        self.sharefinalname = self.sharedirfinalname.child('0')
+
+        self.FilePathFake = mock.patch('allmydata.storage.backends.das.core.FilePath', new=MockFilePath)
+        FakePath = self.FilePathFake.__enter__()
+
+        self.BCountingCrawler = mock.patch('allmydata.storage.backends.das.core.BucketCountingCrawler')
+        FakeBCC = self.BCountingCrawler.__enter__()
+        FakeBCC.side_effect = self.call_FakeBCC
+
+        self.LeaseCheckingCrawler = mock.patch('allmydata.storage.backends.das.core.LeaseCheckingCrawler')
+        FakeLCC = self.LeaseCheckingCrawler.__enter__()
+        FakeLCC.side_effect = self.call_FakeLCC
+
+        self.get_available_space = mock.patch('allmydata.util.fileutil.get_available_space')
+        GetSpace = self.get_available_space.__enter__()
+        GetSpace.side_effect = self.call_get_available_space
+
+        self.statforsize = mock.patch('allmydata.storage.backends.das.core.filepath.stat')
+        getsize = self.statforsize.__enter__()
+        getsize.side_effect = self.call_statforsize
+
+    def call_FakeBCC(self, StateFile):
+        return MockBCC()
+
+    def call_FakeLCC(self, StateFile, HistoryFile, ExpirationPolicy):
+        return MockLCC()
+
+    def call_get_available_space(self, storedir, reservedspace):
+        # The input vector has an input size of 85.
+        return 85 - reservedspace
+
+    def call_statforsize(self, fakefpname):
+        return self.mockedfilepaths[fakefpname].fileobject.size()
+
+    def tearDown(self):
+        msg("%s.tearDown()" % (self,))
+        FakePath = self.FilePathFake.__exit__()
+        self.mockedfilepaths = {}
+
+
+class MockFilePath:
+    def __init__(self, pathstring, ffpathsenvironment, existence=False):
+        # I can't just make the values MockFileObjects because they may be directories.
+        self.mockedfilepaths = ffpathsenvironment
+        self.path = pathstring
+        self.existence = existence
+        if self.path not in self.mockedfilepaths:
+            # The first MockFilePath object is special
+            self.mockedfilepaths[self.path] = self
+            self.fileobject = None
+        else:
+            self.fileobject = self.mockedfilepaths[self.path].fileobject
+        self.spawn = {}
+        self.antecedent = os.path.dirname(self.path)
+
+    def setContent(self, contentstring):
+        # This method rewrites the data in the file that corresponds to its path
+        # name, whether it preexisted or not.
+        self.fileobject = MockFileObject(contentstring)
+        self.existence = True
+        self.mockedfilepaths[self.path].fileobject = self.fileobject
+        self.mockedfilepaths[self.path].existence = self.existence
+        self.setparents()
+
+    def create(self):
+        # This method chokes if there's a pre-existing file!
+        if self.mockedfilepaths[self.path].fileobject:
+            raise OSError
+        else:
+            # Create an empty file.
+            self.fileobject = MockFileObject('')
+            self.existence = True
+            self.mockedfilepaths[self.path].fileobject = self.fileobject
+            self.mockedfilepaths[self.path].existence = self.existence
+            self.setparents()
+
+    def open(self, mode='r'):
+        # XXX Makes no use of mode.
+        if not self.mockedfilepaths[self.path].fileobject:
+            # If there's no fileobject there already then make one and put it there.
+            self.fileobject = MockFileObject()
+            self.existence = True
+            self.mockedfilepaths[self.path].fileobject = self.fileobject
+            self.mockedfilepaths[self.path].existence = self.existence
+        else:
+            # Otherwise get a ref to it.
+            self.fileobject = self.mockedfilepaths[self.path].fileobject
+            self.existence = self.mockedfilepaths[self.path].existence
+        return self.fileobject.open(mode)
+
+    def child(self, childstring):
+        arg2child = os.path.join(self.path, childstring)
+        child = MockFilePath(arg2child, self.mockedfilepaths)
+        return child
+
+    def children(self):
+        childrenfromffs = [ffp for ffp in self.mockedfilepaths.values() if ffp.path.startswith(self.path)]
+        childrenfromffs = [ffp for ffp in childrenfromffs if not ffp.path.endswith(self.path)]
+        childrenfromffs = [ffp for ffp in childrenfromffs if ffp.exists()]
+        self.spawn = frozenset(childrenfromffs)
+        return self.spawn
+
+    def parent(self):
+        if self.antecedent in self.mockedfilepaths:
+            parent = self.mockedfilepaths[self.antecedent]
+        else:
+            parent = MockFilePath(self.antecedent, self.mockedfilepaths)
+        return parent
+
+    def parents(self):
+        antecedents = []
+        def f(fps, antecedents):
+            newfps = os.path.split(fps)[0]
+            if newfps:
+                antecedents.append(newfps)
+                f(newfps, antecedents)
+        f(self.path, antecedents)
+        return antecedents
+
+    def setparents(self):
+        for fps in self.parents():
+            if fps not in self.mockedfilepaths:
+                self.mockedfilepaths[fps] = MockFilePath(fps, self.mockedfilepaths, existence=True)
+
+    def basename(self):
+        return os.path.split(self.path)[1]
+
+    def moveTo(self, newffp):
+        # XXX Makes no distinction between file and directory arguments; this is a deviation from filepath.moveTo.
+        if self.mockedfilepaths[newffp.path].exists():
+            raise OSError
+        else:
+            self.mockedfilepaths[newffp.path] = self
+            self.path = newffp.path
+
+    def getsize(self):
+        return self.fileobject.getsize()
+
+    def exists(self):
+        return self.existence
+
+    def isdir(self):
+        return True
+
+    def makedirs(self):
+        # XXX These methods assume that fp_<FOO> functions in fileutil will be tested elsewhere!
+        pass
+
+    def remove(self):
+        pass
+
+
+class MockFileObject:
+    def __init__(self, contentstring=''):
+        self.buffer = contentstring
+        self.pos = 0
+    def open(self, mode='r'):
+        return self
+    def write(self, instring):
+        begin = self.pos
+        padlen = begin - len(self.buffer)
+        if padlen > 0:
+            self.buffer += '\x00' * padlen
+        end = self.pos + len(instring)
+        self.buffer = self.buffer[:begin]+instring+self.buffer[end:]
+        self.pos = end
+    def close(self):
+        self.pos = 0
+    def seek(self, pos):
+        self.pos = pos
+    def read(self, numberbytes):
+        return self.buffer[self.pos:self.pos+numberbytes]
+    def tell(self):
+        return self.pos
+    def size(self):
+        # XXX This method A: is not to be found in a real file, and B: is part of a wild-mung-up of filepath.stat!
+        # XXX Finally we shall hopefully use a getsize method soon; must consult first though.
+        # Hmmm... perhaps we need to sometimes stat the address when there's not a mockfileobject present?
+        return {stat.ST_SIZE:len(self.buffer)}
+    def getsize(self):
+        return len(self.buffer)
+
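+# Illustrative round trip: write() pads with NUL bytes when the write
+# position is past the current end of the buffer, mirroring how a real
+# file behaves when written past EOF:
+#   fobj = MockFileObject()
+#   fobj.write('abc'); fobj.seek(1); assert fobj.read(2) == 'bc'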
+class MockBCC:
+    def setServiceParent(self, Parent):
+        pass
+
+
+class MockLCC:
+    def setServiceParent(self, Parent):
+        pass
+
+
+class TestServerWithNullBackend(unittest.TestCase, ReallyEqualMixin):
+    """ NullBackend is just for testing and executable documentation, so
+    this test is actually a test of StorageServer in which we're using
+    NullBackend as helper code for the test, rather than a test of
+    NullBackend. """
+    def setUp(self):
+        self.ss = StorageServer(testnodeid, backend=NullCore())
+
+    @mock.patch('os.mkdir')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir):
+        """ Write a new share. """
+
+        alreadygot, bs = self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
+        bs[0].remote_write(0, 'a')
+        self.failIf(mockisdir.called)
+        self.failIf(mocklistdir.called)
+        self.failIf(mockopen.called)
+        self.failIf(mockmkdir.called)
+
+
+class TestServerConstruction(MockFileSystem, ReallyEqualMixin):
+    def test_create_server_fs_backend(self):
+        """ This tests whether a server instance can be constructed with a
+        filesystem backend. To pass the test, it mustn't use the filesystem
+        outside of its configured storedir. """
+        StorageServer(testnodeid, backend=DASCore(self.storedir, expiration_policy))
+
+
+class TestServerAndFSBackend(MockFileSystem, ReallyEqualMixin):
+    """ This tests both the StorageServer and the DAS backend together. """
+    def setUp(self):
+        MockFileSystem.setUp(self)
+        try:
+            self.backend = DASCore(self.storedir, expiration_policy)
+            self.ss = StorageServer(testnodeid, self.backend)
+
+            self.backendwithreserve = DASCore(self.storedir, expiration_policy, reserved_space=1)
+            self.sswithreserve = StorageServer(testnodeid, self.backendwithreserve)
+        except:
+            MockFileSystem.tearDown(self)
+            raise
+
+    @mock.patch('time.time')
+    @mock.patch('allmydata.util.fileutil.get_available_space')
+    def test_out_of_space(self, mockget_available_space, mocktime):
+        mocktime.return_value = 0
+
+        def call_get_available_space(dir, reserve):
+            return 0
+
+        mockget_available_space.side_effect = call_get_available_space
+        alreadygotc, bsc = self.sswithreserve.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
+        self.failUnlessReallyEqual(bsc, {})
+
+    @mock.patch('time.time')
+    def test_write_and_read_share(self, mocktime):
+        """
+        Write a new share, read it, and test the server's (and FS backend's)
+        handling of simultaneous and successive attempts to write the same
+        share.
+        """
+        mocktime.return_value = 0
+        # Inspect incoming and fail unless it's empty.
+        incomingset = self.ss.backend.get_incoming_shnums('teststorage_index')
+
+        self.failUnlessReallyEqual(incomingset, frozenset())
+
+        # Populate incoming with the sharenum: 0.
+        alreadygot, bs = self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, frozenset((0,)), 1, mock.Mock())
+
+        # This is a transparent-box test: inspect incoming and fail unless the sharenum: 0 is listed there.
+        self.failUnlessReallyEqual(self.ss.backend.get_incoming_shnums('teststorage_index'), frozenset((0,)))
+
+        # Attempt to create a second share writer with the same sharenum.
+        alreadygota, bsa = self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, frozenset((0,)), 1, mock.Mock())
+
+        # Show that no sharewriter results from a remote_allocate_buckets
+        # with the same si and sharenum, until BucketWriter.remote_close()
+        # has been called.
+        self.failIf(bsa)
+
+        # Test allocated size.
+        spaceint = self.ss.allocated_size()
+        self.failUnlessReallyEqual(spaceint, 1)
+
+        # Write 'a' to shnum 0. Only tested together with close and read.
+        bs[0].remote_write(0, 'a')
+
+        # Preclose: inspect final, failUnless nothing there.
+        self.failUnlessReallyEqual(len(list(self.backend.get_shares('teststorage_index'))), 0)
+        bs[0].remote_close()
+
+        # Postclose: (omnibus) failUnless written data is in final.
+        sharesinfinal = list(self.backend.get_shares('teststorage_index'))
+        self.failUnlessReallyEqual(len(sharesinfinal), 1)
+        contents = sharesinfinal[0].read_share_data(0, 73)
+        self.failUnlessReallyEqual(contents, client_data)
+
+        # Exercise the case that the share we're asking to allocate is
+        # already (completely) uploaded.
+        self.ss.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
+
+
+    def test_read_old_share(self):
+        """ This tests whether the code correctly finds and reads
+        shares written out by old (Tahoe-LAFS <= v1.8.2)
+        servers. There is a similar test in test_download, but that one
+        is from the perspective of the client and exercises a deeper
+        stack of code. This one is for exercising just the
+        StorageServer object. """
+        # Construct a file with the appropriate contents in the mockfilesystem.
+        datalen = len(share_data)
+        finalhome = si_si2dir(self.basedir, 'teststorage_index').child(str(0))
+        finalhome.setContent(share_data)
+
+        # Now begin the test.
+        bs = self.ss.remote_get_buckets('teststorage_index')
+
+        self.failUnlessEqual(len(bs), 1)
+        b = bs['0']
+        # These should match by definition; the next two cases cover behaviors that are not completely unambiguous.
+        self.failUnlessReallyEqual(b.remote_read(0, datalen), client_data)
+        # If you try to read past the end you get as much data as is there.
+        self.failUnlessReallyEqual(b.remote_read(0, datalen+20), client_data)
+        # If you start reading past the end of the file you get the empty string.
+        self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '')
}
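
The test above drives StorageServer entirely through mocks. A minimal sketch
of the same wiring outside the test harness, assuming the import paths
introduced by this patch series:

  from allmydata.storage.server import StorageServer
  from allmydata.storage.backends.null.core import NullCore
  import mock

  ss = StorageServer('testnodeidxxxxxxxxxx', backend=NullCore())
  alreadygot, writers = ss.remote_allocate_buckets(
      'teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
  writers[0].remote_write(0, 'a')
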
[Added directories and new modules for the null backend.
wilcoxjg@gmail.com**20110803203027
Ignore-this: 7f53da28245add96cd987c03e755ac
] {
hunk ./src/allmydata/interfaces.py 270
         store that on disk.
         """
 
+class IStorageBackend(Interface):
+    """
+    Objects of this kind live on the server side and are used by the
+    storage server object.
+    """
+    def get_available_space(self, reserved_space):
+        """ Returns available space for share storage in bytes, or
+        None if this information is not available or if the available
+        space is unlimited.
+
+        If the backend is configured for read-only mode then this will
+        return 0.
+
+        reserved_space is how many bytes to subtract from the answer, so
+        you can pass how many bytes you would like to leave unused on this
+        filesystem as reserved_space. """
+
+    def get_bucket_shares(self):
+        """XXX"""
+
+    def get_share(self):
+        """XXX"""
+
+    def make_bucket_writer(self):
+        """XXX"""
+
+class IStorageBackendShare(Interface):
+    """
+    This object contains up to all of the share data. It is intended
+    for lazy evaluation, such that in many use cases substantially less
+    than all of the share data will be accessed.
+    """
+    def is_complete(self):
+        """
+        Returns the share state, or None if the share does not exist.
+        """
+
 class IStorageBucketWriter(Interface):
     """
     Objects of this kind live on the client side.
adddir ./src/allmydata/storage/backends
addfile ./src/allmydata/storage/backends/__init__.py
addfile ./src/allmydata/storage/backends/base.py
hunk ./src/allmydata/storage/backends/base.py 1
+from twisted.application import service
+
+class Backend(service.MultiService):
+    def __init__(self):
+        service.MultiService.__init__(self)
adddir ./src/allmydata/storage/backends/null
addfile ./src/allmydata/storage/backends/null/__init__.py
addfile ./src/allmydata/storage/backends/null/core.py
hunk ./src/allmydata/storage/backends/null/core.py 1
+from allmydata.storage.backends.base import Backend
+from allmydata.storage.immutable import BucketWriter, BucketReader
+
+class NullCore(Backend):
+    def __init__(self):
+        Backend.__init__(self)
+
+    def get_available_space(self):
+        return None
+
+    def get_shares(self, storage_index):
+        return set()
+
+    def get_share(self, storage_index, sharenum):
+        return None
+
+    def make_bucket_writer(self, storageindex, shnum, max_space_per_bucket, lease_info, canary):
+        immutableshare = ImmutableShare()
+        return BucketWriter(self.ss, immutableshare, max_space_per_bucket, lease_info, canary)
+
+    def set_storage_server(self, ss):
+        self.ss = ss
+
+    def get_incoming_shnums(self, storageindex):
+        return frozenset()
+
+class ImmutableShare:
+    sharetype = "immutable"
+
+    def __init__(self):
+        """ If max_size is not None then I won't allow more than
+        max_size to be written to me. If create=True then max_size
+        must not be None. """
+        pass
+
+    def get_shnum(self):
+        return self.shnum
+
+    def unlink(self):
+        os.unlink(self.fname)
+
+    def read_share_data(self, offset, length):
+        precondition(offset >= 0)
+        # Reads beyond the end of the data are truncated. Reads that start
+        # beyond the end of the data return an empty string.
+        seekpos = self._data_offset+offset
+        fsize = os.path.getsize(self.fname)
+        actuallength = max(0, min(length, fsize-seekpos))
+        if actuallength == 0:
+            return ""
+        f = open(self.fname, 'rb')
+        f.seek(seekpos)
+        return f.read(actuallength)
+
+    def write_share_data(self, offset, data):
+        pass
+
+    def _write_lease_record(self, f, lease_number, lease_info):
+        offset = self._lease_offset + lease_number * self.LEASE_SIZE
+        f.seek(offset)
+        assert f.tell() == offset
+        f.write(lease_info.to_immutable_data())
+
+    def _read_num_leases(self, f):
+        f.seek(0x08)
+        (num_leases,) = struct.unpack(">L", f.read(4))
+        return num_leases
+
+    def _write_num_leases(self, f, num_leases):
+        f.seek(0x08)
+        f.write(struct.pack(">L", num_leases))
+
+    def _truncate_leases(self, f, num_leases):
+        f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)
+
+    def get_leases(self):
+        """Yields a LeaseInfo instance for all leases."""
+        f = open(self.fname, 'rb')
+        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
+        f.seek(self._lease_offset)
+        for i in range(num_leases):
+            data = f.read(self.LEASE_SIZE)
+            if data:
+                yield LeaseInfo().from_immutable_data(data)
+
+    def add_lease(self, lease):
+        pass
+
+    def renew_lease(self, renew_secret, new_expire_time):
+        for i,lease in enumerate(self.get_leases()):
+            if constant_time_compare(lease.renew_secret, renew_secret):
+                # yup. See if we need to update the owner time.
+                if new_expire_time > lease.expiration_time:
+                    # yes
+                    lease.expiration_time = new_expire_time
+                    f = open(self.fname, 'rb+')
+                    self._write_lease_record(f, i, lease)
+                    f.close()
+                return
+        raise IndexError("unable to renew non-existent lease")
+
+    def add_or_renew_lease(self, lease_info):
+        try:
+            self.renew_lease(lease_info.renew_secret,
+                             lease_info.expiration_time)
+        except IndexError:
+            self.add_lease(lease_info)
+
+
+    def cancel_lease(self, cancel_secret):
+        """Remove a lease with the given cancel_secret. If the last lease is
+        cancelled, the file will be removed. Return the number of bytes that
+        were freed (by truncating the list of leases, and possibly by
+        deleting the file). Raise IndexError if there was no lease with the
+        given cancel_secret.
+        """
+
+        leases = list(self.get_leases())
+        num_leases_removed = 0
+        for i,lease in enumerate(leases):
+            if constant_time_compare(lease.cancel_secret, cancel_secret):
+                leases[i] = None
+                num_leases_removed += 1
+        if not num_leases_removed:
+            raise IndexError("unable to find matching lease to cancel")
+        if num_leases_removed:
+            # pack and write out the remaining leases. We write these out in
+            # the same order as they were added, so that if we crash while
+            # doing this, we won't lose any non-cancelled leases.
+            leases = [l for l in leases if l] # remove the cancelled leases
+            f = open(self.fname, 'rb+')
+            for i,lease in enumerate(leases):
+                self._write_lease_record(f, i, lease)
+            self._write_num_leases(f, len(leases))
+            self._truncate_leases(f, len(leases))
+            f.close()
+        space_freed = self.LEASE_SIZE * num_leases_removed
+        if not len(leases):
+            space_freed += os.stat(self.fname)[stat.ST_SIZE]
+            self.unlink()
+        return space_freed
}
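
Because NullCore discards everything, its accessors are trivial. A sketch of
the resulting behavior, assuming the classes added above:

  backend = NullCore()
  assert backend.get_available_space() is None
  assert backend.get_shares('any-storage-index') == set()
  assert backend.get_incoming_shnums('any-storage-index') == frozenset()
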
[update common goal to pass nullserver test
wilcoxjg@gmail.com**20110803205028
Ignore-this: 18c4fa21133757a46dbeea9446bfd981
] {
hunk ./src/allmydata/storage/backends/null/core.py 3
 from allmydata.storage.backends.base import Backend
 from allmydata.storage.immutable import BucketWriter, BucketReader
+from zope.interface import implements
 
 class NullCore(Backend):
hunk ./src/allmydata/storage/backends/null/core.py 6
+    implements(IStorageBackend)
     def __init__(self):
         Backend.__init__(self)
 
hunk ./src/allmydata/storage/backends/null/core.py 30
         return frozenset()
 
 class ImmutableShare:
+    implements(IStorageBackendShare)
     sharetype = "immutable"
 
     def __init__(self):
hunk ./src/allmydata/storage/common.py 19
 def si_a2b(ascii_storageindex):
     return base32.a2b(ascii_storageindex)
 
-def storage_index_to_dir(storageindex):
+def si_si2dir(startfp, storageindex):
     sia = si_b2a(storageindex)
hunk ./src/allmydata/storage/common.py 21
-    return os.path.join(sia[:2], sia)
+    newfp = startfp.child(sia[:2])
+    return newfp.child(sia)
}
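
si_si2dir() now returns a FilePath two levels below the given starting
directory instead of a relative pathname string. A sketch of the intended
equivalence, with `sia` standing for the base32-encoded storage index:

  # old: os.path.join(sia[:2], sia)        -> relative pathname string
  # new: startfp.child(sia[:2]).child(sia) -> FilePath object
  sharedir = si_si2dir(storedir_fp, storageindex)
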
[the backend-pluggable version of storage/server.py
wilcoxjg@gmail.com**20110803210232
Ignore-this: d8d9be1371973a75b1d480fc045a132
] {
hunk ./src/allmydata/storage/server.py 1
-import os, re, weakref, struct, time
+import os, weakref, struct, time
 
 from foolscap.api import Referenceable
 from twisted.application import service
hunk ./src/allmydata/storage/server.py 11
 from allmydata.util import fileutil, idlib, log, time_format
 import allmydata # for __full_version__
 
-from allmydata.storage.common import si_b2a, si_a2b, storage_index_to_dir
-_pyflakes_hush = [si_b2a, si_a2b, storage_index_to_dir] # re-exported
+from allmydata.storage.common import si_b2a, si_a2b, si_si2dir
+_pyflakes_hush = [si_b2a, si_a2b, si_si2dir] # re-exported
 from allmydata.storage.lease import LeaseInfo
 from allmydata.storage.mutable import MutableShareFile, EmptyShare, \
      create_mutable_sharefile
hunk ./src/allmydata/storage/server.py 16
-from allmydata.storage.immutable import ShareFile, BucketWriter, BucketReader
-from allmydata.storage.crawler import BucketCountingCrawler
-from allmydata.storage.expirer import LeaseCheckingCrawler
-
-# storage/
-# storage/shares/incoming
-#   incoming/ holds temp dirs named $START/$STORAGEINDEX/$SHARENUM which will
-#   be moved to storage/shares/$START/$STORAGEINDEX/$SHARENUM upon success
-# storage/shares/$START/$STORAGEINDEX
-# storage/shares/$START/$STORAGEINDEX/$SHARENUM
-
-# Where "$START" denotes the first 10 bits worth of $STORAGEINDEX (that's 2
-# base-32 chars).
-
-# $SHARENUM matches this regex:
-NUM_RE=re.compile("^[0-9]+$")
-
-
 
 class StorageServer(service.MultiService, Referenceable):
     implements(RIStorageServer, IStatsProducer)
hunk ./src/allmydata/storage/server.py 20
     name = 'storage'
-    LeaseCheckerClass = LeaseCheckingCrawler
 
hunk ./src/allmydata/storage/server.py 21
-    def __init__(self, storedir, nodeid, reserved_space=0,
-                 discard_storage=False, readonly_storage=False,
-                 stats_provider=None,
-                 expiration_enabled=False,
-                 expiration_mode="age",
-                 expiration_override_lease_duration=None,
-                 expiration_cutoff_date=None,
-                 expiration_sharetypes=("mutable", "immutable")):
+    def __init__(self, nodeid, backend, reserved_space=0,
+                 readonly_storage=False,
+                 stats_provider=None):
         service.MultiService.__init__(self)
         assert isinstance(nodeid, str)
         assert len(nodeid) == 20
hunk ./src/allmydata/storage/server.py 28
         self.my_nodeid = nodeid
-        self.storedir = storedir
-        sharedir = os.path.join(storedir, "shares")
-        fileutil.make_dirs(sharedir)
-        self.sharedir = sharedir
-        # we don't actually create the corruption-advisory dir until necessary
-        self.corruption_advisory_dir = os.path.join(storedir,
-                                                    "corruption-advisories")
-        self.reserved_space = int(reserved_space)
-        self.no_storage = discard_storage
-        self.readonly_storage = readonly_storage
         self.stats_provider = stats_provider
         if self.stats_provider:
             self.stats_provider.register_producer(self)
hunk ./src/allmydata/storage/server.py 31
-        self.incomingdir = os.path.join(sharedir, 'incoming')
-        self._clean_incomplete()
-        fileutil.make_dirs(self.incomingdir)
         self._active_writers = weakref.WeakKeyDictionary()
hunk ./src/allmydata/storage/server.py 32
+        self.backend = backend
+        self.backend.setServiceParent(self)
+        self.backend.set_storage_server(self)
         log.msg("StorageServer created", facility="tahoe.storage")
 
hunk ./src/allmydata/storage/server.py 37
-        if reserved_space:
-            if self.get_available_space() is None:
-                log.msg("warning: [storage]reserved_space= is set, but this platform does not support an API to get disk statistics (statvfs(2) or GetDiskFreeSpaceEx), so this reservation cannot be honored",
-                        umid="0wZ27w", level=log.UNUSUAL)
-
         self.latencies = {"allocate": [], # immutable
                           "write": [],
                           "close": [],
hunk ./src/allmydata/storage/server.py 48
                           "renew": [],
                           "cancel": [],
                           }
-        self.add_bucket_counter()
-
-        statefile = os.path.join(self.storedir, "lease_checker.state")
-        historyfile = os.path.join(self.storedir, "lease_checker.history")
-        klass = self.LeaseCheckerClass
-        self.lease_checker = klass(self, statefile, historyfile,
-                                   expiration_enabled, expiration_mode,
-                                   expiration_override_lease_duration,
-                                   expiration_cutoff_date,
-                                   expiration_sharetypes)
-        self.lease_checker.setServiceParent(self)
 
     def __repr__(self):
         return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),)
hunk ./src/allmydata/storage/server.py 52
 
-    def add_bucket_counter(self):
-        statefile = os.path.join(self.storedir, "bucket_counter.state")
-        self.bucket_counter = BucketCountingCrawler(self, statefile)
-        self.bucket_counter.setServiceParent(self)
-
     def count(self, name, delta=1):
         if self.stats_provider:
             self.stats_provider.count("storage_server." + name, delta)
hunk ./src/allmydata/storage/server.py 66
         """Return a dict, indexed by category, that contains a dict of
         latency numbers for each category. If there are sufficient samples
         for unambiguous interpretation, each dict will contain the
-        following keys: mean, 01_0_percentile, 10_0_percentile,
+        following keys: samplesize, mean, 01_0_percentile, 10_0_percentile,
         50_0_percentile (median), 90_0_percentile, 95_0_percentile,
         99_0_percentile, 99_9_percentile. If there are insufficient
         samples for a given percentile to be interpreted unambiguously
hunk ./src/allmydata/storage/server.py 88
         else:
             stats["mean"] = None
 
-        orderstatlist = [(0.01, "01_0_percentile", 100), (0.1, "10_0_percentile", 10),\
-                         (0.50, "50_0_percentile", 10), (0.90, "90_0_percentile", 10),\
-                         (0.95, "95_0_percentile", 20), (0.99, "99_0_percentile", 100),\
+        orderstatlist = [(0.1, "10_0_percentile", 10), (0.5, "50_0_percentile", 10), \
+                         (0.9, "90_0_percentile", 10), (0.95, "95_0_percentile", 20), \
+                         (0.01, "01_0_percentile", 100), (0.99, "99_0_percentile", 100),\
                          (0.999, "99_9_percentile", 1000)]
 
         for percentile, percentilestring, minnumtoobserve in orderstatlist:
hunk ./src/allmydata/storage/server.py 107
             kwargs["facility"] = "tahoe.storage"
         return log.msg(*args, **kwargs)
 
-    def _clean_incomplete(self):
-        fileutil.rm_dir(self.incomingdir)
-
     def get_stats(self):
         # remember: RIStatsProvider requires that our return dict
hunk ./src/allmydata/storage/server.py 109
-        # contains numeric values.
+        # contains numeric or None values.
         stats = { 'storage_server.allocated': self.allocated_size(), }
         stats['storage_server.reserved_space'] = self.reserved_space
         for category,ld in self.get_latencies().items():
hunk ./src/allmydata/storage/server.py 143
         stats['storage_server.total_bucket_count'] = bucket_count
         return stats
 
-    def get_available_space(self):
-        """Returns available space for share storage in bytes, or None if no
-        API to get this information is available."""
-
-        if self.readonly_storage:
-            return 0
-        return fileutil.get_available_space(self.storedir, self.reserved_space)
-
     def allocated_size(self):
         space = 0
         for bw in self._active_writers:
hunk ./src/allmydata/storage/server.py 150
         return space
 
     def remote_get_version(self):
-        remaining_space = self.get_available_space()
+        remaining_space = self.backend.get_available_space()
         if remaining_space is None:
             # We're on a platform that has no API to get disk stats.
             remaining_space = 2**64
hunk ./src/allmydata/storage/server.py 164
             }
         return version
 
-    def remote_allocate_buckets(self, storage_index,
+    def remote_allocate_buckets(self, storageindex,
                                 renew_secret, cancel_secret,
                                 sharenums, allocated_size,
                                 canary, owner_num=0):
hunk ./src/allmydata/storage/server.py 173
         # to a particular owner.
         start = time.time()
         self.count("allocate")
-        alreadygot = set()
+        incoming = set()
         bucketwriters = {} # k: shnum, v: BucketWriter
hunk ./src/allmydata/storage/server.py 175
-        si_dir = storage_index_to_dir(storage_index)
-        si_s = si_b2a(storage_index)
 
hunk ./src/allmydata/storage/server.py 176
+        si_s = si_b2a(storageindex)
         log.msg("storage: allocate_buckets %s" % si_s)
 
         # in this implementation, the lease information (including secrets)
hunk ./src/allmydata/storage/server.py 190
 
         max_space_per_bucket = allocated_size
 
-        remaining_space = self.get_available_space()
+        remaining_space = self.backend.get_available_space()
         limited = remaining_space is not None
         if limited:
             # this is a bit conservative, since some of this allocated_size()
hunk ./src/allmydata/storage/server.py 199
             remaining_space -= self.allocated_size()
         # self.readonly_storage causes remaining_space <= 0
 
-        # fill alreadygot with all shares that we have, not just the ones
+        # Fill alreadygot with all shares that we have, not just the ones
         # they asked about: this will save them a lot of work. Add or update
         # leases for all of them: if they want us to hold shares for this
hunk ./src/allmydata/storage/server.py 202
-        # file, they'll want us to hold leases for this file.
-        for (shnum, fn) in self._get_bucket_shares(storage_index):
-            alreadygot.add(shnum)
-            sf = ShareFile(fn)
-            sf.add_or_renew_lease(lease_info)
+        # file, they'll want us to hold leases for all the shares of it.
+        alreadygot = set()
+        for share in self.backend.get_shares(storageindex):
+            share.add_or_renew_lease(lease_info)
+            alreadygot.add(share.shnum)
 
hunk ./src/allmydata/storage/server.py 208
-        for shnum in sharenums:
-            incominghome = os.path.join(self.incomingdir, si_dir, "%d" % shnum)
-            finalhome = os.path.join(self.sharedir, si_dir, "%d" % shnum)
-            if os.path.exists(finalhome):
-                # great! we already have it. easy.
-                pass
-            elif os.path.exists(incominghome):
-                # Note that we don't create BucketWriters for shnums that
-                # have a partial share (in incoming/), so if a second upload
-                # occurs while the first is still in progress, the second
-                # uploader will use different storage servers.
-                pass
-            elif (not limited) or (remaining_space >= max_space_per_bucket):
-                # ok! we need to create the new share file.
-                bw = BucketWriter(self, incominghome, finalhome,
-                                  max_space_per_bucket, lease_info, canary)
-                if self.no_storage:
-                    bw.throw_out_all_data = True
+        # all share numbers that are incoming
+        incoming = self.backend.get_incoming_shnums(storageindex)
+
+        for shnum in ((sharenums - alreadygot) - incoming):
+            if (not limited) or (remaining_space >= max_space_per_bucket):
+                bw = self.backend.make_bucket_writer(storageindex, shnum, max_space_per_bucket, lease_info, canary)
                 bucketwriters[shnum] = bw
                 self._active_writers[bw] = 1
                 if limited:
hunk ./src/allmydata/storage/server.py 219
                     remaining_space -= max_space_per_bucket
             else:
-                # bummer! not enough space to accept this bucket
+                # Bummer! Not enough space to accept this share.
                 pass
 
hunk ./src/allmydata/storage/server.py 222
-        if bucketwriters:
-            fileutil.make_dirs(os.path.join(self.sharedir, si_dir))
-
         self.add_latency("allocate", time.time() - start)
         return alreadygot, bucketwriters
 
hunk ./src/allmydata/storage/server.py 225
-    def _iter_share_files(self, storage_index):
-        for shnum, filename in self._get_bucket_shares(storage_index):
+    def _iter_share_files(self, storageindex):
+        for shnum, filename in self._get_shares(storageindex):
             f = open(filename, 'rb')
             header = f.read(32)
             f.close()
hunk ./src/allmydata/storage/server.py 231
             if header[:32] == MutableShareFile.MAGIC:
+                # XXX Can I exploit this code?
                 sf = MutableShareFile(filename, self)
                 # note: if the share has been migrated, the renew_lease()
                 # call will throw an exception, with information to help the
hunk ./src/allmydata/storage/server.py 237
                 # client update the lease.
             elif header[:4] == struct.pack(">L", 1):
+                # Check if version number is "1".
+                # XXX WHAT ABOUT OTHER VERSIONS!!!!!!!?
                 sf = ShareFile(filename)
             else:
                 continue # non-sharefile
hunk ./src/allmydata/storage/server.py 244
             yield sf
 
-    def remote_add_lease(self, storage_index, renew_secret, cancel_secret,
+    def remote_add_lease(self, storageindex, renew_secret, cancel_secret,
                          owner_num=1):
         start = time.time()
         self.count("add-lease")
hunk ./src/allmydata/storage/server.py 252
         lease_info = LeaseInfo(owner_num,
                                renew_secret, cancel_secret,
                                new_expire_time, self.my_nodeid)
-        for sf in self._iter_share_files(storage_index):
+        for sf in self._iter_share_files(storageindex):
             sf.add_or_renew_lease(lease_info)
         self.add_latency("add-lease", time.time() - start)
         return None
hunk ./src/allmydata/storage/server.py 257
 
-    def remote_renew_lease(self, storage_index, renew_secret):
+    def remote_renew_lease(self, storageindex, renew_secret):
         start = time.time()
         self.count("renew")
         new_expire_time = time.time() + 31*24*60*60
hunk ./src/allmydata/storage/server.py 262
         found_buckets = False
-        for sf in self._iter_share_files(storage_index):
+        for sf in self._iter_share_files(storageindex):
             found_buckets = True
             sf.renew_lease(renew_secret, new_expire_time)
         self.add_latency("renew", time.time() - start)
hunk ./src/allmydata/storage/server.py 269
         if not found_buckets:
             raise IndexError("no such lease to renew")
 
-    def remote_cancel_lease(self, storage_index, cancel_secret):
+    def remote_cancel_lease(self, storageindex, cancel_secret):
         start = time.time()
         self.count("cancel")
 
hunk ./src/allmydata/storage/server.py 275
         total_space_freed = 0
         found_buckets = False
-        for sf in self._iter_share_files(storage_index):
+        for sf in self._iter_share_files(storageindex):
             # note: if we can't find a lease on one share, we won't bother
             # looking in the others. Unless something broke internally
             # (perhaps we ran out of disk space while adding a lease), the
hunk ./src/allmydata/storage/server.py 285
             total_space_freed += sf.cancel_lease(cancel_secret)
 
         if found_buckets:
-            storagedir = os.path.join(self.sharedir,
-                                      storage_index_to_dir(storage_index))
-            if not os.listdir(storagedir):
-                os.rmdir(storagedir)
+            # XXX Yikes, looks like code that shouldn't be in the server!
+            storagedir = si_si2dir(self.sharedir, storageindex)
+            fp_rmdir_if_empty(storagedir)
 
         if self.stats_provider:
             self.stats_provider.count('storage_server.bytes_freed',
hunk ./src/allmydata/storage/server.py 301
             self.stats_provider.count('storage_server.bytes_added', consumed_size)
         del self._active_writers[bw]
 
-    def _get_bucket_shares(self, storage_index):
-        """Return a list of (shnum, pathname) tuples for files that hold
-        shares for this storage_index. In each tuple, 'shnum' will always be
-        the integer form of the last component of 'pathname'."""
-        storagedir = os.path.join(self.sharedir, storage_index_to_dir(storage_index))
-        try:
-            for f in os.listdir(storagedir):
-                if NUM_RE.match(f):
-                    filename = os.path.join(storagedir, f)
-                    yield (int(f), filename)
-        except OSError:
-            # Commonly caused by there being no buckets at all.
-            pass
-
-    def remote_get_buckets(self, storage_index):
+    def remote_get_buckets(self, storageindex):
         start = time.time()
         self.count("get")
hunk ./src/allmydata/storage/server.py 304
-        si_s = si_b2a(storage_index)
+        si_s = si_b2a(storageindex)
         log.msg("storage: get_buckets %s" % si_s)
         bucketreaders = {} # k: sharenum, v: BucketReader
hunk ./src/allmydata/storage/server.py 307
-        for shnum, filename in self._get_bucket_shares(storage_index):
-            bucketreaders[shnum] = BucketReader(self, filename,
-                                                storage_index, shnum)
+        self.backend.set_storage_server(self)
+        for share in self.backend.get_shares(storageindex):
+            bucketreaders[share.get_shnum()] = self.backend.make_bucket_reader(share)
         self.add_latency("get", time.time() - start)
         return bucketreaders
 
hunk ./src/allmydata/storage/server.py 313
-    def get_leases(self, storage_index):
+    def get_leases(self, storageindex):
         """Provide an iterator that yields all of the leases attached to this
         bucket. Each lease is returned as a LeaseInfo instance.
 
hunk ./src/allmydata/storage/server.py 323
         # since all shares get the same lease data, we just grab the leases
         # from the first share
         try:
-            shnum, filename = self._get_bucket_shares(storage_index).next()
+            shnum, filename = self._get_shares(storageindex).next()
             sf = ShareFile(filename)
             return sf.get_leases()
         except StopIteration:
hunk ./src/allmydata/storage/server.py 329
             return iter([])
 
-    def remote_slot_testv_and_readv_and_writev(self, storage_index,
+    # XXX As far as Zancas' grockery has gotten.
+    def remote_slot_testv_and_readv_and_writev(self, storageindex,
                                                secrets,
                                                test_and_write_vectors,
                                                read_vector):
hunk ./src/allmydata/storage/server.py 336
         start = time.time()
         self.count("writev")
-        si_s = si_b2a(storage_index)
+        si_s = si_b2a(storageindex)
         log.msg("storage: slot_writev %s" % si_s)
hunk ./src/allmydata/storage/server.py 338
-        si_dir = storage_index_to_dir(storage_index)
+
         (write_enabler, renew_secret, cancel_secret) = secrets
         # shares exist if there is a file for them
hunk ./src/allmydata/storage/server.py 341
-        bucketdir = os.path.join(self.sharedir, si_dir)
+        bucketdir = si_si2dir(self.sharedir, storageindex)
         shares = {}
         if os.path.isdir(bucketdir):
             for sharenum_s in os.listdir(bucketdir):
hunk ./src/allmydata/storage/server.py 424
                                 self)
         return share
 
-    def remote_slot_readv(self, storage_index, shares, readv):
+    def remote_slot_readv(self, storageindex, shares, readv):
         start = time.time()
         self.count("readv")
hunk ./src/allmydata/storage/server.py 427
-        si_s = si_b2a(storage_index)
+        si_s = si_b2a(storageindex)
         lp = log.msg("storage: slot_readv %s %s" % (si_s, shares),
                      facility="tahoe.storage", level=log.OPERATIONAL)
hunk ./src/allmydata/storage/server.py 430
-        si_dir = storage_index_to_dir(storage_index)
         # shares exist if there is a file for them
hunk ./src/allmydata/storage/server.py 431
-        bucketdir = os.path.join(self.sharedir, si_dir)
+        bucketdir = si_si2dir(self.sharedir, storageindex)
         if not os.path.isdir(bucketdir):
             self.add_latency("readv", time.time() - start)
             return {}
hunk ./src/allmydata/storage/server.py 450
         self.add_latency("readv", time.time() - start)
         return datavs
 
-    def remote_advise_corrupt_share(self, share_type, storage_index, shnum,
+    def remote_advise_corrupt_share(self, share_type, storageindex, shnum,
                                     reason):
         fileutil.make_dirs(self.corruption_advisory_dir)
         now = time_format.iso_utc(sep="T")
hunk ./src/allmydata/storage/server.py 454
-        si_s = si_b2a(storage_index)
+        si_s = si_b2a(storageindex)
         # windows can't handle colons in the filename
         fn = os.path.join(self.corruption_advisory_dir,
                           "%s--%s-%d" % (now, si_s, shnum)).replace(":","")
hunk ./src/allmydata/storage/server.py 461
         f = open(fn, "w")
         f.write("report: Share Corruption\n")
         f.write("type: %s\n" % share_type)
-        f.write("storage_index: %s\n" % si_s)
+        f.write("storageindex: %s\n" % si_s)
         f.write("share_number: %d\n" % shnum)
         f.write("\n")
         f.write(reason)
}
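
remote_allocate_buckets() now decides which shares to create with plain set
arithmetic instead of per-share filesystem probes. A sketch of that logic,
with illustrative values:

  sharenums  = frozenset((0, 1, 2, 3))   # what the client asked for
  alreadygot = set((1,))                 # shares the backend already holds
  incoming   = frozenset((2,))           # shares still being uploaded
  assert (sharenums - alreadygot) - incoming == set((0, 3))
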
[add interfaces to null/core.py
wilcoxjg@gmail.com**20110803210839
Ignore-this: 5af0b48d4c98d19e1eedaa2180211583
] {
hunk ./src/allmydata/storage/backends/null/core.py 4
 from allmydata.storage.backends.base import Backend
 from allmydata.storage.immutable import BucketWriter, BucketReader
 from zope.interface import implements
+from allmydata.interfaces import IStorageBackend, IStorageBackendShare
 
 class NullCore(Backend):
     implements(IStorageBackend)
hunk ./src/allmydata/test/test_backends.py 8
 import mock
 # This is the code that we're going to be testing.
 from allmydata.storage.server import StorageServer
-from allmydata.storage.backends.das.core import DASCore
 from allmydata.storage.backends.null.core import NullCore
 from allmydata.storage.common import si_si2dir
 # The following share file content was generated with
}
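
With the implements() declarations in place, conformance can be checked with
zope.interface's standard machinery. A minimal sketch, assuming the modules
from these patches are importable:

  from allmydata.interfaces import IStorageBackend
  from allmydata.storage.backends.null.core import NullCore

  assert IStorageBackend.providedBy(NullCore())

(zope.interface.verify.verifyObject would apply a stricter check, comparing
declared methods against the interface.)
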
---|
1152 | [change immutable.py includes removal of ImmutableShare which now lives in the backend specfic core.py module. |
---|
wilcoxjg@gmail.com**20110803211053
Ignore-this: 2155e62a386d6d88f9bbc0dcec6ded5
] {
hunk ./src/allmydata/storage/immutable.py 1
-import os, stat, struct, time
+import os, time

from foolscap.api import Referenceable

hunk ./src/allmydata/storage/immutable.py 7
from zope.interface import implements
from allmydata.interfaces import RIBucketWriter, RIBucketReader
-from allmydata.util import base32, fileutil, log
+from allmydata.util import base32, log
from allmydata.util.assertutil import precondition
from allmydata.util.hashutil import constant_time_compare
from allmydata.storage.lease import LeaseInfo
hunk ./src/allmydata/storage/immutable.py 14
from allmydata.storage.common import UnknownImmutableContainerVersionError, \
DataTooLargeError

-# each share file (in storage/shares/$SI/$SHNUM) contains lease information
-# and share data. The share data is accessed by RIBucketWriter.write and
-# RIBucketReader.read . The lease information is not accessible through these
-# interfaces.
-
-# The share file has the following layout:
-# 0x00: share file version number, four bytes, current version is 1
-# 0x04: share data length, four bytes big-endian = A # See Footnote 1 below.
-# 0x08: number of leases, four bytes big-endian
-# 0x0c: beginning of share data (see immutable.layout.WriteBucketProxy)
-# A+0x0c = B: first lease. Lease format is:
-# B+0x00: owner number, 4 bytes big-endian, 0 is reserved for no-owner
-# B+0x04: renew secret, 32 bytes (SHA256)
-# B+0x24: cancel secret, 32 bytes (SHA256)
-# B+0x44: expiration time, 4 bytes big-endian seconds-since-epoch
-# B+0x48: next lease, or end of record
-
-# Footnote 1: as of Tahoe v1.3.0 this field is not used by storage servers,
-# but it is still filled in by storage servers in case the storage server
-# software gets downgraded from >= Tahoe v1.3.0 to < Tahoe v1.3.0, or the
-# share file is moved from one storage server to another. The value stored in
-# this field is truncated, so if the actual share data length is >= 2**32,
-# then the value stored in this field will be the actual share data length
-# modulo 2**32.
-
-class ShareFile:
- LEASE_SIZE = struct.calcsize(">L32s32sL")
- sharetype = "immutable"
-
- def __init__(self, filename, max_size=None, create=False):
- """ If max_size is not None then I won't allow more than max_size to be written to me. If create=True and max_size must not be None. """
- precondition((max_size is not None) or (not create), max_size, create)
- self.home = filename
- self._max_size = max_size
- if create:
- # touch the file, so later callers will see that we're working on
- # it. Also construct the metadata.
- assert not os.path.exists(self.home)
- fileutil.make_dirs(os.path.dirname(self.home))
- f = open(self.home, 'wb')
- # The second field -- the four-byte share data length -- is no
- # longer used as of Tahoe v1.3.0, but we continue to write it in
- # there in case someone downgrades a storage server from >=
- # Tahoe-1.3.0 to < Tahoe-1.3.0, or moves a share file from one
- # server to another, etc. We do saturation -- a share data length
- # larger than 2**32-1 (what can fit into the field) is marked as
- # the largest length that can fit into the field. That way, even
- # if this does happen, the old < v1.3.0 server will still allow
- # clients to read the first part of the share.
- f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
- f.close()
- self._lease_offset = max_size + 0x0c
- self._num_leases = 0
- else:
- f = open(self.home, 'rb')
- filesize = os.path.getsize(self.home)
- (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
- f.close()
- if version != 1:
- msg = "sharefile %s had version %d but we wanted 1" % \
- (filename, version)
- raise UnknownImmutableContainerVersionError(msg)
- self._num_leases = num_leases
- self._lease_offset = filesize - (num_leases * self.LEASE_SIZE)
- self._data_offset = 0xc
-
- def unlink(self):
- os.unlink(self.home)
-
- def read_share_data(self, offset, length):
- precondition(offset >= 0)
- # reads beyond the end of the data are truncated. Reads that start
- # beyond the end of the data return an empty string. I wonder why
- # Python doesn't do the following computation for me?
- seekpos = self._data_offset+offset
- fsize = os.path.getsize(self.home)
- actuallength = max(0, min(length, fsize-seekpos))
- if actuallength == 0:
- return ""
- f = open(self.home, 'rb')
- f.seek(seekpos)
- return f.read(actuallength)
-
- def write_share_data(self, offset, data):
- length = len(data)
- precondition(offset >= 0, offset)
- if self._max_size is not None and offset+length > self._max_size:
- raise DataTooLargeError(self._max_size, offset, length)
- f = open(self.home, 'rb+')
- real_offset = self._data_offset+offset
- f.seek(real_offset)
- assert f.tell() == real_offset
- f.write(data)
- f.close()
-
- def _write_lease_record(self, f, lease_number, lease_info):
- offset = self._lease_offset + lease_number * self.LEASE_SIZE
- f.seek(offset)
- assert f.tell() == offset
- f.write(lease_info.to_immutable_data())
-
- def _read_num_leases(self, f):
- f.seek(0x08)
- (num_leases,) = struct.unpack(">L", f.read(4))
- return num_leases
-
- def _write_num_leases(self, f, num_leases):
- f.seek(0x08)
- f.write(struct.pack(">L", num_leases))
-
- def _truncate_leases(self, f, num_leases):
- f.truncate(self._lease_offset + num_leases * self.LEASE_SIZE)
-
- def get_leases(self):
- """Yields a LeaseInfo instance for all leases."""
- f = open(self.home, 'rb')
- (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
- f.seek(self._lease_offset)
- for i in range(num_leases):
- data = f.read(self.LEASE_SIZE)
- if data:
- yield LeaseInfo().from_immutable_data(data)
-
- def add_lease(self, lease_info):
- f = open(self.home, 'rb+')
- num_leases = self._read_num_leases(f)
- self._write_lease_record(f, num_leases, lease_info)
- self._write_num_leases(f, num_leases+1)
- f.close()
-
- def renew_lease(self, renew_secret, new_expire_time):
- for i,lease in enumerate(self.get_leases()):
- if constant_time_compare(lease.renew_secret, renew_secret):
- # yup. See if we need to update the owner time.
- if new_expire_time > lease.expiration_time:
- # yes
- lease.expiration_time = new_expire_time
- f = open(self.home, 'rb+')
- self._write_lease_record(f, i, lease)
- f.close()
- return
- raise IndexError("unable to renew non-existent lease")
-
- def add_or_renew_lease(self, lease_info):
- try:
- self.renew_lease(lease_info.renew_secret,
- lease_info.expiration_time)
- except IndexError:
- self.add_lease(lease_info)
-
-
- def cancel_lease(self, cancel_secret):
- """Remove a lease with the given cancel_secret. If the last lease is
- cancelled, the file will be removed. Return the number of bytes that
- were freed (by truncating the list of leases, and possibly by
- deleting the file. Raise IndexError if there was no lease with the
- given cancel_secret.
- """
-
- leases = list(self.get_leases())
- num_leases_removed = 0
- for i,lease in enumerate(leases):
- if constant_time_compare(lease.cancel_secret, cancel_secret):
- leases[i] = None
- num_leases_removed += 1
- if not num_leases_removed:
- raise IndexError("unable to find matching lease to cancel")
- if num_leases_removed:
- # pack and write out the remaining leases. We write these out in
- # the same order as they were added, so that if we crash while
- # doing this, we won't lose any non-cancelled leases.
- leases = [l for l in leases if l] # remove the cancelled leases
- f = open(self.home, 'rb+')
- for i,lease in enumerate(leases):
- self._write_lease_record(f, i, lease)
- self._write_num_leases(f, len(leases))
- self._truncate_leases(f, len(leases))
- f.close()
- space_freed = self.LEASE_SIZE * num_leases_removed
- if not len(leases):
- space_freed += os.stat(self.home)[stat.ST_SIZE]
- self.unlink()
- return space_freed
-
-
class BucketWriter(Referenceable):
implements(RIBucketWriter)

hunk ./src/allmydata/storage/immutable.py 17
- def __init__(self, ss, incominghome, finalhome, max_size, lease_info, canary):
+ def __init__(self, ss, immutableshare, max_size, lease_info, canary):
self.ss = ss
hunk ./src/allmydata/storage/immutable.py 19
- self.incominghome = incominghome
- self.finalhome = finalhome
- self._max_size = max_size # don't allow the client to write more than this
+ self._max_size = max_size # don't allow the client to write more than this
self._canary = canary
self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected)
self.closed = False
hunk ./src/allmydata/storage/immutable.py 24
self.throw_out_all_data = False
- self._sharefile = ShareFile(incominghome, create=True, max_size=max_size)
+ self._sharefile = immutableshare
# also, add our lease to the file now, so that other ones can be
# added by simultaneous uploaders
self._sharefile.add_lease(lease_info)
hunk ./src/allmydata/storage/immutable.py 45
precondition(not self.closed)
start = time.time()

- fileutil.make_dirs(os.path.dirname(self.finalhome))
- fileutil.rename(self.incominghome, self.finalhome)
- try:
- # self.incominghome is like storage/shares/incoming/ab/abcde/4 .
- # We try to delete the parent (.../ab/abcde) to avoid leaving
- # these directories lying around forever, but the delete might
- # fail if we're working on another share for the same storage
- # index (like ab/abcde/5). The alternative approach would be to
- # use a hierarchy of objects (PrefixHolder, BucketHolder,
- # ShareWriter), each of which is responsible for a single
- # directory on disk, and have them use reference counting of
- # their children to know when they should do the rmdir. This
- # approach is simpler, but relies on os.rmdir refusing to delete
- # a non-empty directory. Do *not* use fileutil.rm_dir() here!
- os.rmdir(os.path.dirname(self.incominghome))
- # we also delete the grandparent (prefix) directory, .../ab ,
- # again to avoid leaving directories lying around. This might
- # fail if there is another bucket open that shares a prefix (like
- # ab/abfff).
- os.rmdir(os.path.dirname(os.path.dirname(self.incominghome)))
- # we leave the great-grandparent (incoming/) directory in place.
- except EnvironmentError:
- # ignore the "can't rmdir because the directory is not empty"
- # exceptions, those are normal consequences of the
- # above-mentioned conditions.
- pass
+ self._sharefile.close()
+ filelen = self._sharefile.stat()
self._sharefile = None
hunk ./src/allmydata/storage/immutable.py 48
+
self.closed = True
self._canary.dontNotifyOnDisconnect(self._disconnect_marker)

hunk ./src/allmydata/storage/immutable.py 52
- filelen = os.stat(self.finalhome)[stat.ST_SIZE]
self.ss.bucket_writer_closed(self, filelen)
self.ss.add_latency("close", time.time() - start)
self.ss.count("close")
hunk ./src/allmydata/storage/immutable.py 90
class BucketReader(Referenceable):
implements(RIBucketReader)

- def __init__(self, ss, sharefname, storage_index=None, shnum=None):
+ def __init__(self, ss, share):
self.ss = ss
hunk ./src/allmydata/storage/immutable.py 92
- self._share_file = ShareFile(sharefname)
- self.storage_index = storage_index
- self.shnum = shnum
+ self._share_file = share
+ self.storageindex = share.storageindex
+ self.shnum = share.shnum

def __repr__(self):
return "<%s %s %s>" % (self.__class__.__name__,
hunk ./src/allmydata/storage/immutable.py 98
- base32.b2a_l(self.storage_index[:8], 60),
+ base32.b2a_l(self.storageindex[:8], 60),
self.shnum)

def remote_read(self, offset, length):
hunk ./src/allmydata/storage/immutable.py 110

def remote_advise_corrupt_share(self, reason):
return self.ss.remote_advise_corrupt_share("immutable",
- self.storage_index,
+ self.storageindex,
self.shnum,
reason)
}
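
The deleted ShareFile code above is the authoritative record of the immutable
container format, and its header can be round-tripped with the same struct
calls it used: version, share data length (saturated at 2**32-1 since Tahoe
v1.3.0), and lease count, all four-byte big-endian, with share data starting
at offset 0x0c and fixed-size lease records after the data. A minimal
restatement of that layout:

    import struct

    LEASE_SIZE = struct.calcsize(">L32s32sL")  # owner number, renew secret,
                                               # cancel secret, expiration
                                               # time: 72 bytes per lease

    def pack_header(data_length, num_leases=0):
        # mirrors the f.write(struct.pack(...)) in the deleted __init__
        return struct.pack(">LLL", 1, min(2**32 - 1, data_length), num_leases)

    def unpack_header(header):
        # mirrors the struct.unpack(">LLL", f.read(0xc)) in the deleted code
        return struct.unpack(">LLL", header[:0xc])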

Context:

[Slightly improve error message about old config files (avoid unnecessary Unicode escaping). refs #1385
david-sarah@jacaranda.org**20110803163848
Ignore-this: a3e3930fba7ccf90b8db3d2ed5829df4
]
[test_checker.py: increase timeout for TooParallel.test_immutable (was consistently failing on ARM buildslave).
david-sarah@jacaranda.org**20110803163213
Ignore-this: d0efceaf12628e8791862b80c85b5d56
]
[Fix the bug that prevents an introducer from starting when introducer.furl already exists. Also remove some dead code that used to read old config files, and rename 'warn_about_old_config_files' to reflect that it's not a warning. refs #1385
david-sarah@jacaranda.org**20110803013212
Ignore-this: 2d6cd14bd06a7493b26f2027aff78f4d
]
[test_runner.py: modify RunNode.test_introducer to test that starting an introducer works when the introducer.furl file already exists. refs #1385
david-sarah@jacaranda.org**20110803012704
Ignore-this: 8cf7f27ac4bfbb5ad8ca4a974106d437
]
[verifier: correct a bug introduced in changeset [5106] that caused us to only verify the first block of a file. refs #1395
david-sarah@jacaranda.org**20110802172437
Ignore-this: 87fb77854a839ff217dce73544775b11
]
[test_repairer: add a deterministic test of share data corruption that always flips the bits of the last byte of the share data. refs #1395
david-sarah@jacaranda.org**20110802175841
Ignore-this: 72f54603785007e88220c8d979e08be7
]
[verifier: serialize the fetching of blocks within a share so that we don't use too much RAM
zooko@zooko.com**20110802063703
Ignore-this: debd9bac07dcbb6803f835a9e2eabaa1

Shares are still verified in parallel, but within a share, don't request a
block until the previous block has been verified and the memory we used to hold
it has been freed up.

Patch originally due to Brian. This version has a mockery-patchery-style test
which is "low tech" (it implements the patching inline in the test code instead
of using an extension of the mock.patch() function from the mock library) and
which unpatches in case of exception.

fixes #1395
]
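
One way to picture the serialization that patch describes, sketched with
Twisted's inlineCallbacks; fetch_block and verify_block are hypothetical
callables, not the verifier's real method names:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def verify_blocks_serially(fetch_block, verify_block, num_blocks):
        # request each block only after the previous one has been verified,
        # so at most one block of this share is held in RAM at a time;
        # distinct shares can still run such loops in parallel
        for segnum in range(num_blocks):
            data = yield fetch_block(segnum)  # fetch_block returns a Deferred
            verify_block(segnum, data)
            del data  # let the block's memory be freed before the next fetch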
[add docs about timing-channel attacks
Brian Warner <warner@lothar.com>**20110802044541
Ignore-this: 73114d5f5ed9ce252597b707dba3a194
]
['test-coverage' now needs PYTHONPATH=. to find TOP/twisted/plugins/
Brian Warner <warner@lothar.com>**20110802041952
Ignore-this: d40f1f4cb426ea1c362fc961baedde2
]
[remove nodeid from WriteBucketProxy classes and customers
warner@lothar.com**20110801224317
Ignore-this: e55334bb0095de11711eeb3af827e8e8
refs #1363
]
[remove get_serverid() from ReadBucketProxy and customers, including Checker
warner@lothar.com**20110801224307
Ignore-this: 837aba457bc853e4fd413ab1a94519cb
and debug.py dump-share commands
refs #1363
]
[reject old-style (pre-Tahoe-LAFS-v1.3) configuration files
zooko@zooko.com**20110801232423
Ignore-this: b58218fcc064cc75ad8f05ed0c38902b
Check for the existence of any of them and if any are found raise exception which will abort the startup of the node.
This is a backwards-incompatible change for anyone who is still using old-style configuration files.
fixes #1385
]
[whitespace-cleanup
zooko@zooko.com**20110725015546
Ignore-this: 442970d0545183b97adc7bd66657876c
]
[tests: use fileutil.write() instead of open() to ensure timely close even without CPython-style reference counting
zooko@zooko.com**20110331145427
Ignore-this: 75aae4ab8e5fa0ad698f998aaa1888ce
Some of these already had an explicit close() but I went ahead and replaced them with fileutil.write() as well for the sake of uniformity.
]
[Address Kevan's comment in #776 about Options classes missed when adding 'self.command_name'. refs #776, #1359
david-sarah@jacaranda.org**20110801221317
Ignore-this: 8881d42cf7e6a1d15468291b0cb8fab9
]
[docs/frontends/webapi.rst: change some more instances of 'delete' or 'remove' to 'unlink', change some section titles, and use two blank lines between all sections. refs #776, #1104
david-sarah@jacaranda.org**20110801220919
Ignore-this: 572327591137bb05c24c44812d4b163f
]
[cleanup: implement rm as a synonym for unlink rather than vice-versa. refs #776
david-sarah@jacaranda.org**20110801220108
Ignore-this: 598dcbed870f4f6bb9df62de9111b343
]
[docs/webapi.rst: address Kevan's comments about use of 'delete' on ref #1104
david-sarah@jacaranda.org**20110801205356
Ignore-this: 4fbf03864934753c951ddeff64392491
]
[docs: some changes of 'delete' or 'rm' to 'unlink'. refs #1104
david-sarah@jacaranda.org**20110713002722
Ignore-this: 304d2a330d5e6e77d5f1feed7814b21c
]
[WUI: change the label of the button to unlink a file from 'del' to 'unlink'. Also change some internal names to 'unlink', and allow 't=unlink' as a synonym for 't=delete' in the web-API interface. Incidentally, improve a test to check for the rename button as well as the unlink button. fixes #1104
david-sarah@jacaranda.org**20110713001218
Ignore-this: 3eef6b3f81b94a9c0020a38eb20aa069
]
[src/allmydata/web/filenode.py: delete a stale comment that was made incorrect by changeset [3133].
david-sarah@jacaranda.org**20110801203009
Ignore-this: b3912e95a874647027efdc97822dd10e
]
[fix typo introduced during rebasing of 'remove get_serverid from
Brian Warner <warner@lothar.com>**20110801200341
Ignore-this: 4235b0f585c0533892193941dbbd89a8
DownloadStatus.add_dyhb_request and customers' patch, to fix test failure.
]
[remove get_serverid from DownloadStatus.add_dyhb_request and customers
zooko@zooko.com**20110801185401
Ignore-this: db188c18566d2d0ab39a80c9dc8f6be6
This patch is a rebase of a patch originally written by Brian. I didn't change any of the intent of Brian's patch, just ported it to current trunk.
refs #1363
]
[remove get_serverid from DownloadStatus.add_block_request and customers
zooko@zooko.com**20110801185344
Ignore-this: 8bfa8201d6147f69b0fbe31beea9c1e
This is a rebase of a patch Brian originally wrote. I haven't changed the intent of that patch, just ported it to trunk.
refs #1363
]
[apply zooko's advice: storage_client get_known_servers() returns a frozenset, caller sorts
warner@lothar.com**20110801174452
Ignore-this: 2aa13ea6cbed4e9084bd604bf8633692
refs #1363
]
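
A caller that needs a stable ordering now sorts the frozenset itself, along
these lines (a hedged sketch; 'broker' stands for a StorageFarmBroker instance
and the sort key is illustrative):

    servers = sorted(broker.get_known_servers(),
                     key=lambda s: s.get_longname())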
[test_immutable.Test: rewrite to use NoNetworkGrid, now takes 2.7s not 97s
warner@lothar.com**20110801174444
Ignore-this: 54f30b5d7461d2b3514e2a0172f3a98c
remove now-unused ShareManglingMixin
refs #1363
]
[DownloadStatus.add_known_share wants to be used by Finder, web.status
warner@lothar.com**20110801174436
Ignore-this: 1433bcd73099a579abe449f697f35f9
refs #1363
]
[replace IServer.name() with get_name(), and get_longname()
warner@lothar.com**20110801174428
Ignore-this: e5a6f7f6687fd7732ddf41cfdd7c491b

This patch was originally written by Brian, but was re-recorded by Zooko to use
darcs replace instead of hunks for any file in which it would result in fewer
total hunks.
refs #1363
]
[upload.py: apply David-Sarah's advice rename (un)contacted(2) trackers to first_pass/second_pass/next_pass
zooko@zooko.com**20110801174143
Ignore-this: e36e1420bba0620a0107bd90032a5198
This patch was written by Brian but was re-recorded by Zooko (with David-Sarah looking on) to use darcs replace instead of editing to rename the three variables to their new names.
refs #1363
]
[Coalesce multiple Share.loop() calls, make downloads faster. Closes #1268.
Brian Warner <warner@lothar.com>**20110801151834
Ignore-this: 48530fce36c01c0ff708f61c2de7e67a
]
[src/allmydata/_auto_deps.py: 'i686' is another way of spelling x86.
david-sarah@jacaranda.org**20110801034035
Ignore-this: 6971e0621db2fba794d86395b4d51038
]
[tahoe_rm.py: better error message when there is no path. refs #1292
david-sarah@jacaranda.org**20110122064212
Ignore-this: ff3bb2c9f376250e5fd77eb009e09018
]
[test_cli.py: Test for error message when 'tahoe rm' is invoked without a path. refs #1292
david-sarah@jacaranda.org**20110104105108
Ignore-this: 29ec2f2e0251e446db96db002ad5dd7d
]
[src/allmydata/__init__.py: suppress a spurious warning from 'bin/tahoe --version[-and-path]' about twisted-web and twisted-core packages.
david-sarah@jacaranda.org**20110801005209
Ignore-this: 50e7cd53cca57b1870d9df0361c7c709
]
[test_cli.py: use to_str on fields loaded using simplejson.loads in new tests. refs #1304
david-sarah@jacaranda.org**20110730032521
Ignore-this: d1d6dfaefd1b4e733181bf127c79c00b
]
[cli: make 'tahoe cp' overwrite mutable files in-place
Kevan Carstensen <kevan@isnotajoke.com>**20110729202039
Ignore-this: b2ad21a19439722f05c49bfd35b01855
]
[SFTP: write an error message to standard error for unrecognized shell commands. Change the existing message for shell sessions to be written to standard error, and refactor some duplicated code. Also change the lines of the error messages to end in CRLF, and take into account Kevan's review comments. fixes #1442, #1446
david-sarah@jacaranda.org**20110729233102
Ignore-this: d2f2bb4664f25007d1602bf7333e2cdd
]
[src/allmydata/scripts/cli.py: fix pyflakes warning.
david-sarah@jacaranda.org**20110728021402
Ignore-this: 94050140ddb99865295973f49927c509
]
[Fix the help synopses of CLI commands to include [options] in the right place. fixes #1359, fixes #636
david-sarah@jacaranda.org**20110724225440
Ignore-this: 2a8e488a5f63dabfa9db9efd83768a5
]
[encodingutil: argv and output encodings are always the same on all platforms. Lose the unnecessary generality of them being different. fixes #1120
david-sarah@jacaranda.org**20110629185356
Ignore-this: 5ebacbe6903dfa83ffd3ff8436a97787
]
[docs/man/tahoe.1: add man page. fixes #1420
david-sarah@jacaranda.org**20110724171728
Ignore-this: fc7601ec7f25494288d6141d0ae0004c
]
[Update the dependency on zope.interface to fix an incompatibility between Nevow and zope.interface 3.6.4. fixes #1435
1658 | david-sarah@jacaranda.org**20110721234941 |
---|
1659 | Ignore-this: 2ff3fcfc030fca1a4d4c7f1fed0f2aa9 |
---|
1660 | ] |
---|
1661 | [frontends/ftpd.py: remove the check for IWriteFile.close since we're now guaranteed to be using Twisted >= 10.1 which has it. |
---|
1662 | david-sarah@jacaranda.org**20110722000320 |
---|
1663 | Ignore-this: 55cd558b791526113db3f83c00ec328a |
---|
1664 | ] |
---|
1665 | [Update the dependency on Twisted to >= 10.1. This allows us to simplify some documentation: it's no longer necessary to install pywin32 on Windows, or apply a patch to Twisted in order to use the FTP frontend. fixes #1274, #1438. refs #1429 |
---|
1666 | david-sarah@jacaranda.org**20110721233658 |
---|
1667 | Ignore-this: 81b41745477163c9b39c0b59db91cc62 |
---|
1668 | ] |
---|
1669 | [misc/build_helpers/run_trial.py: undo change to block pywin32 (it didn't work because run_trial.py is no longer used). refs #1334 |
---|
1670 | david-sarah@jacaranda.org**20110722035402 |
---|
1671 | Ignore-this: 5d03f544c4154f088e26c7107494bf39 |
---|
1672 | ] |
---|
1673 | [misc/build_helpers/run_trial.py: ensure that pywin32 is not on the sys.path when running the test suite. Includes some temporary debugging printouts that will be removed. refs #1334 |
---|
1674 | david-sarah@jacaranda.org**20110722024907 |
---|
1675 | Ignore-this: 5141a9f83a4085ed4ca21f0bbb20bb9c |
---|
1676 | ] |
---|
1677 | [docs/running.rst: use 'tahoe run ~/.tahoe' instead of 'tahoe run' (the default is the current directory, unlike 'tahoe start'). |
---|
1678 | david-sarah@jacaranda.org**20110718005949 |
---|
1679 | Ignore-this: 81837fbce073e93d88a3e7ae3122458c |
---|
1680 | ] |
---|
1681 | [docs/running.rst: say to put the introducer.furl in tahoe.cfg. |
---|
1682 | david-sarah@jacaranda.org**20110717194315 |
---|
1683 | Ignore-this: 954cc4c08e413e8c62685d58ff3e11f3 |
---|
1684 | ] |
---|
1685 | [README.txt: say that quickstart.rst is in the docs directory. |
---|
1686 | david-sarah@jacaranda.org**20110717192400 |
---|
1687 | Ignore-this: bc6d35a85c496b77dbef7570677ea42a |
---|
1688 | ] |
---|
1689 | [setup: remove the dependency on foolscap's "secure_connections" extra, add a dependency on pyOpenSSL |
---|
1690 | zooko@zooko.com**20110717114226 |
---|
1691 | Ignore-this: df222120d41447ce4102616921626c82 |
---|
1692 | fixes #1383 |
---|
1693 | ] |
---|
1694 | [test_sftp.py cleanup: remove a redundant definition of failUnlessReallyEqual. |
---|
1695 | david-sarah@jacaranda.org**20110716181813 |
---|
1696 | Ignore-this: 50113380b368c573f07ac6fe2eb1e97f |
---|
1697 | ] |
---|
1698 | [docs: add missing link in NEWS.rst |
---|
1699 | zooko@zooko.com**20110712153307 |
---|
1700 | Ignore-this: be7b7eb81c03700b739daa1027d72b35 |
---|
1701 | ] |
---|
1702 | [contrib: remove the contributed fuse modules and the entire contrib/ directory, which is now empty |
---|
1703 | zooko@zooko.com**20110712153229 |
---|
1704 | Ignore-this: 723c4f9e2211027c79d711715d972c5 |
---|
1705 | Also remove a couple of vestigial references to figleaf, which is long gone. |
---|
1706 | fixes #1409 (remove contrib/fuse) |
---|
1707 | ] |
---|
1708 | [add Protovis.js-based download-status timeline visualization |
---|
1709 | Brian Warner <warner@lothar.com>**20110629222606 |
---|
1710 | Ignore-this: 477ccef5c51b30e246f5b6e04ab4a127 |
---|
1711 | |
---|
1712 | provide status overlap info on the webapi t=json output, add decode/decrypt |
---|
1713 | rate tooltips, add zoomin/zoomout buttons |
---|
1714 | ] |
---|
1715 | [add more download-status data, fix tests |
---|
1716 | Brian Warner <warner@lothar.com>**20110629222555 |
---|
1717 | Ignore-this: e9e0b7e0163f1e95858aa646b9b17b8c |
---|
1718 | ] |
---|
1719 | [prepare for viz: improve DownloadStatus events |
---|
1720 | Brian Warner <warner@lothar.com>**20110629222542 |
---|
1721 | Ignore-this: 16d0bde6b734bb501aa6f1174b2b57be |
---|
1722 | |
---|
1723 | consolidate IDownloadStatusHandlingConsumer stuff into DownloadNode |
---|
1724 | ] |
---|
1725 | [docs: fix error in crypto specification that was noticed by Taylor R Campbell <campbell+tahoe@mumble.net> |
---|
1726 | zooko@zooko.com**20110629185711 |
---|
1727 | Ignore-this: b921ed60c1c8ba3c390737fbcbe47a67 |
---|
1728 | ] |
---|
1729 | [setup.py: don't make bin/tahoe.pyscript executable. fixes #1347 |
---|
1730 | david-sarah@jacaranda.org**20110130235809 |
---|
1731 | Ignore-this: 3454c8b5d9c2c77ace03de3ef2d9398a |
---|
1732 | ] |
---|
1733 | [Makefile: remove targets relating to 'setup.py check_auto_deps' which no longer exists. fixes #1345 |
---|
1734 | david-sarah@jacaranda.org**20110626054124 |
---|
1735 | Ignore-this: abb864427a1b91bd10d5132b4589fd90 |
---|
1736 | ] |
---|
1737 | [Makefile: add 'make check' as an alias for 'make test'. Also remove an unnecessary dependency of 'test' on 'build' and 'src/allmydata/_version.py'. fixes #1344 |
---|
1738 | david-sarah@jacaranda.org**20110623205528 |
---|
1739 | Ignore-this: c63e23146c39195de52fb17c7c49b2da |
---|
1740 | ] |
---|
1741 | [Rename test_package_initialization.py to (much shorter) test_import.py . |
---|
1742 | Brian Warner <warner@lothar.com>**20110611190234 |
---|
1743 | Ignore-this: 3eb3dbac73600eeff5cfa6b65d65822 |
---|
1744 | |
---|
1745 | The former name was making my 'ls' listings hard to read, by forcing them |
---|
1746 | down to just two columns. |
---|
1747 | ] |
---|
[tests: fix tests to accommodate [20110611153758-92b7f-0ba5e4726fb6318dac28fb762a6512a003f4c430]
1749 | zooko@zooko.com**20110611163741 |
---|
1750 | Ignore-this: 64073a5f39e7937e8e5e1314c1a302d1 |
---|
1751 | Apparently none of the two authors (stercor, terrell), three reviewers (warner, davidsarah, terrell), or one committer (me) actually ran the tests. This is presumably due to #20. |
---|
1752 | fixes #1412 |
---|
1753 | ] |
---|
1754 | [wui: right-align the size column in the WUI |
---|
1755 | zooko@zooko.com**20110611153758 |
---|
1756 | Ignore-this: 492bdaf4373c96f59f90581c7daf7cd7 |
---|
1757 | Thanks to Ted "stercor" Rolle Jr. and Terrell Russell. |
---|
1758 | fixes #1412 |
---|
1759 | ] |
---|
1760 | [docs: three minor fixes |
---|
1761 | zooko@zooko.com**20110610121656 |
---|
1762 | Ignore-this: fec96579eb95aceb2ad5fc01a814c8a2 |
---|
1763 | CREDITS for arc for stats tweak |
---|
1764 | fix link to .zip file in quickstart.rst (thanks to ChosenOne for noticing) |
---|
1765 | English usage tweak |
---|
1766 | ] |
---|
1767 | [docs/running.rst: fix stray HTML (not .rst) link noticed by ChosenOne. |
---|
1768 | david-sarah@jacaranda.org**20110609223719 |
---|
1769 | Ignore-this: fc50ac9c94792dcac6f1067df8ac0d4a |
---|
1770 | ] |
---|
1771 | [server.py: get_latencies now reports percentiles _only_ if there are sufficient observations for the interpretation of the percentile to be unambiguous. |
---|
1772 | wilcoxjg@gmail.com**20110527120135 |
---|
1773 | Ignore-this: 2e7029764bffc60e26f471d7c2b6611e |
---|
1774 | interfaces.py: modified the return type of RIStatsProvider.get_stats to allow for None as a return value |
---|
1775 | NEWS.rst, stats.py: documentation of change to get_latencies |
---|
1776 | stats.rst: now documents percentile modification in get_latencies |
---|
1777 | test_storage.py: test_latencies now expects None in output categories that contain too few samples for the associated percentile to be unambiguously reported. |
---|
1778 | fixes #1392 |
---|
1779 | ] |
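
The sufficiency rule that patch describes can be sketched as follows; this is
a hedged illustration of the idea, not the exact test used in server.py:

    def percentile(samples, fraction):
        """Return the observation at the given percentile fraction (e.g.
        0.999), or None when there are too few samples for that percentile
        to be unambiguous: the 99.9th percentile needs >= 1000 samples."""
        needed = int(round(1 / (1.0 - fraction)))
        if len(samples) < needed:
            return None
        return sorted(samples)[int(len(samples) * fraction)]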
[docs: revert link in relnotes.txt from NEWS.rst to NEWS, since the former did not exist at revision 5000.
david-sarah@jacaranda.org**20110517011214
Ignore-this: 6a5be6e70241e3ec0575641f64343df7
]
[docs: convert NEWS to NEWS.rst and change all references to it.
david-sarah@jacaranda.org**20110517010255
Ignore-this: a820b93ea10577c77e9c8206dbfe770d
]
[docs: remove out-of-date docs/testgrid/introducer.furl and containing directory. fixes #1404
david-sarah@jacaranda.org**20110512140559
Ignore-this: 784548fc5367fac5450df1c46890876d
]
[scripts/common.py: don't assume that the default alias is always 'tahoe' (it is, but the API of get_alias doesn't say so). refs #1342
david-sarah@jacaranda.org**20110130164923
Ignore-this: a271e77ce81d84bb4c43645b891d92eb
]
[setup: don't catch all Exception from check_requirement(), but only PackagingError and ImportError
zooko@zooko.com**20110128142006
Ignore-this: 57d4bc9298b711e4bc9dc832c75295de
I noticed this because I had accidentally inserted a bug which caused AssertionError to be raised from check_requirement().
]
[M-x whitespace-cleanup
zooko@zooko.com**20110510193653
Ignore-this: dea02f831298c0f65ad096960e7df5c7
]
[docs: fix typo in running.rst, thanks to arch_o_median
zooko@zooko.com**20110510193633
Ignore-this: ca06de166a46abbc61140513918e79e8
]
[relnotes.txt: don't claim to work on Cygwin (which has been untested for some time). refs #1342
david-sarah@jacaranda.org**20110204204902
Ignore-this: 85ef118a48453d93fa4cddc32d65b25b
]
[relnotes.txt: forseeable -> foreseeable. refs #1342
david-sarah@jacaranda.org**20110204204116
Ignore-this: 746debc4d82f4031ebf75ab4031b3a9
]
[replace remaining .html docs with .rst docs
zooko@zooko.com**20110510191650
Ignore-this: d557d960a986d4ac8216d1677d236399
Remove install.html (long since deprecated).
Also replace some obsolete references to install.html with references to quickstart.rst.
Fix some broken internal references within docs/historical/historical_known_issues.txt.
Thanks to Ravi Pinjala and Patrick McDonald.
refs #1227
]
[docs: FTP-and-SFTP.rst: fix a minor error and update the information about which version of Twisted fixes #1297
zooko@zooko.com**20110428055232
Ignore-this: b63cfb4ebdbe32fb3b5f885255db4d39
]
[munin tahoe_files plugin: fix incorrect file count
francois@ctrlaltdel.ch**20110428055312
Ignore-this: 334ba49a0bbd93b4a7b06a25697aba34
fixes #1391
]
[corrected "k must never be smaller than N" to "k must never be greater than N"
secorp@allmydata.org**20110425010308
Ignore-this: 233129505d6c70860087f22541805eac
]
[Fix a test failure in test_package_initialization on Python 2.4.x due to exceptions being stringified differently than in later versions of Python. refs #1389
david-sarah@jacaranda.org**20110411190738
Ignore-this: 7847d26bc117c328c679f08a7baee519
]
[tests: add test for including the ImportError message and traceback entry in the summary of errors from importing dependencies. refs #1389
david-sarah@jacaranda.org**20110410155844
Ignore-this: fbecdbeb0d06a0f875fe8d4030aabafa
]
[allmydata/__init__.py: preserve the message and last traceback entry (file, line number, function, and source line) of ImportErrors in the package versions string. fixes #1389
david-sarah@jacaranda.org**20110410155705
Ignore-this: 2f87b8b327906cf8bfca9440a0904900
]
[remove unused variable detected by pyflakes
zooko@zooko.com**20110407172231
Ignore-this: 7344652d5e0720af822070d91f03daf9
]
[allmydata/__init__.py: Nicer reporting of unparseable version numbers in dependencies. fixes #1388
david-sarah@jacaranda.org**20110401202750
Ignore-this: 9c6bd599259d2405e1caadbb3e0d8c7f
]
[update FTP-and-SFTP.rst: the necessary patch is included in Twisted-10.1
Brian Warner <warner@lothar.com>**20110325232511
Ignore-this: d5307faa6900f143193bfbe14e0f01a
]
[control.py: remove all uses of s.get_serverid()
warner@lothar.com**20110227011203
Ignore-this: f80a787953bd7fa3d40e828bde00e855
]
[web: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011159
Ignore-this: a9347d9cf6436537a47edc6efde9f8be
]
[immutable/downloader/fetcher.py: remove all get_serverid() calls
warner@lothar.com**20110227011156
Ignore-this: fb5ef018ade1749348b546ec24f7f09a
]
[immutable/downloader/fetcher.py: fix diversity bug in server-response handling
warner@lothar.com**20110227011153
Ignore-this: bcd62232c9159371ae8a16ff63d22c1b

When blocks terminate (either COMPLETE or CORRUPT/DEAD/BADSEGNUM), the
_shares_from_server dict was being popped incorrectly (using shnum as the
index instead of serverid). I'm still thinking through the consequences of
this bug. It was probably benign and really hard to detect. I think it would
cause us to incorrectly believe that we're pulling too many shares from a
server, and thus prefer a different server rather than asking for a second
share from the first server. The diversity code is intended to spread out the
number of shares simultaneously being requested from each server, but with
this bug, it might be spreading out the total number of shares requested at
all, not just simultaneously. (note that SegmentFetcher is scoped to a single
segment, so the effect doesn't last very long).
]
[immutable/downloader/share.py: reduce get_serverid(), one left, update ext deps
warner@lothar.com**20110227011150
Ignore-this: d8d56dd8e7b280792b40105e13664554

test_download.py: create+check MyShare instances better, make sure they share
Server objects, now that finder.py cares
]
[immutable/downloader/finder.py: reduce use of get_serverid(), one left
warner@lothar.com**20110227011146
Ignore-this: 5785be173b491ae8a78faf5142892020
]
[immutable/offloaded.py: reduce use of get_serverid() a bit more
warner@lothar.com**20110227011142
Ignore-this: b48acc1b2ae1b311da7f3ba4ffba38f
]
[immutable/upload.py: reduce use of get_serverid()
warner@lothar.com**20110227011138
Ignore-this: ffdd7ff32bca890782119a6e9f1495f6
]
[immutable/checker.py: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011134
Ignore-this: e480a37efa9e94e8016d826c492f626e
]
[add remaining get_* methods to storage_client.Server, NoNetworkServer, and
warner@lothar.com**20110227011132
Ignore-this: 6078279ddf42b179996a4b53bee8c421
MockIServer stubs
]
[upload.py: rearrange _make_trackers a bit, no behavior changes
warner@lothar.com**20110227011128
Ignore-this: 296d4819e2af452b107177aef6ebb40f
]
[happinessutil.py: finally rename merge_peers to merge_servers
warner@lothar.com**20110227011124
Ignore-this: c8cd381fea1dd888899cb71e4f86de6e
]
[test_upload.py: factor out FakeServerTracker
warner@lothar.com**20110227011120
Ignore-this: 6c182cba90e908221099472cc159325b
]
[test_upload.py: server-vs-tracker cleanup
warner@lothar.com**20110227011115
Ignore-this: 2915133be1a3ba456e8603885437e03
]
[happinessutil.py: server-vs-tracker cleanup
warner@lothar.com**20110227011111
Ignore-this: b856c84033562d7d718cae7cb01085a9
]
[upload.py: more tracker-vs-server cleanup
warner@lothar.com**20110227011107
Ignore-this: bb75ed2afef55e47c085b35def2de315
]
[upload.py: fix var names to avoid confusion between 'trackers' and 'servers'
warner@lothar.com**20110227011103
Ignore-this: 5d5e3415b7d2732d92f42413c25d205d
]
[refactor: s/peer/server/ in immutable/upload, happinessutil.py, test_upload
warner@lothar.com**20110227011100
Ignore-this: 7ea858755cbe5896ac212a925840fe68

No behavioral changes, just updating variable/method names and log messages.
The effects outside these three files should be minimal: some exception
messages changed (to say "server" instead of "peer"), and some internal class
names were changed. A few things still use "peer" to minimize external
changes, like UploadResults.timings["peer_selection"] and
happinessutil.merge_peers, which can be changed later.
]
[storage_client.py: clean up test_add_server/test_add_descriptor, remove .test_servers
warner@lothar.com**20110227011056
Ignore-this: efad933e78179d3d5fdcd6d1ef2b19cc
]
[test_client.py, upload.py:: remove KiB/MiB/etc constants, and other dead code
warner@lothar.com**20110227011051
Ignore-this: dc83c5794c2afc4f81e592f689c0dc2d
]
[test: increase timeout on a network test because Francois's ARM machine hit that timeout
zooko@zooko.com**20110317165909
Ignore-this: 380c345cdcbd196268ca5b65664ac85b
I'm skeptical that the test was proceeding correctly but ran out of time. It seems more likely that it had gotten hung. But if we raise the timeout to an even more extravagant number then we can be even more certain that the test was never going to finish.
]
[docs/configuration.rst: add a "Frontend Configuration" section
Brian Warner <warner@lothar.com>**20110222014323
Ignore-this: 657018aa501fe4f0efef9851628444ca

this points to docs/frontends/*.rst, which were previously underlinked
]
[web/filenode.py: avoid calling req.finish() on closed HTTP connections. Closes #1366
"Brian Warner <warner@lothar.com>"**20110221061544
Ignore-this: 799d4de19933f2309b3c0c19a63bb888
]
[Add unit tests for cross_check_pkg_resources_versus_import, and a regression test for ref #1355. This requires a little refactoring to make it testable.
david-sarah@jacaranda.org**20110221015817
Ignore-this: 51d181698f8c20d3aca58b057e9c475a
]
[allmydata/__init__.py: .name was used in place of the correct .__name__ when printing an exception. Also, robustify string formatting by using %r instead of %s in some places. fixes #1355.
david-sarah@jacaranda.org**20110221020125
Ignore-this: b0744ed58f161bf188e037bad077fc48
]
[Refactor StorageFarmBroker handling of servers
Brian Warner <warner@lothar.com>**20110221015804
Ignore-this: 842144ed92f5717699b8f580eab32a51

Pass around IServer instance instead of (peerid, rref) tuple. Replace
"descriptor" with "server". Other replacements:

get_all_servers -> get_connected_servers/get_known_servers
get_servers_for_index -> get_servers_for_psi (now returns IServers)

This change still needs to be pushed further down: lots of code is now
getting the IServer and then distributing (peerid, rref) internally.
Instead, it ought to distribute the IServer internally and delay
extracting a serverid or rref until the last moment.

no_network.py was updated to retain parallelism.
]
[TAG allmydata-tahoe-1.8.2
warner@lothar.com**20110131020101]
Patch bundle hash:
91397b6648481ccb96aa820cb85e769b33383c28