Ticket #1392: testgetlatencies.darcs.patch

File testgetlatencies.darcs.patch, 18.4 KB (added by arch_o_median, at 2011-04-14T22:56:44Z)
Fri Mar 25 14:35:14 MDT 2011  wilcoxjg@gmail.com
  * storage: new mocking tests of storage server read and write
  There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem, whereas these tests mock all file system calls.

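The mocking pattern these tests rely on is mock.patch('__builtin__.open') with a side_effect function that routes each expected path to a canned response, so any unmocked filesystem access fails loudly. A minimal sketch of the idea, assuming Python 2 and the mock library (the path and function names here are illustrative, not from the patch):

  import mock
  from StringIO import StringIO

  @mock.patch('__builtin__.open')
  def demo(mockopen):
      def call_open(fname, mode):
          # Serve the one path the code under test is allowed to touch;
          # any other open() attempt raises instead of hitting the OS.
          if fname == 'state.file':
              return StringIO('contents')
          raise IOError(2, "No such file or directory: '%s'" % fname)
      mockopen.side_effect = call_open

      f = open('state.file', 'rb')  # answered by call_open, not the OS
      assert f.read() == 'contents'

  demo()
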
Thu Apr 14 16:48:23 MDT 2011  zooko@zooko.com
  * test_server.py --> test_backends.py:  server.py: added testing of get_latencies in StorageServer
  This patch tests both coverage and the handling of small samples in the get_latencies method of StorageServer.  get_latencies now distinguishes between highly repetitive latencies and small sample sizes.  This is of most concern at the big end of the latency distribution, although the ambiguity increases in general as the sample size decreases.

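The behavior the second patch introduces can be summarized in a few lines. This is a simplified sketch of the post-patch logic, not the actual server.py code (the real method computes several more percentiles): categories with fewer than 1000 samples report None, because their high percentiles are too ambiguous to trust.

  def get_latencies(latencies_by_category):
      output = {}
      for category, latencies in latencies_by_category.items():
          samples = sorted(latencies)
          count = len(samples)
          if count < 1000:
              # Too few samples to distinguish the high percentiles.
              output[category] = None
              continue
          stats = {}
          stats["mean"] = sum(samples) / count
          stats["01_0_percentile"] = samples[int(0.01 * count)]
          stats["10_0_percentile"] = samples[int(0.1 * count)]
          stats["99_9_percentile"] = samples[int(0.999 * count)]
          output[category] = stats
      return output

  print get_latencies({"read": [0] * 10})    # {'read': None}
  print get_latencies({"read": [0] * 1000})  # {'read': {...full stats...}}
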
New patches:

[storage: new mocking tests of storage server read and write
wilcoxjg@gmail.com**20110325203514
 Ignore-this: df65c3c4f061dd1516f88662023fdb41
 There are already tests of read and write functionality in test_storage.py, but those tests let the code under test use a real filesystem, whereas these tests mock all file system calls.
] {
addfile ./src/allmydata/test/test_server.py
hunk ./src/allmydata/test/test_server.py 1
+from twisted.trial import unittest
+
+from StringIO import StringIO
+
+from allmydata.test.common_util import ReallyEqualMixin
+
+import mock
+
+# This is the code that we're going to be testing.
+from allmydata.storage.server import StorageServer
+
+# The following share file contents were generated with
+# storage.immutable.ShareFile from Tahoe-LAFS v1.8.2
+# with share data == 'a'.
+share_data = 'a\x00\x00\x00\x00xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\x00(\xde\x80'
+share_file_data = '\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x01' + share_data
+
+sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
+
+class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
+    @mock.patch('__builtin__.open')
+    def test_create_server(self, mockopen):
+        """ This tests whether a server instance can be constructed. """
+
+        def call_open(fname, mode):
+            if fname == 'testdir/bucket_counter.state':
+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
+            elif fname == 'testdir/lease_checker.state':
+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
+            elif fname == 'testdir/lease_checker.history':
+                return StringIO()
+        mockopen.side_effect = call_open
+
+        # Now begin the test.
+        s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
+
+        # You passed!
+
+class TestServer(unittest.TestCase, ReallyEqualMixin):
+    @mock.patch('__builtin__.open')
+    def setUp(self, mockopen):
+        def call_open(fname, mode):
+            if fname == 'testdir/bucket_counter.state':
+                raise IOError(2, "No such file or directory: 'testdir/bucket_counter.state'")
+            elif fname == 'testdir/lease_checker.state':
+                raise IOError(2, "No such file or directory: 'testdir/lease_checker.state'")
+            elif fname == 'testdir/lease_checker.history':
+                return StringIO()
+        mockopen.side_effect = call_open
+
+        self.s = StorageServer('testdir', 'testnodeidxxxxxxxxxx')
+
+
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_write_share(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
+        """ This tests whether the server correctly writes a share via remote_allocate_buckets and remote_write. """
+
+        def call_listdir(dirname):
+            self.failUnlessReallyEqual(dirname, 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
+            raise OSError(2, "No such file or directory: 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a'")
+
+        mocklistdir.side_effect = call_listdir
+
+        class MockFile:
+            def __init__(self):
+                self.buffer = ''
+                self.pos = 0
+            def write(self, instring):
+                begin = self.pos
+                padlen = begin - len(self.buffer)
+                if padlen > 0:
+                    self.buffer += '\x00' * padlen
+                end = self.pos + len(instring)
+                self.buffer = self.buffer[:begin]+instring+self.buffer[end:]
+                self.pos = end
+            def close(self):
+                pass
+            def seek(self, pos):
+                self.pos = pos
+            def read(self, numberbytes):
+                return self.buffer[self.pos:self.pos+numberbytes]
+            def tell(self):
+                return self.pos
+
+        mocktime.return_value = 0
+
+        sharefile = MockFile()
+        def call_open(fname, mode):
+            self.failUnlessReallyEqual(fname, 'testdir/shares/incoming/or/orsxg5dtorxxeylhmvpws3temv4a/0' )
+            return sharefile
+
+        mockopen.side_effect = call_open
+        # Now begin the test.
+        alreadygot, bs = self.s.remote_allocate_buckets('teststorage_index', 'x'*32, 'y'*32, set((0,)), 1, mock.Mock())
+        print bs
+        bs[0].remote_write(0, 'a')
+        self.failUnlessReallyEqual(sharefile.buffer, share_file_data)
+
+
+    @mock.patch('os.path.exists')
+    @mock.patch('os.path.getsize')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    def test_read_share(self, mocklistdir, mockopen, mockgetsize, mockexists):
+        """ This tests whether the code correctly finds and reads
+        shares written out by old (Tahoe-LAFS <= v1.8.2)
+        servers. There is a similar test in test_download, but that one
+        is from the perspective of the client and exercises a deeper
+        stack of code. This one is for exercising just the
+        StorageServer object. """
+
+        def call_listdir(dirname):
+            self.failUnlessReallyEqual(dirname,'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a')
+            return ['0']
+
+        mocklistdir.side_effect = call_listdir
+
+        def call_open(fname, mode):
+            self.failUnlessReallyEqual(fname, sharefname)
+            self.failUnless('r' in mode, mode)
+            self.failUnless('b' in mode, mode)
+
+            return StringIO(share_file_data)
+        mockopen.side_effect = call_open
+
+        datalen = len(share_file_data)
+        def call_getsize(fname):
+            self.failUnlessReallyEqual(fname, sharefname)
+            return datalen
+        mockgetsize.side_effect = call_getsize
+
+        def call_exists(fname):
+            self.failUnlessReallyEqual(fname, sharefname)
+            return True
+        mockexists.side_effect = call_exists
+
+        # Now begin the test.
+        bs = self.s.remote_get_buckets('teststorage_index')
+
+        self.failUnlessEqual(len(bs), 1)
+        b = bs[0]
+        self.failUnlessReallyEqual(b.remote_read(0, datalen), share_data)
+        # If you try to read past the end you get as much data as is there.
+        self.failUnlessReallyEqual(b.remote_read(0, datalen+20), share_data)
+        # If you start reading past the end of the file you get the empty string.
+        self.failUnlessReallyEqual(b.remote_read(datalen+1, 3), '')
}
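
The MockFile class in test_write_share above emulates the one filesystem behavior that share writing depends on: writing after a seek past the current end pads the gap with NUL bytes, the way a sparse write on a real file would. An illustrative use of the class as defined in that test (not code from the patch):

  f = MockFile()
  f.seek(4)
  f.write('share')
  # Positions 0-3 were never written, so they read back as NULs:
  assert f.buffer == '\x00\x00\x00\x00share'
  assert f.tell() == 9
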
[test_server.py --> test_backends.py:  server.py: added testing of get_latencies in StorageServer
zooko@zooko.com**20110414224823
 Ignore-this: 3e266de570f725f768d18c131e2c6d8
 This patch tests both coverage and the handling of small samples in the get_latencies method of StorageServer.  get_latencies now distinguishes between highly repetitive latencies and small sample sizes.  This is of most concern at the big end of the latency distribution, although the ambiguity increases in general as the sample size decreases.
] {
move ./src/allmydata/test/test_server.py ./src/allmydata/test/test_backends.py
hunk ./src/allmydata/storage/server.py 134
             samples = self.latencies[category][:]
             samples.sort()
             count = len(samples)
+            if count < 1000:
+                output[category] = None
+                continue
+            samples.sort()
+            stats = {}
             stats["mean"] = sum(samples) / count
             stats["01_0_percentile"] = samples[int(0.01 * count)]
             stats["10_0_percentile"] = samples[int(0.1 * count)]
hunk ./src/allmydata/test/test_backends.py 21
 sharefname = 'testdir/shares/or/orsxg5dtorxxeylhmvpws3temv4a/0'
 
 class TestServerConstruction(unittest.TestCase, ReallyEqualMixin):
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
+    @mock.patch('__builtin__.open')
+    @mock.patch('os.listdir')
+    @mock.patch('os.path.isdir')
+    def test_create_server_null_backend(self, mockisdir, mocklistdir, mockopen, mockmkdir, mocktime):
+        """ This tests whether a server instance can be constructed
+        with a null backend. The server instance fails the test if it
+        tries to read or write to the file system. """
+
+        # Now begin the test.
+        s = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend(),stats_provider=mock.Mock())
+
+        # The null backend should not talk to the os.
+        self.failIf(mockisdir.called)
+        self.failIf(mocklistdir.called)
+        self.failIf(mockopen.called)
+        self.failIf(mockmkdir.called)
+        #self.failIf(mocktime.called)
+
+        #  The server's representation should not change.
+        self.failUnlessReallyEqual(s.__repr__(),'<StorageServer orsxg5do>')
+
+        #  There should be no latencies when the backend is null.  *** The "cancel" category is left out to increase coverage.  This seems like a dubious decision.  Must consult with more knowledgeable persons.
+        numbersamples = 1001
+        for category in ["allocate","write","close","read","get",\
+                         "writev","readv","add-lease","renew"]:#,"cancel"]:
+            [s.add_latency(category,x) for x in numbersamples*[0]]
+        l = s.get_latencies()
+
+        # Now test that get_latencies correctly reports None for small sample-sizes.
+        s1 = StorageServer('testnodeidxxxxxxxxxx', backend=NullBackend(),stats_provider=mock.Mock())
+        #  There should be no latencies when the backend is null.  *** The "cancel" category is left out to increase coverage.  This seems like a dubious decision.  Must consult with more knowledgeable persons.
+        numbersamples = 10
+        for category in ["allocate","write","close","read","get",\
+                         "writev","readv","add-lease","renew"]:#,"cancel"]:
+            [s1.add_latency(category,x) for x in numbersamples*[0]]
+        l1 = s1.get_latencies()
+
+        for key in l1.keys():
+            self.failUnlessReallyEqual(l1[key],None)
+        # You passed!
+
+    @mock.patch('time.time')
+    @mock.patch('os.mkdir')
     @mock.patch('__builtin__.open')
     def test_create_server(self, mockopen):
         """ This tests whether a server instance can be constructed. """
}
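
The 1000-sample cutoff added above is tied to the percentiles the method reports: the full method goes up to a 99.9th percentile, and samples[int(0.999 * count)] only becomes a position distinct from the 99th percentile once count reaches 1000. A quick arithmetic check (not code from the patch):

  count = 100
  print int(0.99 * count), int(0.999 * count)   # 99 99 -- the same sample
  count = 1000
  print int(0.99 * count), int(0.999 * count)   # 990 999 -- distinct samples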

Context:

[Fix a test failure in test_package_initialization on Python 2.4.x due to exceptions being stringified differently than in later versions of Python. refs #1389
david-sarah@jacaranda.org**20110411190738
 Ignore-this: 7847d26bc117c328c679f08a7baee519
]
[tests: add test for including the ImportError message and traceback entry in the summary of errors from importing dependencies. refs #1389
david-sarah@jacaranda.org**20110410155844
 Ignore-this: fbecdbeb0d06a0f875fe8d4030aabafa
]
[allmydata/__init__.py: preserve the message and last traceback entry (file, line number, function, and source line) of ImportErrors in the package versions string. fixes #1389
david-sarah@jacaranda.org**20110410155705
 Ignore-this: 2f87b8b327906cf8bfca9440a0904900
]
[remove unused variable detected by pyflakes
zooko@zooko.com**20110407172231
 Ignore-this: 7344652d5e0720af822070d91f03daf9
]
[allmydata/__init__.py: Nicer reporting of unparseable version numbers in dependencies. fixes #1388
david-sarah@jacaranda.org**20110401202750
 Ignore-this: 9c6bd599259d2405e1caadbb3e0d8c7f
]
[update FTP-and-SFTP.rst: the necessary patch is included in Twisted-10.1
Brian Warner <warner@lothar.com>**20110325232511
 Ignore-this: d5307faa6900f143193bfbe14e0f01a
]
[control.py: remove all uses of s.get_serverid()
warner@lothar.com**20110227011203
 Ignore-this: f80a787953bd7fa3d40e828bde00e855
]
[web: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011159
 Ignore-this: a9347d9cf6436537a47edc6efde9f8be
]
[immutable/downloader/fetcher.py: remove all get_serverid() calls
warner@lothar.com**20110227011156
 Ignore-this: fb5ef018ade1749348b546ec24f7f09a
]
[immutable/downloader/fetcher.py: fix diversity bug in server-response handling
warner@lothar.com**20110227011153
 Ignore-this: bcd62232c9159371ae8a16ff63d22c1b
 
 When blocks terminate (either COMPLETE or CORRUPT/DEAD/BADSEGNUM), the
 _shares_from_server dict was being popped incorrectly (using shnum as the
 index instead of serverid). I'm still thinking through the consequences of
 this bug. It was probably benign and really hard to detect. I think it would
 cause us to incorrectly believe that we're pulling too many shares from a
 server, and thus prefer a different server rather than asking for a second
 share from the first server. The diversity code is intended to spread out the
 number of shares simultaneously being requested from each server, but with
 this bug, it might be spreading out the total number of shares requested at
 all, not just simultaneously. (note that SegmentFetcher is scoped to a single
 segment, so the effect doesn't last very long).
]
[immutable/downloader/share.py: reduce get_serverid(), one left, update ext deps
warner@lothar.com**20110227011150
 Ignore-this: d8d56dd8e7b280792b40105e13664554
 
 test_download.py: create+check MyShare instances better, make sure they share
 Server objects, now that finder.py cares
]
[immutable/downloader/finder.py: reduce use of get_serverid(), one left
warner@lothar.com**20110227011146
 Ignore-this: 5785be173b491ae8a78faf5142892020
]
[immutable/offloaded.py: reduce use of get_serverid() a bit more
warner@lothar.com**20110227011142
 Ignore-this: b48acc1b2ae1b311da7f3ba4ffba38f
]
[immutable/upload.py: reduce use of get_serverid()
warner@lothar.com**20110227011138
 Ignore-this: ffdd7ff32bca890782119a6e9f1495f6
]
[immutable/checker.py: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011134
 Ignore-this: e480a37efa9e94e8016d826c492f626e
]
[add remaining get_* methods to storage_client.Server, NoNetworkServer, and
warner@lothar.com**20110227011132
 Ignore-this: 6078279ddf42b179996a4b53bee8c421
 MockIServer stubs
]
[upload.py: rearrange _make_trackers a bit, no behavior changes
warner@lothar.com**20110227011128
 Ignore-this: 296d4819e2af452b107177aef6ebb40f
]
[happinessutil.py: finally rename merge_peers to merge_servers
warner@lothar.com**20110227011124
 Ignore-this: c8cd381fea1dd888899cb71e4f86de6e
]
[test_upload.py: factor out FakeServerTracker
warner@lothar.com**20110227011120
 Ignore-this: 6c182cba90e908221099472cc159325b
]
[test_upload.py: server-vs-tracker cleanup
warner@lothar.com**20110227011115
 Ignore-this: 2915133be1a3ba456e8603885437e03
]
[happinessutil.py: server-vs-tracker cleanup
warner@lothar.com**20110227011111
 Ignore-this: b856c84033562d7d718cae7cb01085a9
]
[upload.py: more tracker-vs-server cleanup
warner@lothar.com**20110227011107
 Ignore-this: bb75ed2afef55e47c085b35def2de315
]
[upload.py: fix var names to avoid confusion between 'trackers' and 'servers'
warner@lothar.com**20110227011103
 Ignore-this: 5d5e3415b7d2732d92f42413c25d205d
]
[refactor: s/peer/server/ in immutable/upload, happinessutil.py, test_upload
warner@lothar.com**20110227011100
 Ignore-this: 7ea858755cbe5896ac212a925840fe68
 
 No behavioral changes, just updating variable/method names and log messages.
 The effects outside these three files should be minimal: some exception
 messages changed (to say "server" instead of "peer"), and some internal class
 names were changed. A few things still use "peer" to minimize external
 changes, like UploadResults.timings["peer_selection"] and
 happinessutil.merge_peers, which can be changed later.
]
[storage_client.py: clean up test_add_server/test_add_descriptor, remove .test_servers
warner@lothar.com**20110227011056
 Ignore-this: efad933e78179d3d5fdcd6d1ef2b19cc
]
[test_client.py, upload.py:: remove KiB/MiB/etc constants, and other dead code
warner@lothar.com**20110227011051
 Ignore-this: dc83c5794c2afc4f81e592f689c0dc2d
]
[test: increase timeout on a network test because Francois's ARM machine hit that timeout
zooko@zooko.com**20110317165909
 Ignore-this: 380c345cdcbd196268ca5b65664ac85b
 I'm skeptical that the test was proceeding correctly but ran out of time. It seems more likely that it had gotten hung. But if we raise the timeout to an even more extravagant number then we can be even more certain that the test was never going to finish.
]
[docs/configuration.rst: add a "Frontend Configuration" section
Brian Warner <warner@lothar.com>**20110222014323
 Ignore-this: 657018aa501fe4f0efef9851628444ca
 
 this points to docs/frontends/*.rst, which were previously underlinked
]
[web/filenode.py: avoid calling req.finish() on closed HTTP connections. Closes #1366
"Brian Warner <warner@lothar.com>"**20110221061544
 Ignore-this: 799d4de19933f2309b3c0c19a63bb888
]
[Add unit tests for cross_check_pkg_resources_versus_import, and a regression test for ref #1355. This requires a little refactoring to make it testable.
david-sarah@jacaranda.org**20110221015817
 Ignore-this: 51d181698f8c20d3aca58b057e9c475a
]
[allmydata/__init__.py: .name was used in place of the correct .__name__ when printing an exception. Also, robustify string formatting by using %r instead of %s in some places. fixes #1355.
david-sarah@jacaranda.org**20110221020125
 Ignore-this: b0744ed58f161bf188e037bad077fc48
]
[Refactor StorageFarmBroker handling of servers
Brian Warner <warner@lothar.com>**20110221015804
 Ignore-this: 842144ed92f5717699b8f580eab32a51
 
 Pass around IServer instance instead of (peerid, rref) tuple. Replace
 "descriptor" with "server". Other replacements:
 
  get_all_servers -> get_connected_servers/get_known_servers
  get_servers_for_index -> get_servers_for_psi (now returns IServers)
 
 This change still needs to be pushed further down: lots of code is now
 getting the IServer and then distributing (peerid, rref) internally.
 Instead, it ought to distribute the IServer internally and delay
 extracting a serverid or rref until the last moment.
 
 no_network.py was updated to retain parallelism.
]
[TAG allmydata-tahoe-1.8.2
warner@lothar.com**20110131020101]
Patch bundle hash:
88f420ebe3b0861ada6862c7fcb68ea2b7ca3685