Tue Apr 26 14:59:58 MDT 2011 wilcoxjg@gmail.com
  * test_storage.py: test_latencies now expects None in output categories that contain too few samples for the associated percentile to be unambiguously reported.

Tue Apr 26 15:16:41 MDT 2011 wilcoxjg@gmail.com
  * server.py: get_latencies now reports percentiles _only_ if there are sufficient observations for the interpretation of the percentile to be unambiguous.

Thu May 19 11:10:41 MDT 2011 wilcoxjg@gmail.com
  * stats.rst: now documents percentile modification in get_latencies

Mon May 23 16:29:08 MDT 2011 wilcoxjg@gmail.com
  * interfaces.py: modified the return type of RIStatsProvider.get_stats to allow for None as a return value

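To illustrate the behaviour these patches introduce, here is a hypothetical get_latencies() return value for a category holding the twenty samples 0.0 .. 19.0 that the test below adds under "write" (an editorial sketch derived from the test expectations, not part of the bundle): percentiles that need at least 100 observations (1%, 99%) or 1000 observations (99.9%) are reported as None.

    # Hypothetical output for a "write" category with 20 samples (0.0 .. 19.0):
    {"write": {"samplesize": 20,
               "mean": 9.5,
               "01_0_percentile": None,   # needs >= 100 observations
               "10_0_percentile": 2.0,
               "50_0_percentile": 10.0,
               "90_0_percentile": 18.0,
               "95_0_percentile": 19.0,
               "99_0_percentile": None,   # needs >= 100 observations
               "99_9_percentile": None}}  # needs >= 1000 observations
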
New patches:

[test_storage.py: test_latencies now expects None in output categories that contain too few samples for the associated percentile to be unambiguously reported.
wilcoxjg@gmail.com**20110426205958
 Ignore-this: 2cf1920eb878f97394940584c470f43a
] {
hunk ./src/allmydata/test/test_storage.py 1314
 ss.add_latency("allocate", 1.0 * i)
 for i in range(1000):
 ss.add_latency("renew", 1.0 * i)
+ for i in range(20):
+ ss.add_latency("write", 1.0 * i)
 for i in range(10):
 ss.add_latency("cancel", 2.0 * i)
 ss.add_latency("get", 5.0)
hunk ./src/allmydata/test/test_storage.py 1323
 output = ss.get_latencies()

 self.failUnlessEqual(sorted(output.keys()),
- sorted(["allocate", "renew", "cancel", "get"]))
+ sorted(["allocate", "renew", "cancel", "write", "get"]))
 self.failUnlessEqual(len(ss.latencies["allocate"]), 1000)
 self.failUnless(abs(output["allocate"]["mean"] - 9500) < 1, output)
 self.failUnless(abs(output["allocate"]["01_0_percentile"] - 9010) < 1, output)
hunk ./src/allmydata/test/test_storage.py 1344
 self.failUnless(abs(output["renew"]["99_0_percentile"] - 990) < 1, output)
 self.failUnless(abs(output["renew"]["99_9_percentile"] - 999) < 1, output)

+ self.failUnlessEqual(len(ss.latencies["write"]), 20)
+ self.failUnless(abs(output["write"]["mean"] - 9) < 1, output)
+ self.failUnless(output["write"]["01_0_percentile"] == None, output)
+ self.failUnless(abs(output["write"]["10_0_percentile"] - 2) < 1, output)
+ self.failUnless(abs(output["write"]["50_0_percentile"] - 10) < 1, output)
+ self.failUnless(abs(output["write"]["90_0_percentile"] - 18) < 1, output)
+ self.failUnless(abs(output["write"]["95_0_percentile"] - 19) < 1, output)
+ self.failUnless(output["write"]["99_0_percentile"] == None, output)
+ self.failUnless(output["write"]["99_9_percentile"] == None, output)
+
 self.failUnlessEqual(len(ss.latencies["cancel"]), 10)
 self.failUnless(abs(output["cancel"]["mean"] - 9) < 1, output)
hunk ./src/allmydata/test/test_storage.py 1356
- self.failUnless(abs(output["cancel"]["01_0_percentile"] - 0) < 1, output)
+ self.failUnless(output["cancel"]["01_0_percentile"] == None, output)
 self.failUnless(abs(output["cancel"]["10_0_percentile"] - 2) < 1, output)
 self.failUnless(abs(output["cancel"]["50_0_percentile"] - 10) < 1, output)
 self.failUnless(abs(output["cancel"]["90_0_percentile"] - 18) < 1, output)
hunk ./src/allmydata/test/test_storage.py 1360
- self.failUnless(abs(output["cancel"]["95_0_percentile"] - 18) < 1, output)
- self.failUnless(abs(output["cancel"]["99_0_percentile"] - 18) < 1, output)
- self.failUnless(abs(output["cancel"]["99_9_percentile"] - 18) < 1, output)
+ self.failUnless(output["cancel"]["95_0_percentile"] == None, output)
+ self.failUnless(output["cancel"]["99_0_percentile"] == None, output)
+ self.failUnless(output["cancel"]["99_9_percentile"] == None, output)

 self.failUnlessEqual(len(ss.latencies["get"]), 1)
hunk ./src/allmydata/test/test_storage.py 1365
- self.failUnless(abs(output["get"]["mean"] - 5) < 1, output)
- self.failUnless(abs(output["get"]["01_0_percentile"] - 5) < 1, output)
- self.failUnless(abs(output["get"]["10_0_percentile"] - 5) < 1, output)
- self.failUnless(abs(output["get"]["50_0_percentile"] - 5) < 1, output)
- self.failUnless(abs(output["get"]["90_0_percentile"] - 5) < 1, output)
- self.failUnless(abs(output["get"]["95_0_percentile"] - 5) < 1, output)
- self.failUnless(abs(output["get"]["99_0_percentile"] - 5) < 1, output)
- self.failUnless(abs(output["get"]["99_9_percentile"] - 5) < 1, output)
+ self.failUnless(output["get"]["mean"] == None, output)
+ self.failUnless(output["get"]["01_0_percentile"] == None, output)
+ self.failUnless(output["get"]["10_0_percentile"] == None, output)
+ self.failUnless(output["get"]["50_0_percentile"] == None, output)
+ self.failUnless(output["get"]["90_0_percentile"] == None, output)
+ self.failUnless(output["get"]["95_0_percentile"] == None, output)
+ self.failUnless(output["get"]["99_0_percentile"] == None, output)
+ self.failUnless(output["get"]["99_9_percentile"] == None, output)

 def remove_tags(s):
 s = re.sub(r'<[^>]*>', ' ', s)
}
[server.py: get_latencies now reports percentiles _only_ if there are sufficient observations for the interpretation of the percentile to be unambiguous.
wilcoxjg@gmail.com**20110426211641
 Ignore-this: 546001f34d53e35ce2025b05b4ea66b6
] {
hunk ./src/allmydata/storage/server.py 119

 def get_latencies(self):
 """Return a dict, indexed by category, that contains a dict of
- latency numbers for each category. Each dict will contain the
+ latency numbers for each category. If there are sufficient samples
+ for unambiguous interpretation, each dict will contain the
 following keys: mean, 01_0_percentile, 10_0_percentile,
 50_0_percentile (median), 90_0_percentile, 95_0_percentile,
hunk ./src/allmydata/storage/server.py 123
- 99_0_percentile, 99_9_percentile. If no samples have been collected
- for the given category, then that category name will not be present
- in the return value."""
+ 99_0_percentile, 99_9_percentile. If there are insufficient
+ samples for a given percentile to be interpreted unambiguously,
+ that percentile will be reported as None. If no samples have been
+ collected for the given category, then that category name will
+ not be present in the return value."""
 # note that Amazon's Dynamo paper says they use 99.9% percentile.
 output = {}
 for category in self.latencies:
hunk ./src/allmydata/storage/server.py 135
 continue
 stats = {}
 samples = self.latencies[category][:]
- samples.sort()
 count = len(samples)
hunk ./src/allmydata/storage/server.py 136
- stats["mean"] = sum(samples) / count
- stats["01_0_percentile"] = samples[int(0.01 * count)]
- stats["10_0_percentile"] = samples[int(0.1 * count)]
- stats["50_0_percentile"] = samples[int(0.5 * count)]
- stats["90_0_percentile"] = samples[int(0.9 * count)]
- stats["95_0_percentile"] = samples[int(0.95 * count)]
- stats["99_0_percentile"] = samples[int(0.99 * count)]
- stats["99_9_percentile"] = samples[int(0.999 * count)]
+ stats["samplesize"] = count
+ samples.sort()
+ if count > 1:
+ stats["mean"] = sum(samples) / count
+ else:
+ stats["mean"] = None
+
+ orderstatlist = [(0.01, "01_0_percentile", 100), (0.1, "10_0_percentile", 10),\
+ (0.50, "50_0_percentile", 10), (0.90, "90_0_percentile", 10),\
+ (0.95, "95_0_percentile", 20), (0.99, "99_0_percentile", 100),\
+ (0.999, "99_9_percentile", 1000)]
+
+ for percentile, percentilestring, minnumtoobserve in orderstatlist:
+ if count >= minnumtoobserve:
+ stats[percentilestring] = samples[int(percentile*count)]
+ else:
+ stats[percentilestring] = None
+
 output[category] = stats
 return output

}
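For reference, the gating added above reads as the following self-contained sketch (the function name latency_stats is illustrative and not part of the patch; the thresholds and index arithmetic mirror the hunk):

    def latency_stats(samples):
        # Report an order statistic only when the sample is large enough
        # for that percentile to be unambiguous; otherwise report None.
        samples = sorted(samples)
        count = len(samples)
        stats = {"samplesize": count}
        stats["mean"] = sum(samples) / count if count > 1 else None
        orderstatlist = [(0.01, "01_0_percentile", 100), (0.1, "10_0_percentile", 10),
                         (0.50, "50_0_percentile", 10), (0.90, "90_0_percentile", 10),
                         (0.95, "95_0_percentile", 20), (0.99, "99_0_percentile", 100),
                         (0.999, "99_9_percentile", 1000)]
        for percentile, percentilestring, minnumtoobserve in orderstatlist:
            if count >= minnumtoobserve:
                stats[percentilestring] = samples[int(percentile * count)]
            else:
                stats[percentilestring] = None
        return stats
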
[stats.rst: now documents percentile modification in get_latencies
wilcoxjg@gmail.com**20110519171041
 Ignore-this: ab728a6f8d382a046c84e152f00c0171
] hunk ./docs/stats.rst 137
 999 out of the last 1000 operations were faster than the
 given number, and is the same threshold used by Amazon's
 internal SLA, according to the Dynamo paper).
+ Percentiles are only reported in the case of a sufficient
+ number of observations for unambiguous interpretation. For
+ example, the 99.9th percentile is (at the level of thousandths
+ precision) 9 thousandths greater than the 99th
+ percentile for sample sizes greater than or equal to 1000,
+ thus the 99.9th percentile is only reported for samples of 1000
+ or more observations.
+

 **counters.uploader.files_uploaded**

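The 1000-observation threshold quoted above follows from the index arithmetic samples[int(fraction * count)] used by get_latencies(): below that sample size, the 99th and 99.9th percentiles select the same element. A small illustrative check:

    # With 100 samples both indices collapse to 99; with 1000 they differ by 9.
    for count in (100, 1000):
        print(count, int(0.99 * count), int(0.999 * count))
    # 100  -> 99, 99
    # 1000 -> 990, 999
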
[interfaces.py: modified the return type of RIStatsProvider.get_stats to allow for None as a return value
wilcoxjg@gmail.com**20110523222908
 Ignore-this: 569051254e18b521faaba5203c93d10c
] hunk ./src/allmydata/interfaces.py 2398
 stats are instantaneous measures (potentially time averaged
 internally)
 """
- return DictOf(str, DictOf(str, ChoiceOf(float, int, long)))
+ return DictOf(str, DictOf(str, ChoiceOf(float, int, long, None)))

 class RIStatsGatherer(RemoteInterface):
 __remote_name__ = "RIStatsGatherer.tahoe.allmydata.com"

Context:

[docs: revert link in relnotes.txt from NEWS.rst to NEWS, since the former did not exist at revision 5000.
david-sarah@jacaranda.org**20110517011214
 Ignore-this: 6a5be6e70241e3ec0575641f64343df7
]
[docs: convert NEWS to NEWS.rst and change all references to it.
david-sarah@jacaranda.org**20110517010255
 Ignore-this: a820b93ea10577c77e9c8206dbfe770d
]
[docs: remove out-of-date docs/testgrid/introducer.furl and containing directory. fixes #1404
david-sarah@jacaranda.org**20110512140559
 Ignore-this: 784548fc5367fac5450df1c46890876d
]
[scripts/common.py: don't assume that the default alias is always 'tahoe' (it is, but the API of get_alias doesn't say so). refs #1342
david-sarah@jacaranda.org**20110130164923
 Ignore-this: a271e77ce81d84bb4c43645b891d92eb
]
[setup: don't catch all Exception from check_requirement(), but only PackagingError and ImportError
zooko@zooko.com**20110128142006
 Ignore-this: 57d4bc9298b711e4bc9dc832c75295de
 I noticed this because I had accidentally inserted a bug which caused AssertionError to be raised from check_requirement().
]
[M-x whitespace-cleanup
zooko@zooko.com**20110510193653
 Ignore-this: dea02f831298c0f65ad096960e7df5c7
]
[docs: fix typo in running.rst, thanks to arch_o_median
zooko@zooko.com**20110510193633
 Ignore-this: ca06de166a46abbc61140513918e79e8
]
[relnotes.txt: don't claim to work on Cygwin (which has been untested for some time). refs #1342
david-sarah@jacaranda.org**20110204204902
 Ignore-this: 85ef118a48453d93fa4cddc32d65b25b
]
[relnotes.txt: forseeable -> foreseeable. refs #1342
david-sarah@jacaranda.org**20110204204116
 Ignore-this: 746debc4d82f4031ebf75ab4031b3a9
]
[replace remaining .html docs with .rst docs
zooko@zooko.com**20110510191650
 Ignore-this: d557d960a986d4ac8216d1677d236399
 Remove install.html (long since deprecated).
 Also replace some obsolete references to install.html with references to quickstart.rst.
 Fix some broken internal references within docs/historical/historical_known_issues.txt.
 Thanks to Ravi Pinjala and Patrick McDonald.
 refs #1227
]
[docs: FTP-and-SFTP.rst: fix a minor error and update the information about which version of Twisted fixes #1297
zooko@zooko.com**20110428055232
 Ignore-this: b63cfb4ebdbe32fb3b5f885255db4d39
]
[munin tahoe_files plugin: fix incorrect file count
francois@ctrlaltdel.ch**20110428055312
 Ignore-this: 334ba49a0bbd93b4a7b06a25697aba34
 fixes #1391
]
[corrected "k must never be smaller than N" to "k must never be greater than N"
secorp@allmydata.org**20110425010308
 Ignore-this: 233129505d6c70860087f22541805eac
]
[Fix a test failure in test_package_initialization on Python 2.4.x due to exceptions being stringified differently than in later versions of Python. refs #1389
david-sarah@jacaranda.org**20110411190738
 Ignore-this: 7847d26bc117c328c679f08a7baee519
]
[tests: add test for including the ImportError message and traceback entry in the summary of errors from importing dependencies. refs #1389
david-sarah@jacaranda.org**20110410155844
 Ignore-this: fbecdbeb0d06a0f875fe8d4030aabafa
]
[allmydata/__init__.py: preserve the message and last traceback entry (file, line number, function, and source line) of ImportErrors in the package versions string. fixes #1389
david-sarah@jacaranda.org**20110410155705
 Ignore-this: 2f87b8b327906cf8bfca9440a0904900
]
[remove unused variable detected by pyflakes
zooko@zooko.com**20110407172231
 Ignore-this: 7344652d5e0720af822070d91f03daf9
]
[allmydata/__init__.py: Nicer reporting of unparseable version numbers in dependencies. fixes #1388
david-sarah@jacaranda.org**20110401202750
 Ignore-this: 9c6bd599259d2405e1caadbb3e0d8c7f
]
[update FTP-and-SFTP.rst: the necessary patch is included in Twisted-10.1
Brian Warner <warner@lothar.com>**20110325232511
 Ignore-this: d5307faa6900f143193bfbe14e0f01a
]
[control.py: remove all uses of s.get_serverid()
warner@lothar.com**20110227011203
 Ignore-this: f80a787953bd7fa3d40e828bde00e855
]
[web: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011159
 Ignore-this: a9347d9cf6436537a47edc6efde9f8be
]
[immutable/downloader/fetcher.py: remove all get_serverid() calls
warner@lothar.com**20110227011156
 Ignore-this: fb5ef018ade1749348b546ec24f7f09a
]
[immutable/downloader/fetcher.py: fix diversity bug in server-response handling
warner@lothar.com**20110227011153
 Ignore-this: bcd62232c9159371ae8a16ff63d22c1b

 When blocks terminate (either COMPLETE or CORRUPT/DEAD/BADSEGNUM), the
 _shares_from_server dict was being popped incorrectly (using shnum as the
 index instead of serverid). I'm still thinking through the consequences of
 this bug. It was probably benign and really hard to detect. I think it would
 cause us to incorrectly believe that we're pulling too many shares from a
 server, and thus prefer a different server rather than asking for a second
 share from the first server. The diversity code is intended to spread out the
 number of shares simultaneously being requested from each server, but with
 this bug, it might be spreading out the total number of shares requested at
 all, not just simultaneously. (note that SegmentFetcher is scoped to a single
 segment, so the effect doesn't last very long).
]
[immutable/downloader/share.py: reduce get_serverid(), one left, update ext deps
warner@lothar.com**20110227011150
 Ignore-this: d8d56dd8e7b280792b40105e13664554

 test_download.py: create+check MyShare instances better, make sure they share
 Server objects, now that finder.py cares
]
[immutable/downloader/finder.py: reduce use of get_serverid(), one left
warner@lothar.com**20110227011146
 Ignore-this: 5785be173b491ae8a78faf5142892020
]
[immutable/offloaded.py: reduce use of get_serverid() a bit more
warner@lothar.com**20110227011142
 Ignore-this: b48acc1b2ae1b311da7f3ba4ffba38f
]
[immutable/upload.py: reduce use of get_serverid()
warner@lothar.com**20110227011138
 Ignore-this: ffdd7ff32bca890782119a6e9f1495f6
]
[immutable/checker.py: remove some uses of s.get_serverid(), not all
warner@lothar.com**20110227011134
 Ignore-this: e480a37efa9e94e8016d826c492f626e
]
[add remaining get_* methods to storage_client.Server, NoNetworkServer, and
warner@lothar.com**20110227011132
 Ignore-this: 6078279ddf42b179996a4b53bee8c421
 MockIServer stubs
]
[upload.py: rearrange _make_trackers a bit, no behavior changes
warner@lothar.com**20110227011128
 Ignore-this: 296d4819e2af452b107177aef6ebb40f
]
[happinessutil.py: finally rename merge_peers to merge_servers
warner@lothar.com**20110227011124
 Ignore-this: c8cd381fea1dd888899cb71e4f86de6e
]
[test_upload.py: factor out FakeServerTracker
warner@lothar.com**20110227011120
 Ignore-this: 6c182cba90e908221099472cc159325b
]
[test_upload.py: server-vs-tracker cleanup
warner@lothar.com**20110227011115
 Ignore-this: 2915133be1a3ba456e8603885437e03
]
[happinessutil.py: server-vs-tracker cleanup
warner@lothar.com**20110227011111
 Ignore-this: b856c84033562d7d718cae7cb01085a9
]
[upload.py: more tracker-vs-server cleanup
warner@lothar.com**20110227011107
 Ignore-this: bb75ed2afef55e47c085b35def2de315
]
[upload.py: fix var names to avoid confusion between 'trackers' and 'servers'
warner@lothar.com**20110227011103
 Ignore-this: 5d5e3415b7d2732d92f42413c25d205d
]
[refactor: s/peer/server/ in immutable/upload, happinessutil.py, test_upload
warner@lothar.com**20110227011100
 Ignore-this: 7ea858755cbe5896ac212a925840fe68

 No behavioral changes, just updating variable/method names and log messages.
 The effects outside these three files should be minimal: some exception
 messages changed (to say "server" instead of "peer"), and some internal class
 names were changed. A few things still use "peer" to minimize external
 changes, like UploadResults.timings["peer_selection"] and
 happinessutil.merge_peers, which can be changed later.
]
[storage_client.py: clean up test_add_server/test_add_descriptor, remove .test_servers
warner@lothar.com**20110227011056
 Ignore-this: efad933e78179d3d5fdcd6d1ef2b19cc
]
[test_client.py, upload.py:: remove KiB/MiB/etc constants, and other dead code
warner@lothar.com**20110227011051
 Ignore-this: dc83c5794c2afc4f81e592f689c0dc2d
]
[test: increase timeout on a network test because Francois's ARM machine hit that timeout
zooko@zooko.com**20110317165909
 Ignore-this: 380c345cdcbd196268ca5b65664ac85b
 I'm skeptical that the test was proceeding correctly but ran out of time. It seems more likely that it had gotten hung. But if we raise the timeout to an even more extravagant number then we can be even more certain that the test was never going to finish.
]
[docs/configuration.rst: add a "Frontend Configuration" section
Brian Warner <warner@lothar.com>**20110222014323
 Ignore-this: 657018aa501fe4f0efef9851628444ca

 this points to docs/frontends/*.rst, which were previously underlinked
]
[web/filenode.py: avoid calling req.finish() on closed HTTP connections. Closes #1366
"Brian Warner <warner@lothar.com>"**20110221061544
 Ignore-this: 799d4de19933f2309b3c0c19a63bb888
]
[Add unit tests for cross_check_pkg_resources_versus_import, and a regression test for ref #1355. This requires a little refactoring to make it testable.
david-sarah@jacaranda.org**20110221015817
 Ignore-this: 51d181698f8c20d3aca58b057e9c475a
]
[allmydata/__init__.py: .name was used in place of the correct .__name__ when printing an exception. Also, robustify string formatting by using %r instead of %s in some places. fixes #1355.
david-sarah@jacaranda.org**20110221020125
 Ignore-this: b0744ed58f161bf188e037bad077fc48
]
[Refactor StorageFarmBroker handling of servers
Brian Warner <warner@lothar.com>**20110221015804
 Ignore-this: 842144ed92f5717699b8f580eab32a51

 Pass around IServer instance instead of (peerid, rref) tuple. Replace
 "descriptor" with "server". Other replacements:

 get_all_servers -> get_connected_servers/get_known_servers
 get_servers_for_index -> get_servers_for_psi (now returns IServers)

 This change still needs to be pushed further down: lots of code is now
 getting the IServer and then distributing (peerid, rref) internally.
 Instead, it ought to distribute the IServer internally and delay
 extracting a serverid or rref until the last moment.

 no_network.py was updated to retain parallelism.
]
[TAG allmydata-tahoe-1.8.2
warner@lothar.com**20110131020101]
Patch bundle hash:
090cdfa0b5cf52ed6bf5531bfc093a7723055edc