source: trunk/src/allmydata/test/common.py

Last change on this file was 1cfe843d, checked in by Alexandre Detiste <alexandre.detiste@…>, at 2024-02-22T23:40:25Z

more python2 removal

1"""
2Functionality related to a lot of the test suite.
3"""
4from __future__ import annotations
5
6from past.builtins import chr as byteschr
7
8__all__ = [
9    "SyncTestCase",
10    "AsyncTestCase",
11    "AsyncBrokenTestCase",
12    "TrialTestCase",
13
14    "flush_logged_errors",
15    "skip",
16    "skipIf",
17
18    # Selected based on platform and re-exported for convenience.
19    "Popen",
20    "PIPE",
21]
22
23import sys
24import os, random, struct
25from contextlib import contextmanager
26import six
27import tempfile
28from tempfile import mktemp
29from functools import partial
30from unittest import case as _case
31from socket import (
32    AF_INET,
33    SOCK_STREAM,
34    SOMAXCONN,
35    socket,
36    error as socket_error,
37)
38from errno import (
39    EADDRINUSE,
40)
41
42import attr
43
44import treq
45
46from zope.interface import implementer
47
48from testtools import (
49    TestCase,
50    skip,
51    skipIf,
52)
53from testtools.twistedsupport import (
54    SynchronousDeferredRunTest,
55    AsynchronousDeferredRunTest,
56    AsynchronousDeferredRunTestForBrokenTwisted,
57    flush_logged_errors,
58)
59
60from twisted.application import service
61from twisted.plugin import IPlugin
62from twisted.internet import defer
63from twisted.internet.defer import inlineCallbacks, returnValue
64from twisted.internet.interfaces import IPullProducer
65from twisted.python import failure
66from twisted.python.filepath import FilePath
67from twisted.web.error import Error as WebError
68from twisted.internet.interfaces import (
69    IStreamServerEndpointStringParser,
70    IReactorSocket,
71)
72from twisted.internet.endpoints import AdoptedStreamServerEndpoint
73from twisted.trial.unittest import TestCase as _TrialTestCase
74
75from allmydata import uri
76from allmydata.interfaces import (
77    IMutableFileNode,
78    IImmutableFileNode,
79    NotEnoughSharesError,
80    ICheckable,
81    IMutableUploadable,
82    SDMF_VERSION,
83    MDMF_VERSION,
84    IAddressFamily,
85    NoSpace,
86)
87from allmydata.check_results import CheckResults, CheckAndRepairResults, \
88     DeepCheckResults, DeepCheckAndRepairResults
89from allmydata.storage_client import StubServer
90from allmydata.mutable.layout import unpack_header
91from allmydata.mutable.publish import MutableData
92from allmydata.storage.mutable import MutableShareFile
93from allmydata.util import hashutil, log, iputil
94from allmydata.util.assertutil import precondition
95from allmydata.util.consumer import download_to_data
96import allmydata.test.common_util as testutil
97from allmydata.immutable.upload import Uploader
98from allmydata.client import (
99    config_from_string,
100    create_client_from_config,
101)
102from allmydata.scripts.common import (
103    write_introducer,
104    )
105
106from ..crypto import (
107    ed25519,
108    rsa,
109)
110from .eliotutil import (
111    EliotLoggedRunTest,
112)
113from .common_util import ShouldFailMixin  # noqa: F401
114
115from subprocess import (
116    Popen,
117    PIPE,
118)
119
120# Is the process running as an OS user with elevated privileges (ie, root)?
121# We only know how to determine this for POSIX systems.
122superuser = getattr(os, "getuid", lambda: -1)() == 0
123
124EMPTY_CLIENT_CONFIG = config_from_string(
125    "/dev/null",
126    "tub.port",
127    ""
128)
129
130@attr.s
131class FakeDisk(object):
132    """
133    Just enough of a disk to be able to report free / used information.
134    """
135    total = attr.ib()
136    used = attr.ib()
137
138    def use(self, num_bytes):
139        """
140        Mark some amount of available bytes as used (and no longer available).
141
142        :param int num_bytes: The number of bytes to use.
143
144        :raise NoSpace: If there are fewer bytes available than ``num_bytes``.
145
146        :return: ``None``
147        """
148        if num_bytes > self.total - self.used:
149            raise NoSpace()
150        self.used += num_bytes
151
152    @property
153    def available(self):
154        return self.total - self.used
155
156    def get_disk_stats(self, whichdir, reserved_space):
157        avail = self.available
158        return {
159            'total': self.total,
160            'free_for_root': avail,
161            'free_for_nonroot': avail,
162            'used': self.used,
163            'avail': avail - reserved_space,
164        }
165
166
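# A hypothetical helper (not part of the original module) sketching how
# ``FakeDisk`` is meant to be driven; the byte counts are arbitrary.
def _example_fake_disk():
    disk = FakeDisk(total=1000, used=0)
    disk.use(250)
    assert disk.available == 750
    stats = disk.get_disk_stats(whichdir=None, reserved_space=100)
    assert stats["avail"] == 650  # available minus reserved space
    try:
        disk.use(10000)
    except NoSpace:
        pass  # expected: only 750 bytes remain

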
@attr.s
class MemoryIntroducerClient(object):
    """
    A model-only (no behavior) stand-in for ``IntroducerClient``.
    """
    tub = attr.ib()
    introducer_furl = attr.ib()
    nickname = attr.ib()
    my_version = attr.ib()
    oldest_supported = attr.ib()
    sequencer = attr.ib()
    cache_filepath = attr.ib()

    subscribed_to : list[Subscription] = attr.ib(default=attr.Factory(list))
    published_announcements : list[Announcement] = attr.ib(default=attr.Factory(list))


    def setServiceParent(self, parent):
        pass


    def subscribe_to(self, service_name, cb, *args, **kwargs):
        self.subscribed_to.append(Subscription(service_name, cb, args, kwargs))


    def publish(self, service_name, ann, signing_key):
        self.published_announcements.append(Announcement(
            service_name,
            ann,
            ed25519.string_from_signing_key(signing_key),
        ))


@attr.s
class Subscription(object):
    """
    A model of an introducer subscription.
    """
    service_name = attr.ib()
    cb = attr.ib()
    args = attr.ib()
    kwargs = attr.ib()


@attr.s
class Announcement(object):
    """
    A model of an introducer announcement.
    """
    service_name = attr.ib()
    ann = attr.ib()
    signing_key_bytes = attr.ib(type=bytes)

    @property
    def signing_key(self):
        return ed25519.signing_keypair_from_string(self.signing_key_bytes)[0]


def get_published_announcements(client):
    """
    Get a flattened list of all announcements sent using all introducer
    clients.
    """
    return list(
        announcement
        for introducer_client
        in client.introducer_clients
        for announcement
        in introducer_client.published_announcements
    )


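# A hypothetical sketch (not part of the original module) of how the
# introducer models above fit together: ``publish`` records an
# ``Announcement`` holding the serialized signing key.  It assumes
# ``ed25519.create_signing_keypair`` for key generation.
def _example_memory_introducer_client():
    signing_key, _ = ed25519.create_signing_keypair()
    client = MemoryIntroducerClient(
        tub=None, introducer_furl="furl", nickname="nick",
        my_version="1", oldest_supported="1",
        sequencer=None, cache_filepath=None,
    )
    client.publish("storage", {"permutation-seed-base32": "aaaa"}, signing_key)
    assert len(client.published_announcements) == 1
    assert client.published_announcements[0].service_name == "storage"

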
class UseTestPlugins(object):
    """
    A fixture which enables loading Twisted plugins from the Tahoe-LAFS test
    suite.
    """
    def setUp(self):
        """
        Add the testing package ``plugins`` directory to the ``twisted.plugins``
        aggregate package.
        """
        import twisted.plugins
        testplugins = FilePath(__file__).sibling("plugins")
        twisted.plugins.__path__.insert(0, testplugins.path)

    def cleanUp(self):
        """
        Remove the testing package ``plugins`` directory from the
        ``twisted.plugins`` aggregate package.
        """
        import twisted.plugins
        testplugins = FilePath(__file__).sibling("plugins")
        twisted.plugins.__path__.remove(testplugins.path)

    def getDetails(self):
        return {}


@attr.s
class UseNode(object):
    """
    A fixture which creates a client node.

    :ivar dict[bytes, bytes] plugin_config: Configuration items to put in the
        node's configuration.

    :ivar bytes storage_plugin: The name of a storage plugin to enable.

    :ivar FilePath basedir: The base directory of the node.

    :ivar str introducer_furl: The introducer furl with which to
        configure the client.

    :ivar dict[bytes, bytes] node_config: Configuration items for the *node*
        section of the configuration.

    :ivar _Config config: The complete resulting configuration.
    """
    plugin_config = attr.ib()
    storage_plugin = attr.ib()
    basedir = attr.ib(validator=attr.validators.instance_of(FilePath))
    introducer_furl = attr.ib(validator=attr.validators.instance_of(str),
                              converter=six.ensure_str)
    node_config : dict[bytes,bytes] = attr.ib(default=attr.Factory(dict))

    config = attr.ib(default=None)
    reactor = attr.ib(default=None)

    def setUp(self):
        self.assigner = SameProcessStreamEndpointAssigner()
        self.assigner.setUp()

        def format_config_items(config):
            return "\n".join(
                " = ".join((key, value))
                for (key, value)
                in list(config.items())
            )

        if self.plugin_config is None:
            plugin_config_section = ""
        else:
            plugin_config_section = (
                "[storageclient.plugins.{storage_plugin}]\n"
                "{config}\n").format(
                    storage_plugin=self.storage_plugin,
                    config=format_config_items(self.plugin_config),
                )

        if self.storage_plugin is None:
            plugins = ""
        else:
            plugins = "storage.plugins = {}".format(self.storage_plugin)

        write_introducer(
            self.basedir,
            "default",
            self.introducer_furl,
        )

        node_config = self.node_config.copy()
        if "tub.port" not in node_config:
            if "tub.location" in node_config:
                raise ValueError(
                    "UseNode fixture does not support specifying tub.location "
                    "without tub.port"
                )

            # Don't use the normal port auto-assignment logic.  It produces
            # collisions and makes tests fail spuriously.
            tub_location, tub_endpoint = self.assigner.assign(self.reactor)
            node_config.update({
                "tub.port": tub_endpoint,
                "tub.location": tub_location,
            })

        self.config = config_from_string(
            self.basedir.asTextMode().path,
            "tub.port",
            "[node]\n"
            "{node_config}\n"
            "\n"
            "[client]\n"
            "{plugins}\n"
            "{plugin_config_section}\n"
            .format(
                plugins=plugins,
                node_config=format_config_items(node_config),
                plugin_config_section=plugin_config_section,
            )
        )

    def create_node(self):
        return create_client_from_config(
            self.config,
            _introducer_factory=MemoryIntroducerClient,
        )

    def cleanUp(self):
        self.assigner.tearDown()


    def getDetails(self):
        return {}



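# A hypothetical, hand-driven use of the ``UseNode`` fixture (test code
# normally manages fixtures for you).  It assumes ``basedir_path`` names an
# existing directory and uses a placeholder furl.
def _example_use_node(basedir_path):
    node_fixture = UseNode(
        plugin_config=None,
        storage_plugin=None,
        basedir=FilePath(basedir_path),
        introducer_furl="pb://tubid@127.0.0.1:1234/swissnum",
    )
    node_fixture.setUp()
    try:
        # create_node() returns a Deferred firing with the client node.
        return node_fixture.create_node()
    finally:
        node_fixture.cleanUp()

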
@implementer(IPlugin, IStreamServerEndpointStringParser)
class AdoptedServerPort(object):
    """
    Parse an ``adopt-socket:<fd>`` endpoint description by adopting ``fd`` as
    a listening TCP port.
    """
    prefix = "adopt-socket"

    def parseStreamServer(self, reactor, fd): # type: ignore # https://twistedmatrix.com/trac/ticket/10134
        log.msg("Adopting {}".format(fd))
        # AdoptedStreamServerEndpoint wants to own the file descriptor.  It
        # will duplicate it and then close the one we pass in.  This means it
        # is really only possible to adopt a particular file descriptor once.
        #
        # This wouldn't matter except one of the tests wants to stop one of
        # the nodes and start it up again.  This results in exactly an attempt
        # to adopt a particular file descriptor twice.
        #
        # So we'll dup it ourselves.  AdoptedStreamServerEndpoint can do
        # whatever it wants to the result - the original will still be valid
        # and reusable.
        return AdoptedStreamServerEndpoint(reactor, os.dup(int(fd)), AF_INET)


def really_bind(s, addr):
    # Arbitrarily decide we'll try 100 times.  We don't want to try forever in
    # case this is a persistent problem.  Trying is cheap, though, so we may
    # as well try a lot.  Hopefully the OS isn't so bad at allocating a port
    # for us that it takes more than 2 iterations.
    for i in range(100):
        try:
            s.bind(addr)
        except socket_error as e:
            if e.errno == EADDRINUSE:
                continue
            raise
        else:
            return
    raise Exception("Many bind attempts failed with EADDRINUSE")


class SameProcessStreamEndpointAssigner(object):
    """
    A fixture which can assign streaming server endpoints for use *in this
    process only*.

    An effort is made to avoid address collisions for this port but the logic
    for doing so is platform-dependent (sorry, Windows).

    This is more reliable than trying to listen on a hard-coded non-zero port
    number.  It is at least as reliable as trying to listen on port number
    zero on Windows and more reliable than doing that on other platforms.
    """
    def setUp(self):
        self._cleanups = []
        # Make sure the `adopt-socket` endpoint is recognized.  We do this
        # instead of providing a dropin because we don't want to make this
        # endpoint available to random other applications.
        f = UseTestPlugins()
        f.setUp()
        self._cleanups.append(f.cleanUp)

    def tearDown(self):
        for c in self._cleanups:
            c()

    def assign(self, reactor):
        """
        Make a new streaming server endpoint and return its string description.

        This is intended to help write config files that will then be read and
        used in this process.

        :param reactor: The reactor which will be used to listen with the
            resulting endpoint.  If it provides ``IReactorSocket`` then
            resulting reliability will be extremely high.  If it doesn't,
            resulting reliability will be pretty alright.

        :return: A two-tuple of (location hint, port endpoint description) as
            strings.
        """
        if sys.platform != "win32" and IReactorSocket.providedBy(reactor):
            # On this platform, we can reliably pre-allocate a listening port.
            # Once it is bound we know it will not fail later with EADDRINUSE.
            s = socket(AF_INET, SOCK_STREAM)
            # We need to keep ``s`` alive as long as the file descriptor we put in
            # this string might still be used.  We could dup() the descriptor
            # instead but then we've only inverted the cleanup problem: gone from
            # don't-close-too-soon to close-just-late-enough.  So we'll leave
            # ``s`` alive and use it as the cleanup mechanism.
            self._cleanups.append(s.close)
            s.setblocking(False)
            really_bind(s, ("127.0.0.1", 0))
            s.listen(SOMAXCONN)
            host, port = s.getsockname()
            location_hint = "tcp:%s:%d" % (host, port)
            port_endpoint = "adopt-socket:fd=%d" % (s.fileno(),)
        else:
            # On other platforms, we blindly guess and hope we get lucky.
            portnum = iputil.allocate_tcp_port()
            location_hint = "tcp:127.0.0.1:%d" % (portnum,)
            port_endpoint = "tcp:%d:interface=127.0.0.1" % (portnum,)

        return location_hint, port_endpoint

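# A minimal sketch (hypothetical helper) of the assigner fixture.  With
# ``reactor=None`` the ``IReactorSocket`` check fails, so the
# port-number-guessing fallback above is exercised.
def _example_assign_endpoint(reactor=None):
    assigner = SameProcessStreamEndpointAssigner()
    assigner.setUp()
    try:
        # e.g. ("tcp:127.0.0.1:52017", "tcp:52017:interface=127.0.0.1")
        return assigner.assign(reactor)
    finally:
        assigner.tearDown()

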
@implementer(IPullProducer)
class DummyProducer(object):
    def resumeProducing(self):
        pass

    def stopProducing(self):
        pass

@implementer(IImmutableFileNode)
class FakeCHKFileNode(object):  # type: ignore # incomplete implementation
    """I provide IImmutableFileNode, but all of my data is stored in a
    dictionary shared with my creator."""

    def __init__(self, filecap, all_contents):
        precondition(isinstance(filecap, (uri.CHKFileURI, uri.LiteralFileURI)), filecap)
        self.all_contents = all_contents
        self.my_uri = filecap
        self.storage_index = self.my_uri.get_storage_index()

    def get_uri(self):
        return self.my_uri.to_string()
    def get_write_uri(self):
        return None
    def get_readonly_uri(self):
        return self.my_uri.to_string()
    def get_cap(self):
        return self.my_uri
    def get_verify_cap(self):
        return self.my_uri.get_verify_cap()
    def get_repair_cap(self):
        return self.my_uri.get_verify_cap()
    def get_storage_index(self):
        return self.storage_index

    def check(self, monitor, verify=False, add_lease=False):
        s = StubServer(b"\x00"*20)
        r = CheckResults(self.my_uri, self.storage_index,
                         healthy=True, recoverable=True,
                         count_happiness=10,
                         count_shares_needed=3,
                         count_shares_expected=10,
                         count_shares_good=10,
                         count_good_share_hosts=10,
                         count_recoverable_versions=1,
                         count_unrecoverable_versions=0,
                         servers_responding=[s],
                         sharemap={1: [s]},
                         count_wrong_shares=0,
                         list_corrupt_shares=[],
                         count_corrupt_shares=0,
                         list_incompatible_shares=[],
                         count_incompatible_shares=0,
                         summary="",
                         report=[],
                         share_problems=[],
                         servermap=None)
        return defer.succeed(r)
    def check_and_repair(self, monitor, verify=False, add_lease=False):
        d = self.check(monitor, verify)
        def _got(cr):
            r = CheckAndRepairResults(self.storage_index)
            r.pre_repair_results = r.post_repair_results = cr
            return r
        d.addCallback(_got)
        return d

    def is_mutable(self):
        return False
    def is_readonly(self):
        return True
    def is_unknown(self):
        return False
    def is_allowed_in_immutable_directory(self):
        return True
    def raise_error(self):
        pass

    def get_size(self):
        if isinstance(self.my_uri, uri.LiteralFileURI):
            return self.my_uri.get_size()
        try:
            data = self.all_contents[self.my_uri.to_string()]
        except KeyError as le:
            raise NotEnoughSharesError(le, 0, 3)
        return len(data)
    def get_current_size(self):
        return defer.succeed(self.get_size())

    def read(self, consumer, offset=0, size=None):
        # we don't bother to call registerProducer/unregisterProducer,
        # because it's a hassle to write a dummy Producer that does the right
        # thing (we have to make sure that DummyProducer.resumeProducing
        # writes the data into the consumer immediately, otherwise it will
        # loop forever).

        d = defer.succeed(None)
        d.addCallback(self._read, consumer, offset, size)
        return d

    def _read(self, ignored, consumer, offset, size):
        if isinstance(self.my_uri, uri.LiteralFileURI):
            data = self.my_uri.data
        else:
            if self.my_uri.to_string() not in self.all_contents:
                raise NotEnoughSharesError(None, 0, 3)
            data = self.all_contents[self.my_uri.to_string()]
        start = offset
        if size is not None:
            end = offset + size
        else:
            end = len(data)
        consumer.write(data[start:end])
        return consumer


    def get_best_readable_version(self):
        return defer.succeed(self)


    def download_to_data(self):
        return download_to_data(self)


    download_best_version = download_to_data


    def get_size_of_best_version(self):
        return defer.succeed(self.get_size())


def make_chk_file_cap(size):
    return uri.CHKFileURI(key=os.urandom(16),
                          uri_extension_hash=os.urandom(32),
                          needed_shares=3,
                          total_shares=10,
                          size=size)
def make_chk_file_uri(size):
    return make_chk_file_cap(size).to_string()

def create_chk_filenode(contents, all_contents):
    filecap = make_chk_file_cap(len(contents))
    n = FakeCHKFileNode(filecap, all_contents)
    all_contents[filecap.to_string()] = contents
    return n


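# A hypothetical round-trip through the fake CHK machinery: create a node
# backed by a plain dict, then read the contents back.  ``download_to_data``
# returns a Deferred, so real tests let the test runner wait on it.
def _example_fake_chk_roundtrip():
    all_contents = {}
    node = create_chk_filenode(b"some test data", all_contents)
    d = node.download_to_data()
    d.addCallback(lambda data: print(data))  # b"some test data"
    return d

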
@implementer(IMutableFileNode, ICheckable)
class FakeMutableFileNode(object):  # type: ignore # incomplete implementation
    """I provide IMutableFileNode, but all of my data is stored in a
    dictionary shared with my creator."""

    MUTABLE_SIZELIMIT = 10000

    _public_key: rsa.PublicKey | None
    _private_key: rsa.PrivateKey | None

    def __init__(self,
                 storage_broker,
                 secret_holder,
                 default_encoding_parameters,
                 history,
                 all_contents,
                 keypair: tuple[rsa.PublicKey, rsa.PrivateKey] | None
                ):
        self.all_contents = all_contents
        self.file_types: dict[bytes, int] = {} # storage index => MDMF_VERSION or SDMF_VERSION
        self.init_from_cap(make_mutable_file_cap(keypair))
        self._k = default_encoding_parameters['k']
        self._segsize = default_encoding_parameters['max_segment_size']
        if keypair is None:
            self._public_key = self._private_key = None
        else:
            self._public_key, self._private_key = keypair

    def create(self, contents, version=SDMF_VERSION):
        if version == MDMF_VERSION and \
            isinstance(self.my_uri, (uri.ReadonlySSKFileURI,
                                 uri.WriteableSSKFileURI)):
            self.init_from_cap(make_mdmf_mutable_file_cap())
        self.file_types[self.storage_index] = version
        initial_contents = self._get_initial_contents(contents)
        data = initial_contents.read(initial_contents.get_size())
        data = b"".join(data)
        self.all_contents[self.storage_index] = data
        return defer.succeed(self)
    def _get_initial_contents(self, contents):
        if contents is None:
            return MutableData(b"")

        if IMutableUploadable.providedBy(contents):
            return contents

        assert callable(contents), "%s should be callable, not %s" % \
               (contents, type(contents))
        return contents(self)
    def init_from_cap(self, filecap):
        assert isinstance(filecap, (uri.WriteableSSKFileURI,
                                    uri.ReadonlySSKFileURI,
                                    uri.WriteableMDMFFileURI,
                                    uri.ReadonlyMDMFFileURI))
        self.my_uri = filecap
        self.storage_index = self.my_uri.get_storage_index()
        if isinstance(filecap, (uri.WriteableMDMFFileURI,
                                uri.ReadonlyMDMFFileURI)):
            self.file_types[self.storage_index] = MDMF_VERSION

        else:
            self.file_types[self.storage_index] = SDMF_VERSION

        return self
    def get_cap(self):
        return self.my_uri
    def get_readcap(self):
        return self.my_uri.get_readonly()
    def get_uri(self):
        return self.my_uri.to_string()
    def get_write_uri(self):
        if self.is_readonly():
            return None
        return self.my_uri.to_string()
    def get_readonly(self):
        return self.my_uri.get_readonly()
    def get_readonly_uri(self):
        return self.my_uri.get_readonly().to_string()
    def get_verify_cap(self):
        return self.my_uri.get_verify_cap()
    def get_repair_cap(self):
        if self.my_uri.is_readonly():
            return None
        return self.my_uri
    def is_readonly(self):
        return self.my_uri.is_readonly()
    def is_mutable(self):
        return self.my_uri.is_mutable()
    def is_unknown(self):
        return False
    def is_allowed_in_immutable_directory(self):
        return not self.my_uri.is_mutable()
    def raise_error(self):
        pass
    def get_writekey(self):
        return b"\x00"*16
    def get_size(self):
        return len(self.all_contents[self.storage_index])
    def get_current_size(self):
        return self.get_size_of_best_version()
    def get_size_of_best_version(self):
        return defer.succeed(len(self.all_contents[self.storage_index]))

    def get_storage_index(self):
        return self.storage_index

    def get_servermap(self, mode):
        return defer.succeed(None)

    def get_version(self):
        assert self.storage_index in self.file_types
        return self.file_types[self.storage_index]

    def check(self, monitor, verify=False, add_lease=False):
        s = StubServer(b"\x00"*20)
        r = CheckResults(self.my_uri, self.storage_index,
                         healthy=True, recoverable=True,
                         count_happiness=10,
                         count_shares_needed=3,
                         count_shares_expected=10,
                         count_shares_good=10,
                         count_good_share_hosts=10,
                         count_recoverable_versions=1,
                         count_unrecoverable_versions=0,
                         servers_responding=[s],
                         sharemap={b"seq1-abcd-sh0": [s]},
                         count_wrong_shares=0,
                         list_corrupt_shares=[],
                         count_corrupt_shares=0,
                         list_incompatible_shares=[],
                         count_incompatible_shares=0,
                         summary="",
                         report=[],
                         share_problems=[],
                         servermap=None)
        return defer.succeed(r)

    def check_and_repair(self, monitor, verify=False, add_lease=False):
        d = self.check(monitor, verify)
        def _got(cr):
            r = CheckAndRepairResults(self.storage_index)
            r.pre_repair_results = r.post_repair_results = cr
            return r
        d.addCallback(_got)
        return d

    def deep_check(self, verify=False, add_lease=False):
        d = self.check(None, verify)
        def _done(r):
            dr = DeepCheckResults(self.storage_index)
            dr.add_check(r, [])
            return dr
        d.addCallback(_done)
        return d

    def deep_check_and_repair(self, verify=False, add_lease=False):
        d = self.check_and_repair(None, verify)
        def _done(r):
            dr = DeepCheckAndRepairResults(self.storage_index)
            dr.add_check(r, [])
            return dr
        d.addCallback(_done)
        return d

    def download_best_version(self):
        return defer.succeed(self._download_best_version())


    def _download_best_version(self, ignored=None):
        if isinstance(self.my_uri, uri.LiteralFileURI):
            return self.my_uri.data
        if self.storage_index not in self.all_contents:
            raise NotEnoughSharesError(None, 0, 3)
        return self.all_contents[self.storage_index]


    def overwrite(self, new_contents):
        assert not self.is_readonly()
        new_data = new_contents.read(new_contents.get_size())
        new_data = b"".join(new_data)
        self.all_contents[self.storage_index] = new_data
        return defer.succeed(None)
    def modify(self, modifier):
        # this does not implement FileTooLargeError, but the real one does
        return defer.maybeDeferred(self._modify, modifier)
    def _modify(self, modifier):
        assert not self.is_readonly()
        old_contents = self.all_contents[self.storage_index]
        new_data = modifier(old_contents, None, True)
        self.all_contents[self.storage_index] = new_data
        return None

    # As actually implemented, MutableFilenode and MutableFileVersion
    # are distinct. However, nothing in the webapi uses (yet) that
    # distinction -- it just uses the unified download interface
    # provided by get_best_readable_version and read. When we start
    # doing cooler things like LDMF, we will want to revise this code to
    # be less simplistic.
    def get_best_readable_version(self):
        return defer.succeed(self)


    def get_best_mutable_version(self):
        return defer.succeed(self)

    # Ditto for this, which is an implementation of IWriteable.
    # XXX: Declare that the same is implemented.
    def update(self, data, offset):
        assert not self.is_readonly()
        def modifier(old, servermap, first_time):
            new = old[:offset] + b"".join(data.read(data.get_size()))
            new += old[len(new):]
            return new
        return self.modify(modifier)


    def read(self, consumer, offset=0, size=None):
        data = self._download_best_version()
        if size:
            data = data[offset:offset+size]
        consumer.write(data)
        return defer.succeed(consumer)


def make_mutable_file_cap(
        keypair: tuple[rsa.PublicKey, rsa.PrivateKey] | None = None,
) -> uri.WriteableSSKFileURI:
    """
    Create a local representation of a mutable object.

    :param keypair: If None, a random keypair will be generated for the new
        object.  Otherwise, this is the keypair for that object.
    """
    if keypair is None:
        writekey = os.urandom(16)
        fingerprint = os.urandom(32)
    else:
        pubkey, privkey = keypair
        pubkey_s = rsa.der_string_from_verifying_key(pubkey)
        privkey_s = rsa.der_string_from_signing_key(privkey)
        writekey = hashutil.ssk_writekey_hash(privkey_s)
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)

    return uri.WriteableSSKFileURI(
        writekey=writekey, fingerprint=fingerprint,
    )

def make_mdmf_mutable_file_cap():
    return uri.WriteableMDMFFileURI(writekey=os.urandom(16),
                                    fingerprint=os.urandom(32))

def make_mutable_file_uri(mdmf=False):
    if mdmf:
        uri = make_mdmf_mutable_file_cap()
    else:
        uri = make_mutable_file_cap()

    return uri.to_string()

def make_verifier_uri():
    return uri.SSKVerifierURI(storage_index=os.urandom(16),
                              fingerprint=os.urandom(32)).to_string()

def create_mutable_filenode(contents, mdmf=False, all_contents=None):
    # XXX: All of these arguments are kind of stupid.
    if mdmf:
        cap = make_mdmf_mutable_file_cap()
    else:
        cap = make_mutable_file_cap()

    encoding_params = {}
    encoding_params['k'] = 3
    encoding_params['max_segment_size'] = 128*1024

    filenode = FakeMutableFileNode(None, None, encoding_params, None,
                                   all_contents, None)
    filenode.init_from_cap(cap)
    if mdmf:
        filenode.create(MutableData(contents), version=MDMF_VERSION)
    else:
        filenode.create(MutableData(contents), version=SDMF_VERSION)
    return filenode


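# A hypothetical round-trip through the mutable fakes: create an SDMF node
# backed by a plain dict, then replace its contents in place.
def _example_fake_mutable_roundtrip():
    all_contents = {}
    node = create_mutable_filenode(b"version 1", all_contents=all_contents)
    node.overwrite(MutableData(b"version 2"))
    assert node._download_best_version() == b"version 2"

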
class LoggingServiceParent(service.MultiService):
    def log(self, *args, **kwargs):
        return log.msg(*args, **kwargs)


TEST_DATA = b"\x02" * (Uploader.URI_LIT_SIZE_THRESHOLD + 1)


class WebErrorMixin(object):
    def explain_web_error(self, f):
        # an error on the server side causes the client-side getPage() to
        # return a failure(t.web.error.Error), and its str() doesn't show the
        # response body, which is where the useful information lives. Attach
        # this method as an errback handler, and it will reveal the hidden
        # message.
        f.trap(WebError)
        print("Web Error:", f.value, ":", f.value.response)
        return f

    def _shouldHTTPError(self, res, which, validator):
        if isinstance(res, failure.Failure):
            res.trap(WebError)
            return validator(res)
        else:
            self.fail("%s was supposed to Error, not get '%s'" % (which, res))

    def shouldHTTPError(self, which,
                        code=None, substring=None, response_substring=None,
                        callable=None, *args, **kwargs):
        # returns a Deferred with the response body
        if isinstance(substring, bytes):
            substring = str(substring, "ascii")
        if isinstance(response_substring, str):
            response_substring = response_substring.encode("ascii")
        assert substring is None or isinstance(substring, str)
        assert response_substring is None or isinstance(response_substring, bytes)
        assert callable
        def _validate(f):
            if code is not None:
                self.failUnlessEqual(f.value.status, b"%d" % code, which)
            if substring:
                code_string = str(f)
                self.failUnless(substring in code_string,
                                "%s: substring '%s' not in '%s'"
                                % (which, substring, code_string))
            response_body = f.value.response
            if response_substring:
                self.failUnless(response_substring in response_body,
                                "%r: response substring %r not in %r"
                                % (which, response_substring, response_body))
            return response_body
        d = defer.maybeDeferred(callable, *args, **kwargs)
        d.addBoth(self._shouldHTTPError, which, _validate)
        return d

    @inlineCallbacks
    def assertHTTPError(self, url, code, response_substring,
                        method="get", persistent=False,
                        **args):
        response = yield treq.request(method, url, persistent=persistent,
                                      **args)
        body = yield response.content()
        self.assertEquals(response.code, code)
        if response_substring is not None:
            if isinstance(response_substring, str):
                response_substring = response_substring.encode("utf-8")
            self.assertIn(response_substring, body)
        returnValue(body)

class ErrorMixin(WebErrorMixin):
    def explain_error(self, f):
        if f.check(defer.FirstError):
            print("First Error:", f.value.subFailure)
        return f

def corrupt_field(data, offset, size, debug=False):
    if random.random() < 0.5:
        newdata = testutil.flip_one_bit(data, offset, size)
        if debug:
            log.msg("testing: corrupting offset %d, size %d flipping one bit orig: %r, newdata: %r" % (offset, size, data[offset:offset+size], newdata[offset:offset+size]))
        return newdata
    else:
        newval = testutil.insecurerandstr(size)
        if debug:
            log.msg("testing: corrupting offset %d, size %d randomizing field, orig: %r, newval: %r" % (offset, size, data[offset:offset+size], newval))
        return data[:offset]+newval+data[offset+size:]

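# An illustration (hypothetical helper) of the ``corrupt_field`` contract:
# it either flips one bit inside the field or replaces the whole field with
# random bytes, and never touches bytes outside [offset, offset+size).
def _example_corrupt_field():
    original = b"\x00" * 16
    corrupted = corrupt_field(original, offset=4, size=4)
    assert len(corrupted) == len(original)
    assert corrupted[:4] == original[:4]    # prefix untouched
    assert corrupted[8:] == original[8:]    # suffix untouched

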
def _corrupt_nothing(data, debug=False):
    """Leave the data pristine. """
    return data

def _corrupt_file_version_number(data, debug=False):
    """Scramble the file data -- the share file version number will have one
    bit flipped or else will be changed to a random value."""
    return corrupt_field(data, 0x00, 4)

def _corrupt_size_of_file_data(data, debug=False):
    """Scramble the file data -- the field showing the size of the share data
    within the file will have one bit flipped or else will be changed to a
    random value."""
    return corrupt_field(data, 0x04, 4)

def _corrupt_sharedata_version_number(data, debug=False):
    """Scramble the file data -- the share data version number will have one
    bit flipped or else will be changed to a random value."""
    return corrupt_field(data, 0x0c, 4)

def _corrupt_sharedata_version_number_to_plausible_version(data, debug=False):
    """Scramble the file data -- the share data version number will be
    changed to 2 if it is 1 or else to 1 if it is 2."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        newsharevernum = 2
    else:
        newsharevernum = 1
    newsharevernumbytes = struct.pack(">L", newsharevernum)
    return data[:0x0c] + newsharevernumbytes + data[0x0c+4:]

def _corrupt_segment_size(data, debug=False):
    """Scramble the file data -- the field showing the size of the segment
    will have one bit flipped or else be changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x04, 4, debug=False)
    else:
        return corrupt_field(data, 0x0c+0x04, 8, debug=False)

def _corrupt_size_of_sharedata(data, debug=False):
    """Scramble the file data -- the field showing the size of the data
    within the share data will have one bit flipped or else will be changed
    to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x08, 4)
    else:
        return corrupt_field(data, 0x0c+0x0c, 8)

def _corrupt_offset_of_sharedata(data, debug=False):
    """Scramble the file data -- the field showing the offset of the data
    within the share data will have one bit flipped or else be changed to a
    random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x0c, 4)
    else:
        return corrupt_field(data, 0x0c+0x14, 8)

def _corrupt_offset_of_ciphertext_hash_tree(data, debug=False):
    """Scramble the file data -- the field showing the offset of the
    ciphertext hash tree within the share data will have one bit flipped or
    else be changed to a random value.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x14, 4, debug=False)
    else:
        return corrupt_field(data, 0x0c+0x24, 8, debug=False)

def _corrupt_offset_of_block_hashes(data, debug=False):
    """Scramble the file data -- the field showing the offset of the block
    hash tree within the share data will have one bit flipped or else will be
    changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x18, 4)
    else:
        return corrupt_field(data, 0x0c+0x2c, 8)

def _corrupt_offset_of_block_hashes_to_truncate_crypttext_hashes(data, debug=False):
    """Scramble the file data -- the field showing the offset of the block
    hash tree within the share data will have a multiple of hash size
    subtracted from it, thus causing the downloader to download an incomplete
    crypttext hash tree."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        curval = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
        newval = random.randrange(0, max(1, (curval//hashutil.CRYPTO_VAL_SIZE)//2))*hashutil.CRYPTO_VAL_SIZE
        newvalstr = struct.pack(">L", newval)
        return data[:0x0c+0x18]+newvalstr+data[0x0c+0x18+4:]
    else:
        curval = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
        newval = random.randrange(0, max(1, (curval//hashutil.CRYPTO_VAL_SIZE)//2))*hashutil.CRYPTO_VAL_SIZE
        newvalstr = struct.pack(">Q", newval)
        return data[:0x0c+0x2c]+newvalstr+data[0x0c+0x2c+8:]

def _corrupt_offset_of_share_hashes(data, debug=False):
    """Scramble the file data -- the field showing the offset of the share
    hash tree within the share data will have one bit flipped or else will be
    changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x1c, 4)
    else:
        return corrupt_field(data, 0x0c+0x34, 8)

def _corrupt_offset_of_uri_extension(data, debug=False):
    """Scramble the file data -- the field showing the offset of the uri
    extension will have one bit flipped or else will be changed to a random
    value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        return corrupt_field(data, 0x0c+0x20, 4)
    else:
        return corrupt_field(data, 0x0c+0x3c, 8)

def _corrupt_offset_of_uri_extension_to_force_short_read(data, debug=False):
    """Scramble the file data -- the field showing the offset of the uri
    extension will be set to the size of the file minus 3. This means when
    the client tries to read the length field from that location it will get
    a short read -- the result string will be only 3 bytes long, not the 4 or
    8 bytes necessary to do a successful struct.unpack."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    # The "-0x0c" in here is to skip the server-side header in the share
    # file, which the client doesn't see when seeking and reading.
    if sharevernum == 1:
        if debug:
            log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x2c, 4, struct.unpack(">L", data[0x2c:0x2c+4])[0], len(data)-0x0c-3, len(data)))
        return data[:0x2c] + struct.pack(">L", len(data)-0x0c-3) + data[0x2c+4:]
    else:
        if debug:
            log.msg("testing: corrupting offset %d, size %d, changing %d to %d (len(data) == %d)" % (0x48, 8, struct.unpack(">Q", data[0x48:0x48+8])[0], len(data)-0x0c-3, len(data)))
        return data[:0x48] + struct.pack(">Q", len(data)-0x0c-3) + data[0x48+8:]

def _corrupt_mutable_share_data(data, debug=False):
    prefix = data[:32]
    assert MutableShareFile.is_valid_header(prefix), "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableShareFile.MAGIC)
    data_offset = MutableShareFile.DATA_OFFSET
    sharetype = data[data_offset:data_offset+1]
    assert sharetype == b"\x00", "non-SDMF mutable shares not supported"
    (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
     ig_datalen, offsets) = unpack_header(data[data_offset:])
    assert version == 0, "this function only handles v0 SDMF files"
    start = data_offset + offsets["share_data"]
    length = data_offset + offsets["enc_privkey"] - start
    return corrupt_field(data, start, length)

def _corrupt_share_data(data, debug=False):
    """Scramble the file data -- the field containing the share data itself
    will have one bit flipped or else will be changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways, not v%d." % sharevernum
    if sharevernum == 1:
        sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0]

        return corrupt_field(data, 0x0c+0x24, sharedatasize)
    else:
        sharedatasize = struct.unpack(">Q", data[0x0c+0x0c:0x0c+0x0c+8])[0]

        return corrupt_field(data, 0x0c+0x44, sharedatasize)

def _corrupt_share_data_last_byte(data, debug=False):
    """Scramble the file data -- flip all bits of the last byte."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways, not v%d." % sharevernum
    if sharevernum == 1:
        sharedatasize = struct.unpack(">L", data[0x0c+0x08:0x0c+0x08+4])[0]
        offset = 0x0c+0x24+sharedatasize-1
    else:
        sharedatasize = struct.unpack(">Q", data[0x0c+0x0c:0x0c+0x0c+8])[0]
        offset = 0x0c+0x44+sharedatasize-1

    newdata = data[:offset] + byteschr(ord(data[offset:offset+1])^0xFF) + data[offset+1:]
    if debug:
        log.msg("testing: flipping all bits of byte at offset %d: %r, newdata: %r" % (offset, data[offset], newdata[offset]))
    return newdata

def _corrupt_crypttext_hash_tree(data, debug=False):
    """Scramble the file data -- the field containing the crypttext hash tree
    will have one bit flipped or else will be changed to a random value.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        crypttexthashtreeoffset = struct.unpack(">L", data[0x0c+0x14:0x0c+0x14+4])[0]
        blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
    else:
        crypttexthashtreeoffset = struct.unpack(">Q", data[0x0c+0x24:0x0c+0x24+8])[0]
        blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]

    return corrupt_field(data, 0x0c+crypttexthashtreeoffset, blockhashesoffset-crypttexthashtreeoffset, debug=debug)

def _corrupt_crypttext_hash_tree_byte_x221(data, debug=False):
    """Scramble the file data -- the byte at offset 0x221 will have its 7th
    (b1) bit flipped.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if debug:
        log.msg("original data: %r" % (data,))
    return data[:0x0c+0x221] + byteschr(ord(data[0x0c+0x221:0x0c+0x221+1])^0x02) + data[0x0c+0x221+1:]

def _corrupt_block_hashes(data, debug=False):
    """Scramble the file data -- the field containing the block hash tree
    will have one bit flipped or else will be changed to a random value.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        blockhashesoffset = struct.unpack(">L", data[0x0c+0x18:0x0c+0x18+4])[0]
        sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
    else:
        blockhashesoffset = struct.unpack(">Q", data[0x0c+0x2c:0x0c+0x2c+8])[0]
        sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]

    return corrupt_field(data, 0x0c+blockhashesoffset, sharehashesoffset-blockhashesoffset)

def _corrupt_share_hashes(data, debug=False):
    """Scramble the file data -- the field containing the share hash chain
    will have one bit flipped or else will be changed to a random value.
    """
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        sharehashesoffset = struct.unpack(">L", data[0x0c+0x1c:0x0c+0x1c+4])[0]
        uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
    else:
        sharehashesoffset = struct.unpack(">Q", data[0x0c+0x34:0x0c+0x34+8])[0]
        uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]

    return corrupt_field(data, 0x0c+sharehashesoffset, uriextoffset-sharehashesoffset)

def _corrupt_length_of_uri_extension(data, debug=False):
    """Scramble the file data -- the field showing the length of the uri
    extension will have one bit flipped or else will be changed to a random
    value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
        return corrupt_field(data, 0x0c+uriextoffset, 4)
    else:
        uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
        return corrupt_field(data, 0x0c+uriextoffset, 8)

def _corrupt_uri_extension(data, debug=False):
    """Scramble the file data -- the field containing the uri extension will
    have one bit flipped or else will be changed to a random value."""
    sharevernum = struct.unpack(">L", data[0x0c:0x0c+4])[0]
    assert sharevernum in (1, 2), "This test is designed to corrupt immutable shares of v1 or v2 in specific ways."
    if sharevernum == 1:
        uriextoffset = struct.unpack(">L", data[0x0c+0x20:0x0c+0x20+4])[0]
        uriextlen = struct.unpack(">L", data[0x0c+uriextoffset:0x0c+uriextoffset+4])[0]
    else:
        uriextoffset = struct.unpack(">Q", data[0x0c+0x3c:0x0c+0x3c+8])[0]
        uriextlen = struct.unpack(">Q", data[0x0c+uriextoffset:0x0c+uriextoffset+8])[0]

    return corrupt_field(data, 0x0c+uriextoffset, uriextlen)



@attr.s
@implementer(IAddressFamily)
class ConstantAddresses(object):
    """
    Pretend to provide support for some address family but just hand out
    canned responses.
    """
    _listener = attr.ib(default=None)
    _handler = attr.ib(default=None)

    def get_listener(self):
        if self._listener is None:
            raise Exception("{!r} has no listener.".format(self))
        return self._listener

    def get_client_endpoint(self):
        if self._handler is None:
            raise Exception("{!r} has no client endpoint.".format(self))
        return self._handler

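# A hypothetical sketch: give ``ConstantAddresses`` a canned handler and
# watch the unconfigured side fail loudly.
def _example_constant_addresses():
    addresses = ConstantAddresses(handler=object())
    addresses.get_client_endpoint()  # returns the canned handler
    try:
        addresses.get_listener()
    except Exception:
        pass  # no listener was configured

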
@contextmanager
def disable_modules(*names):
    """
    A context manager which makes modules appear to be missing while it is
    active.

    :param *names: The names of the modules to disappear.  Only top-level
        modules are supported (that is, "." is not allowed in any names).
        This is an implementation shortcoming which could be lifted if
        desired.
    """
    if any("." in name for name in names):
        raise ValueError("Names containing '.' are not supported.")
    missing = object()
    modules = list(sys.modules.get(n, missing) for n in names)
    for n in names:
        sys.modules[n] = None
    yield
    for n, original in zip(names, modules):
        if original is missing:
            del sys.modules[n]
        else:
            sys.modules[n] = original

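# A hypothetical sketch of ``disable_modules``: setting ``sys.modules[name]``
# to ``None`` makes ``import name`` raise ImportError until the context
# exits and the original entry is restored.
def _example_disable_modules():
    with disable_modules("json"):
        try:
            import json  # noqa: F401
        except ImportError:
            pass  # the module appears to be missing inside the context
    import json  # importable again  # noqa: F401

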
class _TestCaseMixin(object):
    """
    A mixin for ``TestCase`` which collects helpful behaviors for subclasses.

    Those behaviors are:

    * All of the features of testtools TestCase.
    * Each test method will be run in a unique Eliot action context which
      identifies the test and collects all Eliot log messages emitted by that
      test (including setUp and tearDown messages).
    * trial-compatible mktemp method
    * unittest2-compatible assertRaises helper
    * Automatic cleanup of tempfile.tempdir mutation (once pervasive through
      the Tahoe-LAFS test suite, perhaps gone now but someone should verify
      this).
    """
    def setUp(self):
        # Restore the original temporary directory.  Node ``init_tempdir``
        # mangles it and many tests manage to get that method called.
        self.addCleanup(
            partial(setattr, tempfile, "tempdir", tempfile.tempdir),
        )
        return super(_TestCaseMixin, self).setUp()

    class _DummyCase(_case.TestCase):
        def dummy(self):
            pass
    _dummyCase = _DummyCase("dummy")

    def mktemp(self):
        return mktemp()

    def assertRaises(self, *a, **kw):
        return self._dummyCase.assertRaises(*a, **kw)

    def failUnless(self, *args, **kwargs):
        """Backwards compatibility method."""
        self.assertTrue(*args, **kwargs)

    def failIf(self, *args, **kwargs):
        """Backwards compatibility method."""
        self.assertFalse(*args, **kwargs)

    def failIfEqual(self, *args, **kwargs):
        """Backwards compatibility method."""
        self.assertNotEqual(*args, **kwargs)

    def failUnlessEqual(self, *args, **kwargs):
        """Backwards compatibility method."""
        self.assertEqual(*args, **kwargs)

    def failUnlessReallyEqual(self, *args, **kwargs):
        """Backwards compatibility method."""
        self.assertReallyEqual(*args, **kwargs)


class SyncTestCase(_TestCaseMixin, TestCase):
    """
    A ``TestCase`` which can run tests that may return an already-fired
    ``Deferred``.
    """
    run_tests_with = EliotLoggedRunTest.make_factory(
        SynchronousDeferredRunTest,
    )


class AsyncTestCase(_TestCaseMixin, TestCase):
    """
    A ``TestCase`` which can run tests that may return a Deferred that will
    only fire if the global reactor is running.
    """
    run_tests_with = EliotLoggedRunTest.make_factory(
        AsynchronousDeferredRunTest.make_factory(timeout=60.0),
    )


class AsyncBrokenTestCase(_TestCaseMixin, TestCase):
    """
    A ``TestCase`` like ``AsyncTestCase`` but which spins the reactor a little
    longer than apparently necessary to clean out lingering unaccounted for
    event sources.

    Tests which require this behavior are broken and should be fixed so they
    pass with ``AsyncTestCase``.
    """
    run_tests_with = EliotLoggedRunTest.make_factory(
        AsynchronousDeferredRunTestForBrokenTwisted.make_factory(timeout=60.0),
    )


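# A hypothetical sketch of how test modules build on these base classes:
# subclass one and write ordinary (possibly Deferred-returning) test methods.
class _ExampleTests(SyncTestCase):
    def test_fake_disk(self):
        disk = FakeDisk(total=100, used=0)
        disk.use(10)
        self.assertEqual(disk.available, 90)

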
class TrialTestCase(_TrialTestCase):
    """
    A twisted.trial.unittest.TestCase with Tahoe-required fixes applied.

    Historically the only fix was to ensure that ``.fail()`` passed a bytes
    msg on Python 2; now that Python 2 support has been removed, the
    override below simply delegates to the superclass.
    """

    def fail(self, msg):
        """
        On Python 2 this encoded a unicode ``msg`` as UTF-8 before handing
        it to Twisted, because Twisted passes ``msg`` along to an exception
        constructor and the resulting exception could not be turned into a
        string if it held a ``unicode`` instance.  On Python 3 no action is
        needed.
        """

        return super(TrialTestCase, self).fail(msg)