source: trunk/src/allmydata/interfaces.py

Last change on this file was f47b45a, checked in by Alexandre Detiste <alexandre.detiste@…>, at 2024-02-28T00:25:06Z

remove static native_str()

  • Property mode set to 100644
File size: 130.2 KB
1"""
2Interfaces for Tahoe-LAFS.
3
4Ported to Python 3.
5
6Note that for RemoteInterfaces, the __remote_name__ needs to be a native string because of https://github.com/warner/foolscap/blob/43f4485a42c9c28e2c79d655b3a9e24d4e6360ca/src/foolscap/remoteinterface.py#L67
7"""
8
9from typing import Dict
10
11from zope.interface import Interface, Attribute
12from twisted.plugin import (
13    IPlugin,
14)
15from twisted.internet.defer import Deferred
16from foolscap.api import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
17     ChoiceOf, IntegerConstraint, Any, RemoteInterface, Referenceable
18
19HASH_SIZE=32
20SALT_SIZE=16
21
22SDMF_VERSION=0
23MDMF_VERSION=1
24
25Hash = StringConstraint(maxLength=HASH_SIZE,
26                        minLength=HASH_SIZE)# binary format 32-byte SHA256 hash
27Nodeid = StringConstraint(maxLength=20,
28                          minLength=20) # binary format 20-byte SHA1 hash
29FURL = StringConstraint(1000)
30StorageIndex = StringConstraint(16)
31URI = StringConstraint(300) # kind of arbitrary
32
33MAX_BUCKETS = 256  # per peer -- zfec offers at most 256 shares per file
34
35# The default size for segments of new CHK ("immutable") uploads.
36DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE = 1024*1024
37
38ShareData = StringConstraint(None)
39URIExtensionData = StringConstraint(1000)
40Number = IntegerConstraint(8) # 2**(8*8) == 16EiB ~= 18e18 ~= 18 exabytes
41Offset = Number
ReadSize = int # the 'int' constraint is 2**31 == 2 GiB -- large files are processed in not-so-large increments
WriteEnablerSecret = Hash # used to protect mutable share modifications
LeaseRenewSecret = Hash # used to protect lease renewal requests
LeaseCancelSecret = Hash # was used to protect lease cancellation requests

class NoSpace(Exception):
    """Storage space was not available for a space-allocating operation."""

class DataTooLargeError(Exception):
    """The write went past the expected size of the bucket."""


class ConflictingWriteError(Exception):
    """Two writes happened to same immutable with different data."""


class RIBucketWriter(RemoteInterface):
    """ Objects of this kind live on the server side. """
    def write(offset=Offset, data=ShareData):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        retrieval.
        """
        return None

    def abort():
        """Abandon all the data that has been written.
        """
        return None


class RIBucketReader(RemoteInterface):
    def read(offset=Offset, length=ReadSize):
        return ShareData

    def advise_corrupt_share(reason=bytes):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        This is a wrapper around RIStorageServer.advise_corrupt_share()
        that is tied to a specific share, and therefore does not need the
        extra share-identifying arguments. Please see that method for full
        documentation.
        """


TestVector = ListOf(TupleOf(Offset, ReadSize, bytes, bytes))
# elements are (offset, length, operator, specimen)
# operator must be b"eq", typically length==len(specimen), but one can ensure
# writes don't happen to empty shares by setting length to 1 and specimen to
# b"". The operator is still used for wire compatibility with old versions.
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
                                      TupleOf(TestVector,
                                              DataVector,
                                              ChoiceOf(None, Offset), # new_length
                                              ))
ReadVector = ListOf(TupleOf(Offset, ReadSize))
ReadData = ListOf(ShareData)
# returns data[offset:offset+length] for each element of ReadVector

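# A hedged illustration (not part of the real module): one way a client
# might populate the structures above for a basic test-and-set on share 0.
# The offsets and byte values are hypothetical.
_example_testv = [(0, 4, b"eq", b"\x00\x00\x00\x03")]  # expect these 4 bytes at offset 0
_example_datav = [(0, b"\x00\x00\x00\x04")]            # then overwrite them
_example_tw_vectors = {0: (_example_testv, _example_datav, None)}  # None: leave length unchanged
_example_readv = [(0, 4)]                              # read the same range back, pre-write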

class RIStorageServer(RemoteInterface):
    __remote_name__ = "RIStorageServer.tahoe.allmydata.com"

    def get_version():
        """
        Return a dictionary of version information.
        """
        return DictOf(bytes, Any())

    def allocate_buckets(storage_index=StorageIndex,
                         renew_secret=LeaseRenewSecret,
                         cancel_secret=LeaseCancelSecret,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         allocated_size=Offset, canary=Referenceable):
        """
        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param sharenums: these are the share numbers (probably between 0 and
                          99) that the sender is proposing to store on this
                          server.
        @param renew_secret: This is the secret used to protect bucket refresh.
                             This secret is generated by the client and
                             stored for later comparison by the server. Each
                             server is given a different secret.
        @param cancel_secret: This no longer allows lease cancellation, but
                              must still be a unique value identifying the
                              lease. XXX stop relying on it to be unique.
        @param canary: If the canary is lost before close(), the bucket is
                       deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept.
                 New leases are added for shares in both lists.
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))

    def add_lease(storage_index=StorageIndex,
                  renew_secret=LeaseRenewSecret,
                  cancel_secret=LeaseCancelSecret):
        """
        Add a new lease on the given bucket. If the renew_secret matches an
        existing lease, that lease will be renewed instead. If there is no
        bucket for the given storage_index, return silently. (note that in
        tahoe-1.3.0 and earlier, IndexError was raised if there was no
        bucket)
        """
        return Any() # returns None now, but future versions might change

    def get_buckets(storage_index=StorageIndex):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

    def slot_readv(storage_index=StorageIndex,
                   shares=ListOf(int), readv=ReadVector):
        """Read a vector from the numbered shares associated with the given
        storage index. An empty shares list means to return data from all
        known shares. Returns a dictionary with one key per share."""
        return DictOf(int, ReadData) # shnum -> results

    def slot_testv_and_readv_and_writev(storage_index=StorageIndex,
                                        secrets=TupleOf(WriteEnablerSecret,
                                                        LeaseRenewSecret,
                                                        LeaseCancelSecret),
                                        tw_vectors=TestAndWriteVectorsForShares,
                                        r_vector=ReadVector,
                                        ):
        """
        General-purpose test-read-and-set operation for mutable slots:
        (1) For submitted shnums, compare the test vectors against extant
            shares, or against an empty share for shnums that do not exist.
        (2) Use the read vectors to extract "old data" from extant shares.
        (3) If all tests in (1) passed, then apply the write vectors
            (possibly creating new shares).
        (4) Return whether the tests passed, and the "old data", which does
            not include any modifications made by the writes.

        The operation does not interleave with other operations on the same
        shareset.

        This method is, um, large. The goal is to allow clients to update all
        the shares associated with a mutable file in a single round trip.

        @param storage_index: the index of the bucket to be created or
                              increfed.
        @param write_enabler: a secret that is stored along with the slot.
                              Writes are accepted from any caller who can
                              present the matching secret. A different secret
                              should be used for each slot*server pair.
        @param renew_secret: This is the secret used to protect bucket refresh.
                             This secret is generated by the client and
                             stored for later comparison by the server. Each
                             server is given a different secret.
        @param cancel_secret: This no longer allows lease cancellation, but
                              must still be a unique value identifying the
                              lease. XXX stop relying on it to be unique.

        The 'secrets' argument is a tuple of (write_enabler, renew_secret,
        cancel_secret). The first is required to perform any write. The
        latter two are used when allocating new shares. To simply acquire a
        new lease on existing shares, use an empty testv and an empty writev.

        Each share can have a separate test vector (i.e. a list of
        comparisons to perform). If all vectors for all shares pass, then all
        writes for all shares are recorded. Each comparison is a 4-tuple of
        (offset, length, operator, specimen), which effectively does a
        bool( (read(offset, length)) OPERATOR specimen ) and only performs
        the write if all these evaluate to True. Basic test-and-set uses 'eq'.
        Write-if-newer uses a seqnum and (offset, length, 'lt', specimen).
        Write-if-same-or-newer uses 'le'.

        Reads from the end of the container are truncated, and missing shares
        behave like empty ones, so to assert that a share doesn't exist (for
        use when creating a new share), use (0, 1, 'eq', '').

        The write vector will be applied to the given share, expanding it if
        necessary. A write vector applied to a share number that did not
        exist previously will cause that share to be created. Write vectors
        must not overlap (if they do, this will either cause an error or
        apply them in an unspecified order). Duplicate write vectors, with
        the same offset and data, are currently tolerated but are not
        desirable.

        In Tahoe-LAFS v1.8.3 or later (except 1.9.0a1), if you send a write
        vector whose offset is beyond the end of the current data, the space
        between the end of the current data and the beginning of the write
        vector will be filled with zero bytes. In earlier versions the
        contents of this space was unspecified (and might end up containing
        secrets). Storage servers with the new zero-filling behavior will
        advertise a true value for the 'fills-holes-with-zero-bytes' key
        (under 'http://allmydata.org/tahoe/protocols/storage/v1') in their
        version information.

        Each write vector is accompanied by a 'new_length' argument, which
        can be used to truncate the data. If new_length is not None and it is
        less than the current size of the data (after applying all write
        vectors), then the data will be truncated to new_length. If
        new_length==0, the share will be deleted.

        In Tahoe-LAFS v1.8.2 and earlier, new_length could also be used to
        enlarge the file by sending a number larger than the size of the data
        after applying all write vectors. That behavior was not used, and as
        of Tahoe-LAFS v1.8.3 it no longer works and the new_length is ignored
        in that case.

        If a storage client knows that the server supports zero-filling, for
        example from the 'fills-holes-with-zero-bytes' key in its version
        information, it can extend the file efficiently by writing a single
        zero byte just before the new end-of-file. Otherwise it must
        explicitly write zeroes to all bytes between the old and new
        end-of-file. In any case it should avoid sending new_length larger
        than the size of the data after applying all write vectors.

        The read vector is used to extract data from all known shares,
        *before* any writes have been applied. The same read vector is used
        for all shares. This captures the state that was tested by the test
        vector, for extant shares.

        This method returns two values: a boolean and a dict. The boolean is
        True if the write vectors were applied, False if not. The dict is
        keyed by share number, and each value contains a list of strings, one
        for each element of the read vector.

        If the write_enabler is wrong, this will raise BadWriteEnablerError.
        To enable share migration (using update_write_enabler), the exception
        will have the nodeid used for the old write enabler embedded in it,
        in the following string::

         The write enabler was recorded by nodeid '%s'.

        Note that the nodeid here is encoded using the same base32 encoding
        used by Foolscap and allmydata.util.idlib.nodeid_b2a().
        """
        return TupleOf(bool, DictOf(int, ReadData))

    def advise_corrupt_share(share_type=bytes, storage_index=StorageIndex,
                             shnum=int, reason=bytes):
        """Clients who discover hash failures in shares that they have
        downloaded from me will use this method to inform me about the
        failures. I will record their concern so that my operator can
        manually inspect the shares in question. I return None.

        'share_type' is either 'mutable' or 'immutable'. 'storage_index' is a
        (binary) storage index string, and 'shnum' is the integer share
        number. 'reason' is a human-readable explanation of the problem,
        probably including some expected hash values and the computed ones
        that did not match. Corruption advisories for mutable shares should
        include a hash of the public key (the same value that appears in the
        mutable-file verify-cap), since the current share format does not
        store that on disk.
        """

# The result of IStorageServer.get_version():
VersionMessage = Dict[bytes, object]

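# A hedged illustration (not part of the real module) of the shape of a
# get_version() response, based on the 'fills-holes-with-zero-bytes'
# discussion in RIStorageServer above. Only that key and the protocol-v1
# URL come from this file; the remaining entries are hypothetical.
_example_version_message: VersionMessage = {
    b"http://allmydata.org/tahoe/protocols/storage/v1": {
        b"fills-holes-with-zero-bytes": True,
        # ... other per-protocol parameters would appear here ...
    },
    b"application-version": b"tahoe-lafs/x.y.z",  # hypothetical entry
}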

class IStorageServer(Interface):
    """
    An object capable of storing shares for a storage client.
    """
    def get_version() -> Deferred[VersionMessage]:
        """
        :see: ``RIStorageServer.get_version``
        """

    def allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums,
            allocated_size,
            canary,
    ):
        """
        :see: ``RIStorageServer.allocate_buckets``
        """

    def add_lease(
            storage_index,
            renew_secret,
            cancel_secret,
    ):
        """
        :see: ``RIStorageServer.add_lease``
        """

    def get_buckets(
            storage_index,
    ):
        """
        :see: ``RIStorageServer.get_buckets``
        """

    def slot_readv(
            storage_index,
            shares,
            readv,
    ):
        """
        :see: ``RIStorageServer.slot_readv``
        """

    def slot_testv_and_readv_and_writev(
            storage_index,
            secrets,
            tw_vectors,
            r_vector,
    ):
        """
        :see: ``RIStorageServer.slot_testv_and_readv_and_writev``

        While the interface mostly matches, test vectors are simplified.
        Instead of a tuple ``(offset, read_size, operator, expected_data)`` in
        the original, for this method you need only pass in
        ``(offset, read_size, expected_data)``, with the operator implicitly
        being ``b"eq"``.
        """

    def advise_corrupt_share(
            share_type,
            storage_index,
            shnum,
            reason,
    ):
        """
        :see: ``RIStorageServer.advise_corrupt_share``
        """


class IStorageBucketWriter(Interface):
    """
    Objects of this kind live on the client side.
    """
    def put_block(segmentnum, data):
        """
        @param segmentnum=int
        @param data=ShareData: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_crypttext_hashes(hashes):
        """
        @param hashes=ListOf(Hash)
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_block_hashes(blockhashes):
        """
        @param blockhashes=ListOf(Hash)
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_share_hashes(sharehashes):
        """
        @param sharehashes=ListOf(TupleOf(int, Hash))
        @return: a Deferred that fires (with None) when the operation completes
        """

    def put_uri_extension(data):
        """This block of data contains integrity-checking information (hashes
        of plaintext, crypttext, and shares), as well as encoding parameters
        that are necessary to recover the data. This is a serialized dict
        mapping strings to other strings. The hash of this data is kept in
        the URI and verified before any of the data is used. All buckets for
        a given file contain identical copies of this data.

        The serialization format is specified with the following pseudocode:
        for k in sorted(dict.keys()):
            assert re.match(r'^[a-zA-Z_\-]+$', k)
            write(k + ':' + netstring(dict[k]))

        @param data=URIExtensionData
        @return: a Deferred that fires (with None) when the operation completes
        """

    def close():
        """Finish writing and close the bucket. The share is not finalized
        until this method is called: if the uploading client disconnects
        before calling close(), the partially-written share will be
        discarded.

        @return: a Deferred that fires (with None) when the operation completes
        """

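# A hedged sketch (illustrative, not the real packer) of the
# put_uri_extension serialization pseudocode above. It assumes the common
# netstring encoding b"%d:%s," % (len(s), s), str keys, and bytes values;
# the helper name is made up.
def _pack_uri_extension_sketch(d):
    import re
    out = []
    for k in sorted(d.keys()):
        assert re.match(r'^[a-zA-Z_\-]+$', k)  # keys are restricted, per the docstring
        v = d[k]                               # assumed to be bytes
        netstring = b"%d:%s," % (len(v), v)    # assumed netstring encoding
        out.append(k.encode("ascii") + b":" + netstring)
    return b"".join(out)

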
class IStorageBucketReader(Interface):
    def get_block_data(blocknum, blocksize, size):
        """Most blocks will be the same size. The last block might be shorter
        than the others.

        @param blocknum=int
        @param blocksize=int
        @param size=int
        @return: ShareData
        """

    def get_crypttext_hashes():
        """
        @return: ListOf(Hash)
        """

    def get_block_hashes(at_least_these=()):
        """
        @param at_least_these=SetOf(int)
        @return: ListOf(Hash)
        """

    def get_share_hashes():
        """
        @return: ListOf(TupleOf(int, Hash))
        """

    def get_uri_extension():
        """
        @return: URIExtensionData
        """


class IStorageBroker(Interface):
    def get_servers_for_psi(peer_selection_index):
        """
        @return: list of IServer instances
        """
    def get_connected_servers():
        """
        @return: frozenset of connected IServer instances
        """
    def get_known_servers():
        """
        @return: frozenset of IServer instances
        """
    def get_all_serverids():
        """
        @return: frozenset of serverid strings
        """
    def get_nickname_for_serverid(serverid):
        """
        @return: unicode nickname, or None
        """


class IDisplayableServer(Interface):
    def get_nickname():
        pass

    def get_name():
        pass

    def get_longname():
        pass


class IServer(IDisplayableServer):
    """I live in the client, and represent a single server."""
    def start_connecting(trigger_cb):
        pass

    def upload_permitted():
        """
        :return: True if we should use this server for uploads, False
            otherwise.
        """

    def get_storage_server():
        """
        Once a server is connected, I return an ``IStorageServer``.
        Before a server is connected for the first time, I return None.

        Note that the ``IStorageServer`` I return will start producing
        DeadReferenceErrors once the connection is lost.
        """


class IMutableSlotWriter(Interface):
    """
    The interface for a writer around a mutable slot on a remote server.
    """
    def set_checkstring(seqnum_or_checkstring, root_hash=None, salt=None):
        """
        Set the checkstring that I will pass to the remote server when
        writing.

            @param checkstring A packed checkstring to use.

        Note that implementations can differ in which semantics they
        wish to support for set_checkstring -- they can, for example,
        build the checkstring themselves from its constituents, or
        some other thing.
        """

    def get_checkstring():
        """
        Get the checkstring that I think currently exists on the remote
        server.
        """

    def put_block(data, segnum, salt):
        """
        Add a block and salt to the share.
        """

    def put_encprivkey(encprivkey):
        """
        Add the encrypted private key to the share.
        """

    def put_blockhashes(blockhashes):
        """
        @param blockhashes=list
        Add the block hash tree to the share.
        """

    def put_sharehashes(sharehashes):
        """
        @param sharehashes=dict
        Add the share hash chain to the share.
        """

    def get_signable():
        """
        Return the part of the share that needs to be signed.
        """

    def put_signature(signature):
        """
        Add the signature to the share.
        """

    def put_verification_key(verification_key):
        """
        Add the verification key to the share.
        """

    def finish_publishing():
        """
        Do anything necessary to finish writing the share to a remote
        server. I require that no further publishing needs to take place
        after this method has been called.
        """

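# A hedged sketch of one plausible calling sequence for an IMutableSlotWriter
# provider, derived only from the method docstrings above. The helper and
# its parameters are hypothetical; the real publishing logic lives in the
# mutable-file implementation.
def _publish_share_sketch(writer, blocks_and_salts, encprivkey,
                          blockhashes, sharehashes, sign, verification_key):
    for segnum, (block, salt) in enumerate(blocks_and_salts):
        writer.put_block(block, segnum, salt)          # data blocks plus salts
    writer.put_encprivkey(encprivkey)                  # encrypted private key
    writer.put_blockhashes(blockhashes)                # block hash tree (a list)
    writer.put_sharehashes(sharehashes)                # share hash chain (a dict)
    writer.put_signature(sign(writer.get_signable()))  # sign the signable region
    writer.put_verification_key(verification_key)
    return writer.finish_publishing()                  # no further writes after this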

class IURI(Interface):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def is_readonly():
        """Return False if this URI can be used to modify the data. Return
        True if this URI cannot be used to modify the data."""

    def is_mutable():
        """Return True if the data can be modified by *somebody* (perhaps
        someone who has a more powerful URI than this one)."""

    # TODO: rename to get_read_cap()
    def get_readonly():
        """Return another IURI instance that represents a read-only form of
        this one. If is_readonly() is True, this returns self."""

    def get_verify_cap():
        """Return an instance that provides IVerifierURI, which can be used
        to check on the availability of the file or directory, without
        providing enough capabilities to actually read or modify the
        contents. This may return None if the file does not need checking or
        verification (e.g. LIT URIs).
        """

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""


class IVerifierURI(IURI):
    def init_from_string(uri):
        """Accept a string (as created by my to_string() method) and populate
        this instance with its data. I am not normally called directly,
        please use the module-level uri.from_string() function to convert
        arbitrary URI strings into IURI-providing instances."""

    def to_string():
        """Return a string of printable ASCII characters, suitable for
        passing into init_from_string."""


class IDirnodeURI(Interface):
    """I am a URI that represents a dirnode."""


class IFileURI(Interface):
    """I am a URI that represents a filenode."""
    def get_size():
        """Return the length (in bytes) of the file that I represent."""


class IImmutableFileURI(IFileURI):
    pass

class IMutableFileURI(Interface):
    pass

class IDirectoryURI(Interface):
    pass

class IReadonlyDirectoryURI(Interface):
    pass


class CapConstraintError(Exception):
    """A constraint on a cap was violated."""

class MustBeDeepImmutableError(CapConstraintError):
    """Mutable children cannot be added to an immutable directory.
    Also, caps obtained from an immutable directory can trigger this error
    if they are later found to refer to a mutable object and then used."""

class MustBeReadonlyError(CapConstraintError):
    """Known write caps cannot be specified in a ro_uri field. Also,
    caps obtained from a ro_uri field can trigger this error if they
    are later found to be write caps and then used."""

class MustNotBeUnknownRWError(CapConstraintError):
    """Cannot add an unknown child cap specified in a rw_uri field."""


class IReadable(Interface):
    """I represent a readable object -- either an immutable file, or a
    specific version of a mutable file.
    """

    def is_readonly():
        """Return True if this reference provides only read access to the
        given file or directory (i.e. you cannot modify it through this
        reference), or False if it also allows modification. Note that even
        if this reference is read-only, someone else may hold a read-write
        reference to it.

        For an IReadable returned by get_best_readable_version(), this will
        always return True, but for instances of subinterfaces such as
        IMutableFileVersion, it may return False."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file."""

    def get_storage_index():
        """Return the storage index of the file."""

    def get_size():
        """Return the length (in bytes) of this readable object."""

    def download_to_data():
        """Download all of the file contents. I return a Deferred that fires
        with the contents as a byte string.
        """

    def read(consumer, offset=0, size=None):
        """Download a portion (possibly all) of the file's contents, making
        them available to the given IConsumer. Return a Deferred that fires
        (with the consumer) when the consumer is unregistered (either because
        the last byte has been given to it, or because the consumer threw an
        exception during write(), possibly because it no longer wants to
        receive data). The portion downloaded will start at 'offset' and
        contain 'size' bytes (or the remainder of the file if size==None). It
        is an error to read beyond the end of the file: callers must use
        get_size() and clip any non-default offset= and size= parameters. It
        is permissible to read zero bytes.

        The consumer will be used in non-streaming mode: an IPullProducer
        will be attached to it.

        The consumer will not receive data right away: several network trips
        must occur first. The order of events will be::

         consumer.registerProducer(p, streaming)
          (if streaming == False)::
           consumer does p.resumeProducing()
            consumer.write(data)
           consumer does p.resumeProducing()
            consumer.write(data).. (repeat until all data is written)
         consumer.unregisterProducer()
         deferred.callback(consumer)

        If a download error occurs, or an exception is raised by
        consumer.registerProducer() or consumer.write(), I will call
        consumer.unregisterProducer() and then deliver the exception via
        deferred.errback(). To cancel the download, the consumer should call
        p.stopProducing(), which will result in an exception being delivered
        via deferred.errback().

        See src/allmydata/util/consumer.py for an example of a simple
        download-to-memory consumer.
        """

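# A hedged sketch of the simple download-to-memory consumer that
# IReadable.read() points at (see src/allmydata/util/consumer.py for the
# real one). The class name is made up; the pull loop follows the
# non-streaming event order documented in that docstring.
from twisted.internet.interfaces import IConsumer
from zope.interface import implementer

@implementer(IConsumer)
class _MemoryConsumerSketch(object):
    def __init__(self):
        self.chunks = []
        self.done = False

    def registerProducer(self, producer, streaming):
        self.producer = producer
        if streaming:
            producer.resumeProducing()  # push producer: one kick, then it drives itself
        else:
            while not self.done:        # pull producer: each resumeProducing() yields one write()
                producer.resumeProducing()

    def write(self, data):
        self.chunks.append(data)

    def unregisterProducer(self):
        self.done = True

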
class IPeerSelector(Interface):
    """
    I select peers for an upload, maximizing some measure of health.

    I keep track of the state of a grid relative to a file. This means
    that I know about all of the peers that parts of that file could be
    placed on, and about shares that have been placed on those peers.
    Given this, I assign shares to peers in a way that maximizes the
    file's health according to whichever definition of health I am
    programmed with. I tell the uploader whether or not my assignment is
    healthy. I keep track of failures during the process and update my
    conclusions appropriately.
    """
    def add_peer_with_share(peerid, shnum):
        """
        Update my internal state to reflect the fact that peer peerid
        holds share shnum. Called for shares that are detected before
        peer selection begins.
        """

    def add_peers(peerids=set):
        """
        Update my internal state to include the peers in peerids as
        potential candidates for storing a file.
        """

    def mark_readonly_peer(peerid):
        """
        Mark the peer peerid as full. This means that any
        peer-with-share relationships I know about for peerid remain
        valid, but that peerid will not be assigned any new shares.
        """

    def mark_bad_peer(peerid):
        """
        Mark the peer peerid as bad. This is typically called when an
        error is encountered when communicating with a peer. I will
        disregard any existing peer => share relationships associated
        with peerid, and will not attempt to assign it any more shares.
        """

    def get_share_placements():
        """
        Return the share-placement map (a dict) which maps shares to
        server-ids
        """


class IWriteable(Interface):
    """
    I define methods that callers can use to update SDMF and MDMF
    mutable files on a Tahoe-LAFS grid.
    """
    # XXX: For the moment, we have only this. It is possible that we
    #      want to move overwrite() and modify() in here too.
    def update(data, offset):
        """
        I write the data from my data argument to the MDMF file,
        starting at offset. I continue writing data until my data
        argument is exhausted, appending data to the file as necessary.
        """
        # assert IMutableUploadable.providedBy(data)
        # to append data: offset=node.get_size_of_best_version()
        # do we want to support compacting MDMF?
        # for an MDMF file, this can be done with O(data.get_size())
        # memory. For an SDMF file, any modification takes
        # O(node.get_size_of_best_version()).


class IMutableFileVersion(IReadable):
    """I provide access to a particular version of a mutable file. The
    access is read/write if I was obtained from a filenode derived from
    a write cap, or read-only if the filenode was derived from a read cap.
    """

    def get_sequence_number():
        """Return the sequence number of this version."""

    def get_servermap():
        """Return the IMutableFileServerMap instance that was used to create
        this object.
        """

    def get_writekey():
        """Return this filenode's writekey, or None if the node does not have
        write-capability. This may be used to assist with data structures
        that need to make certain data available only to writers, such as the
        read-write child caps in dirnodes. The recommended process is to have
        reader-visible data be submitted to the filenode in the clear (where
        it will be encrypted by the filenode using the readkey), but encrypt
        writer-visible data using this writekey.
        """

    def overwrite(new_contents):
        """Replace the contents of the mutable file, provided that no other
        node has published (or is attempting to publish, concurrently) a
        newer version of the file than this one.

        I will avoid modifying any share that is different than the version
        given by get_sequence_number(). However, if another node is writing
        to the file at the same time as me, I may manage to update some shares
        while they update others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading this version,
        applying the modifier function (or bound method), then uploading
        the new version. This will succeed as long as no other node
        publishes a version between the download and the upload.
        I return a Deferred that fires (with a PublishStatus object) when
        the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_to_data(), the old contents will be from this version,
        but the modifier can use the servermap to make other decisions
        (such as refusing to apply the delta if there are multiple parallel
        versions, or if there is evidence of a newer unrecoverable version).
        'first_time' will be True the first time the modifier is called,
        and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """

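# A hedged example of a modifier callable of the kind modify() describes:
# it is safe to call repeatedly and returns None when its change is already
# present. The parameter order shown (old contents, servermap, first_time)
# is an assumption; check the mutable-file implementation before relying on it.
def _ensure_trailing_newline(old_contents, servermap, first_time):
    if old_contents.endswith(b"\n"):
        return None  # change already present: make no modification
    return old_contents + b"\n"
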
# The hierarchy looks like this:
#  IFilesystemNode
#   IFileNode
#    IMutableFileNode
#    IImmutableFileNode
#   IDirectoryNode

class IFilesystemNode(Interface):
    def get_cap():
        """Return the strongest 'cap instance' associated with this node.
        (writecap for writeable-mutable files/directories, readcap for
        immutable or readonly-mutable files/directories). To convert this
        into a string, call .to_string() on the result."""

    def get_readcap():
        """Return a readonly cap instance for this node. For immutable or
        readonly nodes, get_cap() and get_readcap() return the same thing."""

    def get_repair_cap():
        """Return an IURI instance that can be used to repair the file, or
        None if this node cannot be repaired (either because it is not
        distributed, like a LIT file, or because the node does not represent
        sufficient authority to create a repair-cap, like a read-only RSA
        mutable file node [which cannot create the correct write-enablers]).
        """

    def get_verify_cap():
        """Return an IVerifierURI instance that represents the
        'verify/refresh capability' for this node. The holder of this
        capability will be able to renew the lease for this node, protecting
        it from garbage-collection. They will also be able to ask a server if
        it holds a share for the file or directory.
        """

    def get_uri():
        """Return the URI string corresponding to the strongest cap associated
        with this node. If this node is read-only, the URI will only offer
        read-only access. If this node is read-write, the URI will offer
        read-write access.

        If you have read-write access to a node and wish to share merely
        read-only access with others, use get_readonly_uri().
        """

    def get_write_uri():
        """Return the URI string that can be used by others to get write
        access to this node, if it is writeable. If this is a read-only node,
        return None."""

    def get_readonly_uri():
        """Return the URI string that can be used by others to get read-only
        access to this node. The result is a read-only URI, regardless of
        whether this node is read-only or read-write.

        If you have merely read-only access to this node, get_readonly_uri()
        will return the same thing as get_uri().
        """

    def get_storage_index():
        """Return a string with the (binary) storage index in use on this
        download. This may be None if there is no storage index (i.e. LIT
        files and directories)."""

    def is_readonly():
        """Return True if this reference provides only read access to the
        given file or directory (i.e. you cannot modify it through this
        reference), or False if it also allows modification. Note that even
        if this reference is read-only, someone else may hold a read-write
        reference to it."""

    def is_mutable():
        """Return True if this file or directory is mutable (by *somebody*,
        not necessarily you), False if it is immutable. Note that a file
        might be mutable overall, but your reference to it might be
        read-only. On the other hand, all references to an immutable file
        will be read-only; there are no read-write references to an immutable
        file.
        """

    def is_unknown():
        """Return True if this is an unknown node."""

    def is_allowed_in_immutable_directory():
        """Return True if this node is allowed as a child of a deep-immutable
        directory. This is true if either the node is of a known-immutable type,
        or it is unknown and read-only.
        """

    def raise_error():
        """Raise any error associated with this node."""

    # XXX: These may not be appropriate outside the context of an IReadable.
    def get_size():
        """Return the length (in bytes) of the data this node represents. For
        directory nodes, I return the size of the backing store. I return
        synchronously and do not consult the network, so for mutable objects,
        I will return the most recently observed size for the object, or None
        if I don't remember a size. Use get_current_size, which returns a
        Deferred, if you want more up-to-date information."""

    def get_current_size():
        """I return a Deferred that fires with the length (in bytes) of the
        data this node represents.
        """


class IFileNode(IFilesystemNode):
    """I am a node that represents a file: a sequence of bytes. I am not a
    container, like IDirectoryNode."""
    def get_best_readable_version():
        """Return a Deferred that fires with an IReadable for the 'best'
        available version of the file. The IReadable provides only read
        access, even if this filenode was derived from a write cap.

        For an immutable file, there is only one version. For a mutable
        file, the 'best' version is the recoverable version with the
        highest sequence number. If no uncoordinated writes have occurred,
        and if enough shares are available, then this will be the most
        recent version that has been uploaded. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def download_best_version():
        """Download the contents of the version that would be returned
        by get_best_readable_version(). This is equivalent to calling
        download_to_data() on the IReadable given by that method.

        I return a Deferred that fires with a byte string when the file
        has been fully downloaded. To support streaming download, use
        the 'read' method of IReadable. If no version is recoverable,
        the Deferred will errback with an UnrecoverableFileError.
        """

    def get_size_of_best_version():
        """Find the size of the version that would be returned by
        get_best_readable_version().

        I return a Deferred that fires with an integer. If no version
        is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """


class IImmutableFileNode(IFileNode, IReadable):
    """I am a node representing an immutable file. Immutable files have
    only one version"""


class IMutableFileNode(IFileNode):
    """I provide access to a 'mutable file', which retains its identity
    regardless of what contents are put in it.

    The consistency-vs-availability problem means that there might be
    multiple versions of a file present in the grid, some of which might be
    unrecoverable (i.e. have fewer than 'k' shares). These versions are
    loosely ordered: each has a sequence number and a hash, and any version
    with seqnum=N was uploaded by a node that has seen at least one version
    with seqnum=N-1.

    The 'servermap' (an instance of IMutableFileServerMap) is used to
    describe the versions that are known to be present in the grid, and which
    servers are hosting their shares. It is used to represent the 'state of
    the world', and is used for this purpose by my test-and-set operations.
    Downloading the contents of the mutable file will also return a
    servermap. Uploading a new version into the mutable file requires a
    servermap as input, and the semantics of the replace operation is
    'replace the file with my new version if it looks like nobody else has
    changed the file since my previous download'. Because the file is
    distributed, this is not a perfect test-and-set operation, but it will do
    its best. If the replace process sees evidence of a simultaneous write,
    it will signal an UncoordinatedWriteError, so that the caller can take
    corrective action.


    Most readers will want to use the 'best' current version of the file, and
    should use my 'download_best_version()' method.

    To unconditionally replace the file, callers should use overwrite(). This
    is the mode that user-visible mutable files will probably use.

    To apply some delta to the file, call modify() with a callable modifier
    function that can apply the modification that you want to make. This is
    the mode that dirnodes will use, since most directory modification
    operations can be expressed in terms of deltas to the directory state.


    Three methods are available for users who need to perform more complex
    operations. The first is get_servermap(), which returns an up-to-date
    servermap using a specified mode. The second is download_version(), which
    downloads a specific version (not necessarily the 'best' one). The third
    is 'upload', which accepts new contents and a servermap (which must have
    been updated with MODE_WRITE). The upload method will attempt to apply
    the new contents as long as no other node has modified the file since the
    servermap was updated. This might be useful to a caller who wants to
    merge multiple versions into a single new one.

    Note that each time the servermap is updated, a specific 'mode' is used,
    which determines how many peers are queried. To use a servermap for my
    replace() method, that servermap must have been updated in MODE_WRITE.
    These modes are defined in allmydata.mutable.common, and consist of
    MODE_READ, MODE_WRITE, MODE_ANYTHING, and MODE_CHECK. Please look in
    allmydata/mutable/servermap.py for details about the differences.

    Mutable files are currently limited in size (about 3.5MB max) and can
    only be retrieved and updated all-at-once, as a single big string. Future
    versions of our mutable files will remove this restriction.
    """
    def get_best_mutable_version():
        """Return a Deferred that fires with an IMutableFileVersion for
        the 'best' available version of the file. The best version is
        the recoverable version with the highest sequence number. If no
        uncoordinated writes have occurred, and if enough shares are
        available, then this will be the most recent version that has
        been uploaded.

        If no version is recoverable, the Deferred will errback with an
        UnrecoverableFileError.
        """

    def overwrite(new_contents):
        """Unconditionally replace the contents of the mutable file with new
        ones. This simply chains get_servermap(MODE_WRITE) and upload(). This
        is only appropriate to use when the new contents of the file are
        completely unrelated to the old ones, and you do not care about other
        clients' changes.

        I return a Deferred that fires (with a PublishStatus object) when the
        update has completed.
        """

    def modify(modifier_cb):
        """Modify the contents of the file, by downloading the current
        version, applying the modifier function (or bound method), then
        uploading the new version. I return a Deferred that fires (with a
        PublishStatus object) when the update is complete.

        The modifier callable will be given three arguments: a string (with
        the old contents), a 'first_time' boolean, and a servermap. As with
        download_best_version(), the old contents will be from the best
        recoverable version, but the modifier can use the servermap to make
        other decisions (such as refusing to apply the delta if there are
        multiple parallel versions, or if there is evidence of a newer
        unrecoverable version). 'first_time' will be True the first time the
        modifier is called, and False on any subsequent calls.

        The callable should return a string with the new contents. The
        callable must be prepared to be called multiple times, and must
        examine the input string to see if the change that it wants to make
        is already present in the old version. If it does not need to make
        any changes, it can either return None, or return its input string.

        If the modifier raises an exception, it will be returned in the
        errback.
        """

    def get_servermap(mode):
        """Return a Deferred that fires with an IMutableFileServerMap
        instance, updated using the given mode.
        """

    def download_version(servermap, version):
        """Download a specific version of the file, using the servermap
        as a guide to where the shares are located.

        I return a Deferred that fires with the requested contents, or
        errbacks with UnrecoverableFileError. Note that a servermap that was
        updated with MODE_ANYTHING or MODE_READ may not know about shares for
        all versions (those modes stop querying servers as soon as they can
        fulfil their goals), so you may want to use MODE_CHECK (which checks
        everything) to get increased visibility.
        """

    def upload(new_contents, servermap):
        """Replace the contents of the file with new ones. This requires a
        servermap that was previously updated with MODE_WRITE.

        I attempt to provide test-and-set semantics, in that I will avoid
        modifying any share that is different than the version I saw in the
        servermap. However, if another node is writing to the file at the
        same time as me, I may manage to update some shares while they update
        others. If I see any evidence of this, I will signal
        UncoordinatedWriteError, and the file will be left in an inconsistent
        state (possibly the version you provided, possibly the old version,
        possibly somebody else's version, and possibly a mix of shares from
        all of these).

        The recommended response to UncoordinatedWriteError is to either
        return it to the caller (since they failed to coordinate their
        writes), or to attempt some sort of recovery. It may be sufficient to
        wait a random interval (with exponential backoff) and repeat your
        operation. If I do not signal UncoordinatedWriteError, then I was
        able to write the new version without incident.

        I return a Deferred that fires (with a PublishStatus object) when the
        publish has completed. I will update the servermap in-place with the
        location of all new shares.
        """

    def get_writekey():
        """Return this filenode's writekey, or None if the node does not have
        write-capability. This may be used to assist with data structures
        that need to make certain data available only to writers, such as the
        read-write child caps in dirnodes. The recommended process is to have
        reader-visible data be submitted to the filenode in the clear (where
        it will be encrypted by the filenode using the readkey), but encrypt
        writer-visible data using this writekey.
        """

    def get_version():
        """Returns the mutable file protocol version."""

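# A hedged sketch of the get_servermap()/upload() workflow described in the
# IMutableFileNode docstring. MODE_WRITE is assumed to come from
# allmydata.mutable.common, as that docstring states; the helper itself is
# illustrative.
def _replace_contents_sketch(node, new_contents):
    from allmydata.mutable.common import MODE_WRITE  # assumed location, per the docstring
    d = node.get_servermap(MODE_WRITE)
    d.addCallback(lambda servermap: node.upload(new_contents, servermap))
    return d  # fires with a PublishStatus, or errbacks with UncoordinatedWriteError
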
class NotEnoughSharesError(Exception):
    """Download was unable to get enough shares"""

class NoSharesError(Exception):
    """Download was unable to get any shares at all."""

class DownloadStopped(Exception):
    pass

class UploadUnhappinessError(Exception):
    """Upload was unable to satisfy 'servers_of_happiness'"""

class UnableToFetchCriticalDownloadDataError(Exception):
    """I was unable to fetch some piece of critical data that is supposed to
    be identically present in all shares."""

class NoServersError(Exception):
    """Upload wasn't given any servers to work with, usually indicating a
    network or Introducer problem."""

class ExistingChildError(Exception):
    """A directory node was asked to add or replace a child that already
    exists, and overwrite= was set to False."""

class NoSuchChildError(Exception):
    """A directory node was asked to fetch a child that does not exist."""
    def __str__(self):
        # avoid UnicodeEncodeErrors when converting to str
        return self.__repr__()

class ChildOfWrongTypeError(Exception):
    """An operation was attempted on a child of the wrong type (file or directory)."""


class IDirectoryNode(IFilesystemNode):
    """I represent a filesystem node that is a container, with a
    name-to-child mapping, holding the tahoe equivalent of a directory. All
    child names are unicode strings, and all children are some sort of
    IFilesystemNode (a file, subdirectory, or unknown node).
    """

    def get_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def get_readonly_uri():
        """
        The dirnode ('1') URI returned by this method can be used in
        set_uri() on a different directory ('2') to 'mount' a reference to
        this directory ('1') under the other ('2'). This URI is just a
        string, so it can be passed around through email or other out-of-band
        protocol.
        """

    def list():
        """I return a Deferred that fires with a dictionary mapping child
        name (a unicode string) to (node, metadata_dict) tuples, in which
        'node' is an IFilesystemNode and 'metadata_dict' is a dictionary of
        metadata."""

    def has_child(name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not. The child name must
        be a unicode string."""

    def get(name):
        """I return a Deferred that fires with a specific named child node,
        which is an IFilesystemNode. The child name must be a unicode string.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_metadata_for(name):
        """I return a Deferred that fires with the metadata dictionary for
        a specific named child node. The child name must be a unicode string.
        This metadata is stored in the *edge*, not in the child, so it is
        attached to the parent dirnode rather than the child node.
        I raise NoSuchChildError if I do not have a child by that name."""

    def set_metadata_for(name, metadata):
        """I replace any existing metadata for the named child with the new
        metadata. The child name must be a unicode string. This metadata is
        stored in the *edge*, not in the child, so it is attached to the
        parent dirnode rather than the child node. I return a Deferred
        (that fires with this dirnode) when the operation is complete.
        I raise NoSuchChildError if I do not have a child by that name."""

    def get_child_at_path(path):
        """Transform a child path into an IFilesystemNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with NoSuchChildError if the node could not be found.

        The path can be either a single string (slash-separated) or a list of
        path-name elements. All elements must be unicode strings.
        """

    def get_child_and_metadata_at_path(path):
        """Transform a child path into an IFilesystemNode and metadata.

        I am like get_child_at_path(), but my Deferred fires with a tuple of
        (node, metadata). The metadata comes from the last edge. If the path
        is empty, the metadata will be an empty dictionary.
        """

    def set_uri(name, writecap, readcap=None, metadata=None, overwrite=True):
        """I add a child (by writecap+readcap) at the specific name. I return
        a Deferred that fires when the operation finishes. If overwrite= is
        True, I will replace any existing child of the same name, otherwise
        an existing child will cause me to return ExistingChildError. The
        child name must be a unicode string.

        The child caps could be for a file, or for a directory. If you have
        both the writecap and readcap, you should provide both arguments.
        If you have only one cap and don't know whether it is read-only,
        provide it as the writecap argument and leave the readcap as None.
        If you have only one cap that is known to be read-only, provide it
        as the readcap argument and leave the writecap as None.
        The filecaps are typically obtained from an IFilesystemNode with
        get_uri() and get_readonly_uri().

        If metadata= is provided, I will use it as the metadata for the named
        edge. This will replace any existing metadata. If metadata= is left
        as the default value of None, I will set ['mtime'] to the current
        time, and I will set ['ctime'] to the current time if there was not
        already a child by this name present. This roughly matches the
1335        ctime/mtime semantics of traditional filesystems.  See the
1336        "About the metadata" section of webapi.txt for futher information.
1337
1338        If this directory node is read-only, the Deferred will errback with a
1339        NotWriteableError."""
1340
1341    def set_children(entries, overwrite=True):
1342        """Add multiple children (by writecap+readcap) to a directory node.
1343        Takes a dictionary, with child names as keys and (writecap, readcap)
1344        tuples (or (writecap, readcap, metadata) triples) as values. Returns
1345        a Deferred that fires (with this dirnode) when the operation
1346        finishes. This is equivalent to calling set_uri() multiple times, but
1347        is much more efficient. All child names must be unicode strings.
1348        """
1349
1350    def set_node(name, child, metadata=None, overwrite=True):
1351        """I add a child at the specific name. I return a Deferred that fires
1352        when the operation finishes. This Deferred will fire with the child
1353        node that was just added. I will replace any existing child of the
1354        same name. The child name must be a unicode string. The 'child'
1355        instance must be an instance providing IFilesystemNode.
1356
1357        If metadata= is provided, I will use it as the metadata for the named
1358        edge. This will replace any existing metadata. If metadata= is left
1359        as the default value of None, I will set ['mtime'] to the current
1360        time, and I will set ['ctime'] to the current time if there was not
1361        already a child by this name present. This roughly matches the
1362        ctime/mtime semantics of traditional filesystems. See the
1363        "About the metadata" section of webapi.txt for futher information.
1364
1365        If this directory node is read-only, the Deferred will errback with a
1366        NotWriteableError."""
1367
1368    def set_nodes(entries, overwrite=True):
1369        """Add multiple children to a directory node. Takes a dict mapping
1370        unicode childname to (child_node, metadata) tuples. If metadata=None,
1371        the original metadata is left unmodified. Returns a Deferred that
1372        fires (with this dirnode) when the operation finishes. This is
1373        equivalent to calling set_node() multiple times, but is much more
1374        efficient."""
1375
1376    def add_file(name, uploadable, metadata=None, overwrite=True):
1377        """I upload a file (using the given IUploadable), then attach the
1378        resulting ImmutableFileNode to the directory at the given name. I set
1379        metadata the same way as set_uri and set_node. The child name must be
1380        a unicode string.
1381
1382        I return a Deferred that fires (with the IFileNode of the uploaded
1383        file) when the operation completes."""
1384
1385    def delete(name, must_exist=True, must_be_directory=False, must_be_file=False):
1386        """I remove the child at the specific name. I return a Deferred that
1387        fires when the operation finishes. The child name must be a unicode
1388        string. If must_exist is True and I do not have a child by that name,
1389        I raise NoSuchChildError. If must_be_directory is True and the child
1390        is a file, or if must_be_file is True and the child is a directory,
1391        I raise ChildOfWrongTypeError."""
1392
1393    def create_subdirectory(name, initial_children=None, overwrite=True,
1394                            mutable=True, mutable_version=None, metadata=None):
1395        """I create and attach a directory at the given name. The new
1396        directory can be empty, or it can be populated with children
1397        according to 'initial_children', which takes a dictionary in the same
1398        format as set_nodes (i.e. mapping unicode child name to (childnode,
1399        metadata) tuples). The child name must be a unicode string. I return
1400        a Deferred that fires (with the new directory node) when the
1401        operation finishes."""
1402
1403    def move_child_to(current_child_name, new_parent, new_child_name=None,
1404                      overwrite=True):
1405        """I take one of my children and move them to a new parent. The child
1406        is referenced by name. On the new parent, the child will live under
1407        'new_child_name', which defaults to 'current_child_name'. TODO: what
1408        should we do about metadata? I return a Deferred that fires when the
1409        operation finishes. The child name must be a unicode string. I raise
1410        NoSuchChildError if I do not have a child by that name."""
1411
1412    def build_manifest():
1413        """I generate a table of everything reachable from this directory.
1414        I also compute deep-stats as described below.
1415
1416        I return a Monitor. The Monitor's results will be a dictionary with
1417        four elements:
1418
1419         res['manifest']: a list of (path, cap) tuples for all nodes
1420                          (directories and files) reachable from this one.
1421                          'path' will be a tuple of unicode strings. The
1422                          origin dirnode will be represented by an empty path
1423                          tuple.
1424         res['verifycaps']: a list of (printable) verifycap strings, one for
1425                            each reachable non-LIT node. This is a set:
1426                            it will contain no duplicates.
1427         res['storage-index']: a list of (base32) storage index strings,
1428                               one for each reachable non-LIT node. This is
1429                               a set: it will contain no duplicates.
1430         res['stats']: a dictionary, the same that is generated by
1431                       start_deep_stats() below.
1432
1433        The Monitor will also have an .origin_si attribute with the (binary)
1434        storage index of the starting point.
1435        """
1436
1437    def start_deep_stats():
1438        """Return a Monitor, examining all nodes (directories and files)
1439        reachable from this one. The Monitor's results will be a dictionary
1440        with the following keys::
1441
1442           count-immutable-files: count of how many CHK files are in the set
1443           count-mutable-files: same, for mutable files (does not include
1444                                directories)
1445           count-literal-files: same, for LIT files
1446           count-files: sum of the above three
1447
1448           count-directories: count of directories
1449
1450           size-immutable-files: total bytes for all CHK files in the set
1451           size-mutable-files (TODO): same, for current version of all mutable
1452                                      files, does not include directories
1453           size-literal-files: same, for LIT files
1454           size-directories: size of mutable files used by directories
1455
1456           largest-directory: number of bytes in the largest directory
1457           largest-directory-children: number of children in the largest
1458                                       directory
1459           largest-immutable-file: number of bytes in the largest CHK file
1460
1461        size-mutable-files is not yet implemented, because it would involve
1462        even more queries than deep_stats does.
1463
1464        The Monitor will also have an .origin_si attribute with the (binary)
1465        storage index of the starting point.
1466
1467        This operation will visit every directory node underneath this one,
1468        and can take a long time to run. On a typical workstation with good
1469        bandwidth, this can examine roughly 15 directories per second (and
1470        takes several minutes of 100% CPU for ~1700 directories).
1471        """
1472
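# A minimal sketch (not part of the interface) of the 'mount' pattern
# described by get_uri()/get_readonly_uri() above: attach a read-only view
# of one directory under another. 'source' and 'dest' are assumed to
# provide IDirectoryNode; u"mounted" is an arbitrary child name.
def _mount_readonly_example(source, dest):
    readcap = source.get_readonly_uri()
    # the cap is known to be read-only, so it goes in the readcap=
    # argument and writecap is left as None, per set_uri() above
    d = dest.set_uri(u"mounted", None, readcap=readcap, overwrite=False)
    return d  # errbacks with ExistingChildError if the name is taken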
1473
1474class ICodecEncoder(Interface):
1475    def set_params(data_size, required_shares, max_shares):
1476        """Set up the parameters of this encoder.
1477
1478        This prepares the encoder to perform an operation that converts a
1479        single block of data into a number of shares, such that a future
1480        ICodecDecoder can use a subset of these shares to recover the
1481        original data. This operation is invoked by calling encode(). Once
1482        the encoding parameters are set up, the encode operation can be
1483        invoked multiple times.
1484
1485        set_params() prepares the encoder to accept blocks of input data that
1486        are exactly 'data_size' bytes in length. The encoder will be prepared
1487        to produce 'max_shares' shares for each encode() operation (although
1488        see the 'desired_share_ids' to use less CPU). The encoding math will
1489        be chosen such that the decoder can get by with as few as
1490        'required_shares' of these shares and still reproduce the original
1491        data. For example, set_params(1000, 5, 5) offers no redundancy at
1492        all, whereas set_params(1000, 1, 10) provides 10x redundancy.
1493
1494        Numerical Restrictions: 'data_size' is required to be an integral
1495        multiple of 'required_shares'. In general, the caller should choose
1496        required_shares and max_shares based upon their reliability
1497        requirements and the number of peers available (the total storage
1498        space used is roughly equal to max_shares*data_size/required_shares),
1499        then choose data_size to achieve the memory footprint desired (larger
1500        data_size means more efficient operation, smaller data_size means
1501        smaller memory footprint).
1502
1503        In addition, 'max_shares' must be equal to or greater than
1504        'required_shares'. Of course, setting them to be equal causes
1505        encode() to degenerate into a particularly slow form of the 'split'
1506        utility.
1507
1508        See encode() for more details about how these parameters are used.
1509
1510        set_params() must be called before any other ICodecEncoder methods
1511        may be invoked.
1512        """
1513
1514    def get_params():
1515        """Return the 3-tuple of data_size, required_shares, max_shares"""
1516
1517    def get_encoder_type():
1518        """Return a short string that describes the type of this encoder.
1519
1520        There is required to be a global table of encoder classes. This method
1521        returns an index into this table; the value at this index is an
1522        encoder class, and this encoder is an instance of that class.
1523        """
1524
1525    def get_block_size():
1526        """Return the length of the shares that encode() will produce.
1527        """
1528
1529    def encode_proposal(data, desired_share_ids=None):
1530        """Encode some data.
1531
1532        'data' must be a string (or other buffer object), and len(data) must
1533        be equal to the 'data_size' value passed earlier to set_params().
1534
1535        This will return a Deferred that will fire with two lists. The first
1536        is a list of shares, each of which is a string (or other buffer
1537        object) such that len(share) is the same as what get_share_size()
1538        returned earlier. The second is a list of shareids, each of which is
1539        an integer. The lengths of the two lists will always be equal to each
1540        other. The user should take care to keep each share closely
1541        associated with its shareid, as one is useless without the other.
1542
1543        The length of this output list will normally be the same as the value
1544        provided to the 'max_shares' parameter of set_params(). This may be
1545        different if 'desired_share_ids' is provided.
1546
1547        'desired_share_ids', if provided, is required to be a sequence of
1548        ints, each of which is required to be >= 0 and < max_shares. If not
1549        provided, encode() will produce 'max_shares' shares, as if
1550        'desired_share_ids' were set to range(max_shares). You might use this
1551        if you initially thought you were going to use 10 peers, started
1552        encoding, and then two of the peers dropped out: you could use
1553        desired_share_ids= to skip the work (both memory and CPU) of
1554        producing shares for the peers that are no longer available.
1555
1556        """
1557
1558    def encode(inshares, desired_share_ids=None):
1559        """Encode some data. This may be called multiple times. Each call is
1560        independent.
1561
1562        inshares is a sequence of length required_shares, containing buffers
1563        (i.e. strings), where each buffer contains the next contiguous
1564        non-overlapping segment of the input data. Each buffer is required to
1565        be the same length, and the sum of the lengths of the buffers is
1566        required to be exactly the data_size promised by set_params(). (This
1567        implies that the data has to be padded before being passed to
1568        encode(), unless of course it already happens to be an even multiple
1569        of required_shares in length.)
1570
1571        Note: the requirement to break up your data into
1572        'required_shares' chunks of exactly the right length before
1573        calling encode() is surprising from the point of view of a user
1574        who doesn't know how FEC works. It feels like an
1575        implementation detail that has leaked outside the abstraction
1576        barrier. Is there a use case in which the data to be encoded
1577        might already be available in pre-segmented chunks, such that
1578        it is faster or less work to make encode() take a list rather
1579        than splitting a single string?
1580
1581        Yes, there is: suppose you are uploading a file with K=64,
1582        N=128, segsize=262,144. Then each in-share will be of size
1583        4096. If you use this .encode() API then your code could first
1584        read each successive 4096-byte chunk from the file and store
1585        each one in a Python string and store each such Python string
1586        in a Python list. Then you could call .encode(), passing that
1587        list as "inshares". The encoder would generate the other 64
1588        "secondary shares" and return to you a new list containing
1589        references to the same 64 Python strings that you passed in
1590        (as the primary shares) plus references to the new 64 Python
1591        strings.
1592
1593        (You could even imagine that your code could use readv() so
1594        that the operating system can arrange to get all of those
1595        bytes copied from the file into the Python list of Python
1596        strings as efficiently as possible instead of having a loop
1597        written in C or in Python to copy the next part of the file
1598        into the next string.)
1599
1600        On the other hand if you instead use the .encode_proposal()
1601        API (above), then your code can first read in all of the
1602        262,144 bytes of the segment from the file into a Python
1603        string, then call .encode_proposal() passing the segment data
1604        as the "data" argument. The encoder would basically first
1605        split the "data" argument into a list of 64 in-shares of 4096
1606        bytes each, and then do the same thing that .encode() does. So
1607        this would result in a little bit more copying of data and a
1608        little bit higher of a "maximum memory usage" during the
1609        process, although it might or might not make a practical
1610        difference for our current use cases.
1611
1612        Note that "inshares" is a strange name for the parameter if
1613        you think of the parameter as being just for feeding in data
1614        to the codec. It makes more sense if you think of the result
1615        of this encoding as being the set of shares from inshares plus
1616        an extra set of "secondary shares" (or "check shares"). It is
1617        a surprising name! If the API is going to be surprising then
1618        the name should be surprising. If we switch to
1619        encode_proposal() above then we should also switch to an
1620        unsurprising name.
1621
1622        'desired_share_ids', if provided, is required to be a sequence of
1623        ints, each of which is required to be >= 0 and < max_shares. If not
1624        provided, encode() will produce 'max_shares' shares, as if
1625        'desired_share_ids' were set to range(max_shares). You might use this
1626        if you initially thought you were going to use 10 peers, started
1627        encoding, and then two of the peers dropped out: you could use
1628        desired_share_ids= to skip the work (both memory and CPU) of
1629        producing shares for the peers that are no longer available.
1630
1631        For each call, encode() will return a Deferred that fires with two
1632        lists, one containing shares and the other containing the shareids.
1633        The get_share_size() method can be used to determine the length of
1634        the share strings returned by encode(). Each shareid is a small
1635        integer, exactly as passed into 'desired_share_ids' (or
1636        range(max_shares), if desired_share_ids was not provided).
1637
1638        The shares and their corresponding shareids are required to be kept
1639        together during storage and retrieval. Specifically, the share data is
1640        useless by itself: the decoder needs to be told which share is which
1641        by providing it with both the shareid and the actual share data.
1642
1643        This function will allocate an amount of memory roughly equal to::
1644
1645         (max_shares - required_shares) * get_share_size()
1646
1647        When combined with the memory that the caller must allocate to
1648        provide the input data, this leads to a memory footprint roughly
1649        equal to the size of the resulting encoded shares (i.e. the expansion
1650        factor times the size of the input segment).
1651        """
1652
1653        # rejected ideas:
1654        #
1655        #  returning a list of (shareidN,shareN) tuples instead of a pair of
1656        #  lists (shareids..,shares..). Brian thought the tuples would
1657        #  encourage users to keep the share and shareid together throughout
1658        #  later processing, Zooko pointed out that the code to iterate
1659        #  through two lists is not really more complicated than using a list
1660        #  of tuples and there's also a performance improvement
1661        #
1662        #  having 'data_size' not required to be an integral multiple of
1663        #  'required_shares'. Doing this would require encode() to perform
1664        #  padding internally, and we'd prefer to have any padding be done
1665        #  explicitly by the caller. Yes, it is an abstraction leak, but
1666        #  hopefully not an onerous one.
1667
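# A caller-side sketch (an illustration, not part of this module) of the
# padding and splitting that encode() requires: data_size must be an
# integral multiple of required_shares, so the caller pads the segment
# and cuts it into required_shares equal-length inshares.
def _segment_to_inshares_example(data, required_shares):
    # pad with NUL bytes up to the next multiple of required_shares
    remainder = len(data) % required_shares
    if remainder:
        data += b"\x00" * (required_shares - remainder)
    chunk = len(data) // required_shares
    return [data[i*chunk:(i+1)*chunk] for i in range(required_shares)]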
1668
1669class ICodecDecoder(Interface):
1670    def set_params(data_size, required_shares, max_shares):
1671        """Set the params. They have to be exactly the same ones that were
1672        used for encoding."""
1673
1674    def get_needed_shares():
1675        """Return the number of shares needed to reconstruct the data.
1676        set_params() is required to be called before this."""
1677
1678    def decode(some_shares, their_shareids):
1679        """Decode a partial list of shares into data.
1680
1681        'some_shares' is required to be a sequence of buffers of sharedata, a
1682        subset of the shares returned by ICodecEncoder.encode(). Each share is
1683        required to be of the same length.  The i'th element of their_shareids
1684        is required to be the shareid of the i'th buffer in some_shares.
1685
1686        This returns a Deferred that fires with a sequence of buffers. This
1687        sequence will contain all of the segments of the original data, in
1688        order. The sum of the lengths of all of the buffers will be the
1689        'data_size' value passed into the original ICodecEncoder.set_params()
1690        call. To get back the single original input block of data, use
1691        b''.join(output_buffers), or you may wish to simply write them in
1692        order to an output file.
1693
1694        Note that some of the elements in the result sequence may be
1695        references to the elements of the some_shares input sequence. In
1696        particular, this means that if those share objects are mutable (e.g.
1697        arrays) and if they are changed, then both the input (the
1698        'some_shares' parameter) and the output (the value given when the
1699        deferred is triggered) will change.
1700
1701        The length of 'some_shares' is required to be exactly the value of
1702        'required_shares' passed into the original ICodecEncoder.set_params()
1703        call.
1704        """
1705
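# An end-to-end sketch of the codec contract, assuming 'encoder' and
# 'decoder' provide ICodecEncoder and ICodecDecoder and were configured
# with identical set_params() values; it reuses the splitting helper
# sketched after ICodecEncoder above. Any required_shares of the
# (share, shareid) pairs suffice to rebuild the original block.
def _codec_roundtrip_example(encoder, decoder, data):
    k = decoder.get_needed_shares()
    d = encoder.encode(_segment_to_inshares_example(data, k))
    def _decode(result):
        shares, shareids = result
        # shares are useless without their shareids: keep them paired
        return decoder.decode(shares[:k], shareids[:k])
    d.addCallback(_decode)
    # reassemble the original block from the ordered segments (any
    # caller-added padding must be stripped by the caller afterwards)
    d.addCallback(lambda buffers: b"".join(buffers))
    return d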
1706
1707class IEncoder(Interface):
1708    """I take an object that provides IEncryptedUploadable, which provides
1709    encrypted data, and a list of shareholders. I then encode, hash, and
1710    deliver shares to those shareholders. I will compute all the necessary
1711    Merkle hash trees that are necessary to validate the crypttext that
1712    eventually comes back from the shareholders. I provide the URI Extension
1713    Block Hash, and the encoding parameters, both of which must be included
1714    in the URI.
1715
1716    I do not choose shareholders, that is left to the IUploader. I must be
1717    given a dict of RemoteReferences to storage buckets that are ready and
1718    willing to receive data.
1719    """
1720
1721    def set_encrypted_uploadable(u):
1722        """Provide a source of encrypted upload data. 'u' must implement
1723        IEncryptedUploadable.
1724
1725        When this is called, the IEncryptedUploadable will be queried for its
1726        length and the storage_index that should be used.
1727
1728        This returns a Deferred that fires with this Encoder instance.
1729
1730        This must be performed before start() can be called.
1731        """
1732
1733    def get_param(name):
1734        """Return an encoding parameter, by name.
1735
1736        'storage_index': return a string with the (16-byte truncated SHA-256
1737                         hash) storage index to which these shares should be
1738                         pushed.
1739
1740        'share_counts': return a tuple describing how many shares are used:
1741                        (needed_shares, servers_of_happiness, total_shares)
1742
1743        'num_segments': return an int with the number of segments that
1744                        will be encoded.
1745
1746        'segment_size': return an int with the size of each segment.
1747
1748        'block_size': return the size of the individual blocks that will
1749                      be delivered to a shareholder's put_block() method. By
1750                      knowing this, the shareholder will be able to keep all
1751                      blocks in a single file and still provide random access
1752                      when reading them. # TODO: can we avoid exposing this?
1753
1754        'share_size': an int with the size of the data that will be stored
1755                      on each shareholder. This is the aggregate amount of data
1756                      that will be sent to the shareholder, summed over all
1757                      the put_block() calls I will ever make. It is useful to
1758                      determine this size before asking potential
1759                      shareholders whether they will grant a lease or not,
1760                      since their answers will depend upon how much space we
1761                      need. TODO: this might also include some amount of
1762                      overhead, like the size of all the hashes. We need to
1763                      decide whether this is useful or not.
1764
1765        'serialized_params': a string with a concise description of the
1766                             codec name and its parameters. This may be passed
1767                             into the IUploadable to let it make sure that
1768                             the same file encoded with different parameters
1769                             will result in different storage indexes.
1770
1771        Once this is called, set_size() and set_params() may not be called.
1772        """
1773
1774    def set_shareholders(shareholders, servermap):
1775        """Tell the encoder where to put the encoded shares. 'shareholders'
1776        must be a dictionary that maps share number (an integer ranging from
1777        0 to n-1) to an instance that provides IStorageBucketWriter.
1778        'servermap' is a dictionary that maps share number (as defined above)
1779        to a set of peerids. This must be performed before start() can be
1780        called."""
1781
1782    def start():
1783        """Begin the encode/upload process. This involves reading encrypted
1784        data from the IEncryptedUploadable, encoding it, uploading the shares
1785        to the shareholders, then sending the hash trees.
1786
1787        set_encrypted_uploadable() and set_shareholders() must be called
1788        before this can be invoked.
1789
1790        This returns a Deferred that fires with a verify cap when the upload
1791        process is complete. The verifycap, plus the encryption key, is
1792        sufficient to construct the read cap.
1793        """
1794
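# A sketch of the calling sequence this interface implies: 'encoder' is
# assumed to provide IEncoder, 'eu' IEncryptedUploadable, and 'writers' a
# dict mapping share numbers to IStorageBucketWriter providers, with
# 'servermap' mapping share numbers to sets of peerids.
def _run_encoder_example(encoder, eu, writers, servermap):
    d = encoder.set_encrypted_uploadable(eu)  # fires with the encoder
    def _start(enc):
        enc.set_shareholders(writers, servermap)
        return enc.start()  # fires with a verify cap when done
    d.addCallback(_start)
    return d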
1795
1796class IDecoder(Interface):
1797    """I take a list of shareholders and some setup information, then
1798    download, validate, decode, and decrypt data from them, writing the
1799    results to an output file.
1800
1801    I do not locate the shareholders, that is left to the IDownloader. I must
1802    be given a dict of RemoteReferences to storage buckets that are ready to
1803    send data.
1804    """
1805
1806    def setup(outfile):
1807        """I take a file-like object (providing write and close) to which all
1808        the plaintext data will be written.
1809
1810        TODO: producer/consumer. Maybe write() should return a Deferred that
1811        indicates when it will accept more data? But probably having the
1812        IDecoder be a producer is easier to glue to IConsumer pieces.
1813        """
1814
1815    def set_shareholders(shareholders):
1816        """I take a dictionary that maps share identifiers (small integers)
1817        to RemoteReferences that provide RIBucketReader. This must be called
1818        before start()."""
1819
1820    def start():
1821        """I start the download. This process involves retrieving data and
1822        hash chains from the shareholders, using the hashes to validate the
1823        data, decoding the shares into segments, decrypting the segments,
1824        then writing the resulting plaintext to the output file.
1825
1826        I return a Deferred that will fire (with self) when the download is
1827        complete.
1828        """
1829
1830
1831class IDownloadTarget(Interface):
1832    # Note that if the IDownloadTarget is also an IConsumer, the downloader
1833    # will register itself as a producer. This allows the target to invoke
1834    # downloader.pauseProducing, resumeProducing, and stopProducing.
1835    def open(size):
1836        """Called before any calls to write() or close(). If an error
1837        occurs before any data is available, fail() may be called without
1838        a previous call to open().
1839
1840        'size' is the length of the file being downloaded, in bytes."""
1841
1842    def write(data):
1843        """Output some data to the target."""
1844
1845    def close():
1846        """Inform the target that there is no more data to be written."""
1847
1848    def fail(why):
1849        """fail() is called to indicate that the download has failed. 'why'
1850        is a Failure object indicating what went wrong. No further methods
1851        will be invoked on the IDownloadTarget after fail()."""
1852
1853    def register_canceller(cb):
1854        """The CiphertextDownloader uses this to register a no-argument function
1855        that the target can call to cancel the download. Once this canceller
1856        is invoked, no further calls to write() or close() will be made."""
1857
1858    def finish():
1859        """When the CiphertextDownloader is done, this finish() function will be
1860        called. Whatever it returns will be returned to the invoker of
1861        Downloader.download.
1862        """
1863
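# A minimal file-writing target, sketched under the documented call order:
# open() (unless an early error goes straight to fail()), write() zero or
# more times, close(), then finish(), whose return value becomes the
# result of Downloader.download. A real class would be decorated with
# zope.interface's @implementer(IDownloadTarget).
class _FileTargetExample(object):
    def __init__(self, path):
        self._path = path
        self._f = None
    def open(self, size):
        self._f = open(self._path, "wb")
    def write(self, data):
        self._f.write(data)
    def close(self):
        self._f.close()
    def fail(self, why):
        if self._f:
            self._f.close()  # no further calls will arrive after fail()
    def register_canceller(self, cb):
        pass  # this simple target never cancels the download
    def finish(self):
        return self._path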
1864
1865class IDownloader(Interface):
1866    def download(uri, target):
1867        """Perform a CHK download, sending the data to the given target.
1868        'target' must provide IDownloadTarget.
1869
1870        Returns a Deferred that fires (with the results of target.finish)
1871        when the download is finished, or errbacks if something went wrong."""
1872
1873
1874class IEncryptedUploadable(Interface):
1875    def set_upload_status(upload_status):
1876        """Provide an IUploadStatus object that should be filled with status
1877        information. The IEncryptedUploadable is responsible for setting
1878        key-determination progress ('chk'), size, storage_index, and
1879        ciphertext-fetch progress. It may delegate some of this
1880        responsibility to others, in particular to the IUploadable."""
1881
1882    def get_size():
1883        """This behaves just like IUploadable.get_size()."""
1884
1885    def get_all_encoding_parameters():
1886        """Return a Deferred that fires with a tuple of
1887        (k,happy,n,segment_size). The segment_size will be used as-is, and
1888        must match the following constraints: it must be a multiple of k, and
1889        it shouldn't be unreasonably larger than the file size (if
1890        segment_size is larger than filesize, the difference must be stored
1891        as padding).
1892
1893        This usually passes through to the IUploadable method of the same
1894        name.
1895
1896        The encoder strictly obeys the values returned by this method. To
1897        make an upload use non-default encoding parameters, you must arrange
1898        to control the values that this method returns.
1899        """
1900
1901    def get_storage_index():
1902        """Return a Deferred that fires with a 16-byte storage index.
1903        """
1904
1905    def read_encrypted(length, hash_only):
1906        """This behaves just like IUploadable.read(), but returns crypttext
1907        instead of plaintext. If hash_only is True, then this discards the
1908        data (and returns an empty list); this improves efficiency when
1909        resuming an interrupted upload (where we need to compute the
1910        plaintext hashes, but don't need the redundant encrypted data)."""
1911
1912    def close():
1913        """Just like IUploadable.close()."""
1914
1915
1916class IUploadable(Interface):
1917    def set_upload_status(upload_status):
1918        """Provide an IUploadStatus object that should be filled with status
1919        information. The IUploadable is responsible for setting
1920        key-determination progress ('chk')."""
1921
1922    def set_default_encoding_parameters(params):
1923        """Set the default encoding parameters, which must be a dict mapping
1924        strings to ints. The meaningful keys are 'k', 'happy', 'n', and
1925        'max_segment_size'. These might have an influence on the final
1926        encoding parameters returned by get_all_encoding_parameters(), if the
1927        Uploadable doesn't have more specific preferences.
1928
1929        This call is optional: if it is not used, the Uploadable will use
1930        some built-in defaults. If used, this method must be called before
1931        any other IUploadable methods to have any effect.
1932        """
1933
1934    def get_size():
1935        """Return a Deferred that will fire with the length of the data to be
1936        uploaded, in bytes. This will be called before the data is actually
1937        used, to compute encoding parameters.
1938        """
1939
1940    def get_all_encoding_parameters():
1941        """Return a Deferred that fires with a tuple of
1942        (k,happy,n,segment_size). The segment_size will be used as-is, and
1943        must match the following constraints: it must be a multiple of k, and
1944        it shouldn't be unreasonably larger than the file size (if
1945        segment_size is larger than filesize, the difference must be stored
1946        as padding).
1947
1948        The relative values of k and n allow some IUploadables to request
1949        better redundancy than others (in exchange for consuming more space
1950        in the grid).
1951
1952        Larger values of segment_size reduce hash overhead, while smaller
1953        values reduce memory footprint and cause data to be delivered in
1954        smaller pieces (which may provide a smoother and more predictable
1955        download experience).
1956
1957        The encoder strictly obeys the values returned by this method. To
1958        make an upload use non-default encoding parameters, you must arrange
1959        to control the values that this method returns. One way to influence
1960        them may be to call set_default_encoding_parameters() before calling
1961        get_all_encoding_parameters().
1962        """
1963
1964    def get_encryption_key():
1965        """Return a Deferred that fires with a 16-byte AES key. This key will
1966        be used to encrypt the data. The key will also be hashed to derive
1967        the StorageIndex.
1968
1969        Uploadables that want to achieve convergence should hash their file
1970        contents and the serialized_encoding_parameters to form the key
1971        (which of course requires a full pass over the data). Uploadables can
1972        use the upload.ConvergentUploadMixin class to achieve this
1973        automatically.
1974
1975        Uploadables that do not care about convergence (or do not wish to
1976        make multiple passes over the data) can simply return a
1977        strongly-random 16 byte string.
1978
1979        get_encryption_key() may be called multiple times: the IUploadable is
1980        required to return the same value each time.
1981        """
1982
1983    def read(length):
1984        """Return a Deferred that fires with a list of strings (perhaps with
1985        only a single element) that, when concatenated together, contain the
1986        next 'length' bytes of data. If EOF is near, this may provide fewer
1987        than 'length' bytes. The total number of bytes provided by read()
1988        before it signals EOF must equal the size provided by get_size().
1989
1990        If the data must be acquired through multiple internal read
1991        operations, returning a list instead of a single string may help to
1992        reduce string copies. However, the length of the concatenated strings
1993        must equal the amount of data requested, unless EOF is encountered.
1994        Long reads, or short reads without EOF, are not allowed. read()
1995        should return the same amount of data as a local disk file read, just
1996        in a different shape and asynchronously.
1997
1998        'length' will typically be equal to (min(get_size(),1MB)/req_shares),
1999        so a 10kB file means length=3kB, 100kB file means length=30kB,
2000        and >=1MB file means length=300kB.
2001
2002        This method provides for a single full pass through the data. Later
2003        use cases may desire multiple passes or access to only parts of the
2004        data (such as a mutable file making small edits-in-place). This API
2005        will be expanded once those use cases are better understood.
2006        """
2007
2008    def close():
2009        """The upload is finished, and whatever filehandle was in use may be
2010        closed."""
2011
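# A sketch of a file-backed uploadable honoring the read() contract above
# (exactly 'length' bytes until EOF) with a strongly-random key, i.e. a
# non-convergent upload. get_all_encoding_parameters() is omitted for
# brevity; 'succeed' is Twisted's already-fired Deferred helper.
import os
from twisted.internet.defer import succeed

class _RandomKeyUploadableExample(object):
    def __init__(self, path):
        self._f = open(path, "rb")
        self._size = os.path.getsize(path)
        self._key = os.urandom(16)  # 16-byte AES key, as specified above
    def set_upload_status(self, upload_status):
        pass
    def set_default_encoding_parameters(self, params):
        pass  # accept the built-in defaults
    def get_size(self):
        return succeed(self._size)
    def get_encryption_key(self):
        return succeed(self._key)  # same value on every call, as required
    def read(self, length):
        # a short read can only mean EOF, per the contract above
        return succeed([self._f.read(length)])
    def close(self):
        self._f.close()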
2012
2013class IMutableUploadable(Interface):
2014    """
2015    I represent content that is due to be uploaded to a mutable filecap.
2016    """
2017    # This is somewhat simpler than the IUploadable interface above
2018    # because mutable files do not need to be concerned with possibly
2019    # generating a CHK, nor with per-file keys. It is a subset of the
2020    # methods in IUploadable, though, so we could just as well implement
2021    # the mutable uploadables as IUploadables that don't happen to use
2022    # those methods (with the understanding that the unused methods will
2023    # never be called on such objects)
2024    def get_size():
2025        """
2026        Returns a Deferred that fires with the size of the content held
2027        by the uploadable.
2028        """
2029
2030    def read(length):
2031        """
2032        Returns a list of strings that, when concatenated, are the next
2033        length bytes of the file, or fewer if there are fewer bytes
2034        between the current location and the end of the file.
2035        """
2036
2037    def close():
2038        """
2039        The process that used the Uploadable is finished using it, so
2040        the uploadable may be closed.
2041        """
2042
2043
2044class IUploadResults(Interface):
2045    """I am returned by immutable upload() methods and contain the results of
2046    the upload.
2047
2048    Note that some of my methods return empty values (0 or an empty dict)
2049    when called for non-distributed LIT files."""
2050
2051    def get_file_size():
2052        """Return the file size, in bytes."""
2053
2054    def get_uri():
2055        """Return the (string) URI of the object uploaded, a CHK readcap."""
2056
2057    def get_ciphertext_fetched():
2058        """Return the number of bytes fetched by the helpe for this upload,
2059        or 0 if the helper did not need to fetch any bytes (or if there was
2060        no helper)."""
2061
2062    def get_preexisting_shares():
2063        """Return the number of shares that were already present in the grid."""
2064
2065    def get_pushed_shares():
2066        """Return the number of shares that were uploaded."""
2067
2068    def get_sharemap():
2069        """Return a dict mapping share identifier to set of IServer
2070        instances. This indicates which servers were given which shares. For
2071        immutable files, the shareid is an integer (the share number, from 0
2072        to N-1). For mutable files, it is a string of the form
2073        'seq%d-%s-sh%d', containing the sequence number, the roothash, and
2074        the share number."""
2075
2076    def get_servermap():
2077        """Return dict mapping IServer instance to a set of share numbers."""
2078
2079    def get_timings():
2080        """Return dict of timing information, mapping name to seconds. All
2081        times are floats:
2082          total : total upload time, start to finish
2083          storage_index : time to compute the storage index
2084          peer_selection : time to decide which peers will be used
2085          contacting_helper : initial helper query to upload/no-upload decision
2086          helper_total : initial helper query to helper finished pushing
2087          cumulative_fetch : helper waiting for ciphertext requests
2088          total_fetch : helper start to last ciphertext response
2089          cumulative_encoding : just time spent in zfec
2090          cumulative_sending : just time spent waiting for storage servers
2091          hashes_and_close : last segment push to shareholder close
2092          total_encode_and_push : first encode to shareholder close
2093        """
2094
2095    def get_uri_extension_data():
2096        """Return the dict of UEB data created for this file."""
2097
2098    def get_verifycapstr():
2099        """Return the (string) verify-cap URI for the uploaded object."""
2100
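# A sketch of reading an IUploadResults instance 'res' with the accessors
# documented above; the printed layout is illustrative only.
def _summarize_upload_example(res):
    print("size: %d bytes" % res.get_file_size())
    print("readcap: %s" % res.get_uri())
    print("shares: %d pushed, %d already present" % (
        res.get_pushed_shares(), res.get_preexisting_shares()))
    for server, sharenums in res.get_servermap().items():
        print("  %r holds shares %s" % (server, sorted(sharenums)))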
2101
2102class IDownloadResults(Interface):
2103    """I am created internally by download() methods. I contain a number of
2104    public attributes that contain details about the download process.::
2105
2106     .file_size : the size of the file, in bytes
2107     .servers_used : set of server peerids that were used during download
2108     .server_problems : dict mapping server peerid to a problem string. Only
2109                        servers that had problems (bad hashes, disconnects)
2110                        are listed here.
2111     .servermap : dict mapping server peerid to a set of share numbers. Only
2112                  servers that had any shares are listed here.
2113     .timings : dict of timing information, mapping name to seconds (float)
2114       peer_selection : time to ask servers about shares
2115       servers_peer_selection : dict of peerid to DYHB-query time
2116       uri_extension : time to fetch a copy of the URI extension block
2117       hashtrees : time to fetch the hash trees
2118       segments : time to fetch, decode, and deliver segments
2119       cumulative_fetch : time spent waiting for storage servers
2120       cumulative_decode : just time spent in zfec
2121       cumulative_decrypt : just time spent in decryption
2122       total : total download time, start to finish
2123       fetch_per_server : dict of server to list of per-segment fetch times
2124    """
2125
2126
2127class IUploader(Interface):
2128    def upload(uploadable):
2129        """Upload the file. 'uploadable' must impement IUploadable. This
2130        returns a Deferred that fires with an IUploadResults instance, from
2131        which the URI of the file can be obtained as results.uri ."""
2132
2133
2134class ICheckable(Interface):
2135    def check(monitor, verify=False, add_lease=False):
2136        """Check up on my health, optionally repairing any problems.
2137
2138        This returns a Deferred that fires with an instance that provides
2139        ICheckResults, or None if the object is non-distributed (i.e. LIT
2140        files).
2141
2142        The monitor will be checked periodically to see if the operation has
2143        been cancelled. If so, no new queries will be sent, and the Deferred
2144        will fire (with an OperationCancelledError) immediately.
2145
2146        Filenodes and dirnodes (which provide IFilesystemNode) are also
2147        checkable. Instances that represent verifier-caps will be checkable
2148        but not downloadable. Some objects (like LIT files) do not actually
2149        live in the grid, and their checkers return None (non-distributed
2150        files are always healthy).
2151
2152        If verify=False, a relatively lightweight check will be performed: I
2153        will ask all servers if they have a share for me, and I will believe
2154        whatever they say. If there are at least N distinct shares on the
2155        grid, my results will indicate r.is_healthy()==True. This requires a
2156        roundtrip to each server, but does not transfer very much data, so
2157        the network bandwidth is fairly low.
2158
2159        If verify=True, a more resource-intensive check will be performed:
2160        every share will be downloaded, and the hashes will be validated on
2161        every bit. I will ignore any shares that failed their hash checks. If
2162        there are at least N distinct valid shares on the grid, my results
2163        will indicate r.is_healthy()==True. This requires N/k times as much
2164        download bandwidth (and server disk IO) as a regular download. If a
2165        storage server is holding a corrupt share, or is experiencing memory
2166        failures during retrieval, or is malicious or buggy, then
2167        verification will detect the problem, but checking will not.
2168
2169        If add_lease=True, I will ensure that an up-to-date lease is present
2170        on each share. The lease secrets will be derived from my node secret
2171        (in BASEDIR/private/secret), so either I will add a new lease to the
2172        share, or I will merely renew the lease that I already had. In a
2173        future version of the storage-server protocol (once Accounting has
2174        been implemented), there may be additional options here to define the
2175        kind of lease that is obtained (which account number to claim, etc).
2176
2177        TODO: any problems seen during checking will be reported to the
2178        health-manager.furl, a centralized object that is responsible for
2179        figuring out why files are unhealthy so corrective action can be
2180        taken.
2181        """
2182
2183    def check_and_repair(monitor, verify=False, add_lease=False):
2184        """Like check(), but if the file/directory is not healthy, attempt to
2185        repair the damage.
2186
2187        Any non-healthy result will cause an immediate repair operation, to
2188        generate and upload new shares. After repair, the file will be as
2189        healthy as we can make it. Details about what sort of repair is done
2190        will be put in the check-and-repair results. The Deferred will not
2191        fire until the repair is complete.
2192
2193        This returns a Deferred that fires with an instance of
2194        ICheckAndRepairResults."""
2195
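# A sketch of driving check() and reading the ICheckResults defined below;
# 'node' is assumed to provide ICheckable and 'monitor' to be a Monitor as
# used elsewhere in this module. Remember that LIT files yield None.
def _check_example(node, monitor):
    d = node.check(monitor, verify=False, add_lease=True)
    def _report(r):
        if r is None:
            return "non-distributed (LIT): always healthy"
        return "%s: %d good shares (k=%d, N=%d)" % (
            r.get_summary(), r.get_share_counter_good(),
            r.get_encoding_needed(), r.get_encoding_expected())
    d.addCallback(_report)
    return d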
2196
2197class IDeepCheckable(Interface):
2198    def start_deep_check(verify=False, add_lease=False):
2199        """Check upon the health of me and everything I can reach.
2200
2201        This is a recursive form of check(), usable only on dirnodes.
2202
2203        I return a Monitor, with results that are an IDeepCheckResults
2204        object.
2205
2206        TODO: If any of the directories I traverse are unrecoverable, the
2207        Monitor will report failure. If any of the files I check upon are
2208        unrecoverable, those problems will be reported in the
2209        IDeepCheckResults as usual, and the Monitor will not report a
2210        failure.
2211        """
2212
2213    def start_deep_check_and_repair(verify=False, add_lease=False):
2214        """Check upon the health of me and everything I can reach. Repair
2215        anything that isn't healthy.
2216
2217        This is a recursive form of check_and_repair(), usable only on
2218        dirnodes.
2219
2220        I return a Monitor, with results that are an
2221        IDeepCheckAndRepairResults object.
2222
2223        TODO: If any of the directories I traverse are unrecoverable, the
2224        Monitor will report failure. If any of the files I check upon are
2225        unrecoverable, those problems will be reported in the
2226        IDeepCheckResults as usual, and the Monitor will not report a
2227        failure.
2228        """
2229
2230
2231class ICheckResults(Interface):
2232    """I contain the detailed results of a check/verify operation.
2233    """
2234
2235    def get_storage_index():
2236        """Return a string with the (binary) storage index."""
2237
2238    def get_storage_index_string():
2239        """Return a string with the (printable) abbreviated storage index."""
2240
2241    def get_uri():
2242        """Return the (string) URI of the object that was checked."""
2243
2244    def is_healthy():
2245        """Return a boolean, True if the file/dir is fully healthy, False if
2246        it is damaged in any way. Non-distributed LIT files always return
2247        True."""
2248
2249    def is_recoverable():
2250        """Return a boolean, True if the file/dir can be recovered, False if
2251        not. Unrecoverable files are obviously unhealthy. Non-distributed LIT
2252        files always return True."""
2253
2254    # the following methods all return None for non-distributed LIT files
2255
2256    def get_happiness():
2257        """Return the happiness count of the file."""
2258
2259    def get_encoding_needed():
2260        """Return 'k', the number of shares required for recovery."""
2261
2262    def get_encoding_expected():
2263        """Return 'N', the number of total shares generated."""
2264
2265    def get_share_counter_good():
2266        """Return the number of distinct good shares that were found. For
2267        mutable files, this counts shares for the 'best' version."""
2268
2269    def get_share_counter_wrong():
2270        """For mutable files, return the number of shares for versions other
2271        than the 'best' one (which is defined as being the recoverable
2272        version with the highest sequence number, then the highest roothash).
2273        These are either leftover shares from an older version (perhaps on a
2274        server that was offline when an update occurred), shares from an
2275        unrecoverable newer version, or shares from an alternate current
2276        version that results from an uncoordinated write collision. For a
2277        healthy file, this will equal 0. For immutable files, this will
2278        always equal 0."""
2279
2280    def get_corrupt_shares():
2281        """Return a list of 'share locators', one for each share that was
2282        found to be corrupt (integrity failure). Each share locator is a list
2283        of (IServer, storage_index, sharenum)."""
2284
2285    def get_incompatible_shares():
2286        """Return a list of 'share locators', one for each share that was
2287        found to be of an unknown format. Each share locator is a list of
2288        (IServer, storage_index, sharenum)."""
2289
2290    def get_servers_responding():
2291        """Return a list of IServer objects, one for each server that
2292        responded to the share query (even if they said they didn't have
2293        shares, and even if they said they did have shares but then didn't
2294        send them when asked, or dropped the connection, or returned a
2295        Failure, and even if they said they did have shares and sent
2296        incorrect ones when asked)"""
2297
2298    def get_host_counter_good_shares():
2299        """Return the number of distinct storage servers with good shares. If
2300        this number is less than get_share_counter_good(), then some shares
2301        are doubled up, increasing the correlation of failures. This
2302        indicates that one or more shares should be moved to an otherwise
2303        unused server, if one is available.
2304        """
2305
2306    def get_version_counter_recoverable():
2307        """Return the number of recoverable versions of the file. For a
2308        healthy file, this will equal 1."""
2309
2310    def get_version_counter_unrecoverable():
2311         """Return the number of unrecoverable versions of the file. For a
2312         healthy file, this will be 0."""
2313
2314    def get_sharemap():
2315        """Return a dict mapping share identifier to list of IServer objects.
2316        This indicates which servers are holding which shares. For immutable
2317        files, the shareid is an integer (the share number, from 0 to N-1).
2318        For mutable files, it is a string of the form 'seq%d-%s-sh%d',
2319        containing the sequence number, the roothash, and the share number."""
2320
2321    def get_summary():
2322        """Return a string with a brief (one-line) summary of the results."""
2323
2324    def get_report():
2325        """Return a list of strings with more detailed results."""
2326
2327
2328class ICheckAndRepairResults(Interface):
2329    """I contain the detailed results of a check/verify/repair operation.
2330
2331    The IFilesystemNode.check()/verify()/repair() methods all return
2332    instances that provide ICheckAndRepairResults.
2333    """
2334
2335    def get_storage_index():
2336        """Return a string with the (binary) storage index."""
2337
2338    def get_storage_index_string():
2339        """Return a string with the (printable) abbreviated storage index."""
2340
2341    def get_repair_attempted():
2342        """Return a boolean, True if a repair was attempted. We might not
2343        attempt to repair the file because it was healthy, or healthy enough
2344        (i.e. some shares were missing but not enough to exceed some
2345        threshold), or because we don't know how to repair this object."""
2346
2347    def get_repair_successful():
2348        """Return a boolean, True if repair was attempted and the file/dir
2349        was fully healthy afterwards. False if no repair was attempted or if
2350        a repair attempt failed."""
2351
2352    def get_pre_repair_results():
2353        """Return an ICheckResults instance that describes the state of the
2354        file/dir before any repair was attempted."""
2355
2356    def get_post_repair_results():
2357        """Return an ICheckResults instance that describes the state of the
2358        file/dir after any repair was attempted. If no repair was attempted,
2359        the pre-repair and post-repair results will be identical."""
2360
2361
2362class IDeepCheckResults(Interface):
2363    """I contain the results of a deep-check operation.
2364
2365    This is returned by a call to ICheckable.deep_check().
2366    """
2367
2368    def get_root_storage_index_string():
2369        """Return the storage index (abbreviated human-readable string) of
2370        the first object checked."""
2371
2372    def get_counters():
2373        """Return a dictionary with the following keys::
2374
2375             count-objects-checked: count of how many objects were checked
2376             count-objects-healthy: how many of those objects were completely
2377                                    healthy
2378             count-objects-unhealthy: how many were damaged in some way
2379             count-objects-unrecoverable: how many were unrecoverable
2380             count-corrupt-shares: how many shares were found to have
2381                                   corruption, summed over all objects
2382                                   examined
2383        """
2384
2385    def get_corrupt_shares():
2386        """Return a set of (IServer, storage_index, sharenum) for all shares
2387        that were found to be corrupt. storage_index is binary."""
2388
2389    def get_all_results():
2390        """Return a dictionary mapping pathname (a tuple of strings, ready to
2391        be slash-joined) to an ICheckResults instance, one for each object
2392        that was checked."""
2393
2394    def get_results_for_storage_index(storage_index):
2395        """Retrieve the ICheckResults instance for the given (binary)
2396        storage index. Raises KeyError if there are no results for that
2397        storage index."""
2398
2399    def get_stats():
2400        """Return a dictionary with the same keys as
2401        IDirectoryNode.deep_stats()."""
2402
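# A hedged sketch of summarizing IDeepCheckResults; all counter key names
# are taken from the get_counters() docstring above.
def _example_summarize_deep_check(deep_results):
    counters = deep_results.get_counters()
    checked = counters["count-objects-checked"]
    healthy = counters["count-objects-healthy"]
    print("%d/%d objects healthy" % (healthy, checked))
    # get_corrupt_shares() yields (IServer, storage_index, sharenum) tuples;
    # storage_index is binary, so repr() it rather than printing it raw.
    for (server, storage_index, sharenum) in deep_results.get_corrupt_shares():
        print("corrupt: share %d of %r" % (sharenum, storage_index))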
2403
2404class IDeepCheckAndRepairResults(Interface):
2405    """I contain the results of a deep-check-and-repair operation.
2406
2407    This is returned by a call to ICheckable.deep_check_and_repair().
2408    """
2409
2410    def get_root_storage_index_string():
2411        """Return the storage index (abbreviated human-readable string) of
2412        the first object checked."""
2413
2414    def get_counters():
2415        """Return a dictionary with the following keys::
2416
2417             count-objects-checked: count of how many objects were checked
2418             count-objects-healthy-pre-repair: how many of those objects were
2419                                               completely healthy (before any
2420                                               repair)
2421             count-objects-unhealthy-pre-repair: how many were damaged in
2422                                                 some way
2423             count-objects-unrecoverable-pre-repair: how many were unrecoverable
2424             count-objects-healthy-post-repair: how many of those objects were
2425                                                completely healthy (after any
2426                                                repair)
2427             count-objects-unhealthy-post-repair: how many were damaged in
2428                                                  some way
2429             count-objects-unrecoverable-post-repair: how many were
2430                                                      unrecoverable
2431             count-repairs-attempted: repairs were attempted on this many
2432                                      objects. The count-repairs- keys will
2433                                      always be provided; however, unless
2434                                      repair=true is present, they will all
2435                                      be zero.
2436             count-repairs-successful: how many repairs resulted in healthy
2437                                       objects
2438             count-repairs-unsuccessful: how many repairs did not result
2439                                         in completely healthy objects
2440             count-corrupt-shares-pre-repair: how many shares were found to
2441                                              have corruption, summed over all
2442                                              objects examined (before any
2443                                              repair)
2444             count-corrupt-shares-post-repair: how many shares were found to
2445                                               have corruption, summed over all
2446                                               objects examined (after any
2447                                               repair)
2448        """
2449
2450    def get_stats():
2451        """Return a dictionary with the same keys as
2452        IDirectoryNode.deep_stats()."""
2453
2454    def get_corrupt_shares():
2455        """Return a set of (IServer, storage_index, sharenum) for all shares
2456        that were found to be corrupt before any repair was attempted.
2457        storage_index is binary.
2458        """
2459    def get_remaining_corrupt_shares():
2460        """Return a set of (IServer, storage_index, sharenum) for all shares
2461        that were found to be corrupt after any repair was completed.
2462        storage_index is binary. These are shares that need manual inspection
2463        and probably deletion.
2464        """
2465    def get_all_results():
2466        """Return a dictionary mapping pathname (a tuple of strings, ready to
2467        be slash-joined) to an ICheckAndRepairResults instance, one for each
2468        object that was checked."""
2469
2470    def get_results_for_storage_index(storage_index):
2471        """Retrieve the ICheckAndRepairResults instance for the given (binary)
2472        storage index. Raises KeyError if there are no results for that
2473        storage index."""
2474
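# A small sketch deriving a repair success rate from the counters documented
# above (key names come straight from the get_counters() docstring).
def _example_repair_success_rate(deep_cr_results):
    counters = deep_cr_results.get_counters()
    attempted = counters["count-repairs-attempted"]
    if attempted == 0:
        # either everything was healthy, or repair=true was not requested
        return None
    return counters["count-repairs-successful"] / attempted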
2475
2476class IRepairable(Interface):
2477    def repair(check_results):
2478        """Attempt to repair the given object. Returns a Deferred that fires
2479        with an IRepairResults object.
2480
2481        I must be called with an object that implements ICheckResults, as
2482        proof that you have actually discovered a problem with this file. I
2483        will use the data in the checker results to guide the repair process,
2484        such as which servers provided bad data and should therefore be
2485        avoided. The ICheckResults object is inside the
2486        ICheckAndRepairResults object, which is returned by the
2487        ICheckable.check() method::
2488
2489         d = filenode.check(repair=False)
2490         def _got_results(check_and_repair_results):
2491             check_results = check_and_repair_results.get_pre_repair_results()
2492             return filenode.repair(check_results)
2493         d.addCallback(_got_results)
2494         return d
2495        """
2496
2497
2498class IRepairResults(Interface):
2499    """I contain the results of a repair operation."""
2500    def get_successful():
2501        """Returns a boolean: True if the repair made the file healthy, False
2502        if not. Repair failure generally indicates a file that has been
2503        damaged beyond repair."""
2504
2505
2506class IClient(Interface):
2507    def upload(uploadable):
2508        """Upload some data into a CHK, get back the UploadResults for it.
2509        @param uploadable: something that implements IUploadable
2510        @return: a Deferred that fires with the UploadResults instance.
2511                 To get the URI for this file, use results.uri .
2512        """
2513
2514    def create_mutable_file(contents=""):
2515        """Create a new mutable file with initial contents, and get back
2516        the new node instance.
2517
2518        @param contents: (bytestring, callable, or None): this provides the
2519        initial contents of the mutable file. If 'contents' is a bytestring,
2520        it will be used as-is. If 'contents' is a callable, it will be
2521        invoked with the new MutableFileNode instance and is expected to
2522        return a bytestring with the initial contents of the file (the
2523        callable can use node.get_writekey() to decide how to encrypt the
2524        initial contents, e.g. for a brand new dirnode with initial
2525        children). contents=None is equivalent to an empty string. Passing
2526        a callable is more efficient than creating a mutable file and then
2527        setting its contents in a separate operation.
2528
2529        @return: a Deferred that fires with an IMutableFileNode instance.
2530        """
2531
2532    def create_dirnode(initial_children=None):
2533        """Create a new unattached dirnode, possibly with initial children.
2534
2535        @param initial_children: dict with keys that are unicode child names,
2536        and values that are (childnode, metadata) tuples.
2537
2538        @return: a Deferred that fires with the new IDirectoryNode instance.
2539        """
2540
2541    def create_node_from_uri(uri, rouri):
2542        """Create a new IFilesystemNode instance from the uri, synchronously.
2543        @param uri: a string or IURI-providing instance, or None. This could
2544                    be for a LiteralFileNode, a CHK file node, a mutable file
2545                    node, or a directory node
2546        @param rouri: a string or IURI-providing instance, or None. If the
2547                      main uri is None, I will use the rouri instead. If I
2548                      recognize the format of the main uri, I will ignore the
2549                      rouri (because it can be derived from the writecap).
2550
2551        @return: an instance that provides IFilesystemNode (or more usefully
2552                 one of its subclasses). File-specifying URIs will result in
2553                 IFileNode-providing instances, like ImmutableFileNode,
2554                 LiteralFileNode, or MutableFileNode. Directory-specifying
2555                 URIs will result in IDirectoryNode-providing instances, like
2556                 DirectoryNode.
2557        """
2558
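# An illustrative sketch of the callable form of create_mutable_file()
# described above. `client` is assumed to provide IClient; the callable is
# invoked with the new node and must return the initial contents, and
# get_uri() is assumed from the usual IFilesystemNode API.
def _example_create_mutable_with_callable(client):
    def _make_contents(node):
        # node.get_writekey() is available here, e.g. to encrypt initial
        # children for a brand-new dirnode.
        return b"contents chosen once the node exists"
    d = client.create_mutable_file(_make_contents)
    d.addCallback(lambda node: node.get_uri())
    return d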
2559
2560class INodeMaker(Interface):
2561    """The NodeMaker is used to create IFilesystemNode instances. It can
2562    accept a filecap/dircap string and return the node right away. It can
2563    also create new nodes (i.e. upload a file, or create a mutable file)
2564    asynchronously. Once you have one of these nodes, you can use other
2565    methods to determine whether it is a file or directory, and to download
2566    or modify its contents.
2567
2568    The NodeMaker encapsulates all the authorities that these
2569    IFilesystemNodes require (like references to the StorageFarmBroker). Each
2570    Tahoe process will typically have a single NodeMaker, but unit tests may
2571    create simplified/mocked forms for testing purposes.
2572    """
2573
2574    def create_from_cap(writecap, readcap=None, deep_immutable=False, name=u"<unknown name>"):
2575        """I create an IFilesystemNode from the given writecap/readcap. I can
2576        only provide nodes for existing file/directory objects: use my other
2577        methods to create new objects. I return synchronously."""
2578
2579    def create_mutable_file(contents=None, keysize=None):
2580        """I create a new mutable file, and return a Deferred that will fire
2581        with the IMutableFileNode instance when it is ready. If contents= is
2582        provided (a bytestring), it will be used as the initial contents of
2583        the new file, otherwise the file will contain zero bytes. keysize= is
2584        for use by unit tests, to create mutable files that are smaller than
2585        usual."""
2586
2587    def create_new_mutable_directory(initial_children=None):
2588        """I create a new mutable directory, and return a Deferred that will
2589        fire with the IDirectoryNode instance when it is ready. If
2590        initial_children= is provided (a dict mapping unicode child name to
2591        (childnode, metadata_dict) tuples), the directory will be populated
2592        with those children, otherwise it will be empty."""
2593
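# A minimal sketch of typical INodeMaker use, following the docstrings above:
# existing caps resolve synchronously, while new objects are created
# asynchronously. `nodemaker` is assumed to provide INodeMaker and `writecap`
# to be a valid filecap/dircap string.
def _example_nodemaker(nodemaker, writecap):
    existing = nodemaker.create_from_cap(writecap)
    d = nodemaker.create_mutable_file(contents=b"hello")
    d.addCallback(lambda new_node: (existing, new_node))
    return d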
2594
2595class IClientStatus(Interface):
2596    def list_all_uploads():
2597        """Return a list of uploader objects, one for each upload that
2598        currently has an object available (tracked with weakrefs). This is
2599        intended for debugging purposes."""
2600
2601    def list_active_uploads():
2602        """Return a list of active IUploadStatus objects."""
2603
2604    def list_recent_uploads():
2605        """Return a list of IUploadStatus objects for the most recently
2606        started uploads."""
2607
2608    def list_all_downloads():
2609        """Return a list of downloader objects, one for each download that
2610        currently has an object available (tracked with weakrefs). This is
2611        intended for debugging purposes."""
2612
2613    def list_active_downloads():
2614        """Return a list of active IDownloadStatus objects."""
2615
2616    def list_recent_downloads():
2617        """Return a list of IDownloadStatus objects for the most recently
2618        started downloads."""
2619
2620
2621class IUploadStatus(Interface):
2622    def get_started():
2623        """Return a timestamp (float with seconds since epoch) indicating
2624        when the operation was started."""
2625
2626    def get_storage_index():
2627        """Return a string with the (binary) storage index in use on this
2628        upload. Returns None if the storage index has not yet been
2629        calculated."""
2630
2631    def get_size():
2632        """Return an integer with the number of bytes that will eventually
2633        be uploaded for this file. Returns None if the size is not yet known.
2634        """
2635    def using_helper():
2636        """Return True if this upload is using a Helper, False if not."""
2637
2638    def get_status():
2639        """Return a string describing the current state of the upload
2640        process."""
2641
2642    def get_progress():
2643        """Returns a tuple of floats, (chk, ciphertext, encode_and_push),
2644        each from 0.0 to 1.0 . 'chk' describes how much progress has been
2645        made towards hashing the file to determine a CHK encryption key: if
2646        non-convergent encryption is in use, this will be trivial, otherwise
2647        the whole file must be hashed. 'ciphertext' describes how much of the
2648        ciphertext has been pushed to the helper, and is '1.0' for non-helper
2649        uploads. 'encode_and_push' describes how much of the encode-and-push
2650        process has finished: for helper uploads this is dependent upon the
2651        helper providing progress reports. It might be reasonable to add all
2652        three numbers and report the sum to the user."""
2653
2654    def get_active():
2655        """Return True if the upload is currently active, False if not."""
2656
2657    def get_results():
2658        """Return an instance of UploadResults (which contains timing and
2659        sharemap information). Might return None if the upload is not yet
2660        finished."""
2661
2662    def get_counter():
2663        """Each upload status gets a unique number: this method returns that
2664        number. This provides a handle to this particular upload, so a web
2665        page can generate a suitable hyperlink."""
2666
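# The get_progress() docstring above suggests combining the three phase
# numbers into one user-facing figure; a hedged sketch:
def _example_overall_upload_progress(upload_status):
    chk, ciphertext, encode_and_push = upload_status.get_progress()
    # Each phase runs from 0.0 to 1.0, so the sum runs from 0.0 to 3.0;
    # divide by 3 to present a single 0.0-1.0 fraction.
    return (chk + ciphertext + encode_and_push) / 3.0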
2667
2668class IDownloadStatus(Interface):
2669    def get_started():
2670        """Return a timestamp (float with seconds since epoch) indicating
2671        when the operation was started."""
2672
2673    def get_storage_index():
2674        """Return a string with the (binary) storage index in use on this
2675        download. This may be None if there is no storage index (i.e. LIT
2676        files)."""
2677
2678    def get_size():
2679        """Return an integer with the number of bytes that will eventually be
2680        retrieved for this file. Returns None if the size is not yet known.
2681        """
2682
2683    def using_helper():
2684        """Return True if this download is using a Helper, False if not."""
2685
2686    def get_status():
2687        """Return a string describing the current state of the download
2688        process."""
2689
2690    def get_progress():
2691        """Returns a float (from 0.0 to 1.0) describing the amount of the
2692        download that has completed. This value will remain at 0.0 until the
2693        first byte of plaintext is pushed to the download target."""
2694
2695    def get_active():
2696        """Return True if the download is currently active, False if not."""
2697
2698    def get_counter():
2699        """Each download status gets a unique number: this method returns
2700        that number. This provides a handle to this particular download, so a
2701        web page can generate a suitable hyperlink."""
2702
2703
2704class IServermapUpdaterStatus(Interface):
2705    pass
2706
2707class IPublishStatus(Interface):
2708    pass
2709
2710class IRetrieveStatus(Interface):
2711    pass
2712
2713
2714class NotCapableError(Exception):
2715    """You have tried to write to a read-only node."""
2716
2717class BadWriteEnablerError(Exception):
2718    pass
2719
2720
2721class RIControlClient(RemoteInterface):
2722    def wait_for_client_connections(num_clients=int):
2723        """Do not return until we have connections to at least NUM_CLIENTS
2724        storage servers.
2725        """
2726
2727    # debug stuff
2728
2729    def upload_random_data_from_file(size=int, convergence=bytes):
2730        return str
2731
2732    def download_to_tempfile_and_delete(uri=bytes):
2733        return None
2734
2735    def get_memory_usage():
2736        """Return a dict describing the amount of memory currently in use.
2737        The keys are 'VmPeak', 'VmSize', and 'VmData'. The values are
2738        integers, measuring memory consumption in bytes."""
2739        return DictOf(bytes, int)
2740
2741    def speed_test(count=int, size=int, mutable=Any()):
2742        """Write 'count' tempfiles to disk, all of the given size. Measure
2743        how long (in seconds) it takes to upload them all to the servers.
2744        Then measure how long it takes to download all of them. If 'mutable'
2745        is 'create', time creation of mutable files. If 'mutable' is
2746        'upload', then time access to the same mutable file instead of
2747        creating one.
2748
2749        Returns a tuple of (upload_time, download_time).
2750        """
2751        return (float, float)
2752
2753    def measure_peer_response_time():
2754        """Send a short message to each connected peer, and measure the time
2755        it takes for them to respond to it. This is a rough measure of the
2756        application-level round trip time.
2757
2758        @return: a dictionary mapping peerid to a float (RTT time in seconds)
2759        """
2760
2761        return DictOf(bytes, float)
2762
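# RIControlClient methods are invoked over foolscap. A hedged sketch, assuming
# `rref` is a foolscap RemoteReference to an RIControlClient:
def _example_control_client(rref):
    d = rref.callRemote("get_memory_usage")
    def _got_usage(usage):
        # keys per the get_memory_usage() docstring: 'VmPeak', 'VmSize',
        # 'VmData' (bytes keys, integer byte counts)
        print("VmSize: %d bytes" % usage[b"VmSize"])
        return rref.callRemote("measure_peer_response_time")
    d.addCallback(_got_usage)
    return d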
2763
2764UploadResults = Any() #DictOf(bytes, bytes)
2765
2766
2767class RIEncryptedUploadable(RemoteInterface):
2768    __remote_name__ = "RIEncryptedUploadable.tahoe.allmydata.com"
2769
2770    def get_size():
2771        return Offset
2772
2773    def get_all_encoding_parameters():
2774        return (int, int, int, int)
2775
2776    def read_encrypted(offset=Offset, length=ReadSize):
2777        return ListOf(bytes)
2778
2779    def close():
2780        return None
2781
2782
2783class RICHKUploadHelper(RemoteInterface):
2784    __remote_name__ = "RIUploadHelper.tahoe.allmydata.com"
2785
2786    def get_version():
2787        """
2788        Return a dictionary of version information.
2789        """
2790        return DictOf(bytes, Any())
2791
2792    def upload(reader=RIEncryptedUploadable):
2793        return UploadResults
2794
2795
2796class RIHelper(RemoteInterface):
2797    __remote_name__ = "RIHelper.tahoe.allmydata.com"
2798
2799    def get_version():
2800        """
2801        Return a dictionary of version information.
2802        """
2803        return DictOf(bytes, Any())
2804
2805    def upload_chk(si=StorageIndex):
2806        """See if a file with a given storage index needs uploading. The
2807        helper will ask the appropriate storage servers to see if the file
2808        has already been uploaded. If so, the helper will return a set of
2809        'upload results' that includes whatever hashes are needed to build
2810        the read-cap, and perhaps a truncated sharemap.
2811
2812        If the file has not yet been uploaded (or if it was only partially
2813        uploaded), the helper will return an empty upload-results dictionary
2814        and also an RICHKUploadHelper object that will take care of the
2815        upload process. The client should call upload() on this object and
2816        pass it a reference to an RIEncryptedUploadable object that will
2817        provide ciphertext. When the upload is finished, the upload() method
2818        will finish and return the upload results.
2819        """
2820        return (UploadResults, ChoiceOf(RICHKUploadHelper, None))
2821
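# A sketch of the client-side flow described in upload_chk() above. `helper`
# is assumed to be a foolscap RemoteReference to an RIHelper, `si` a storage
# index, and `uploadable` a local RIEncryptedUploadable referenceable.
def _example_helper_upload(helper, si, uploadable):
    d = helper.callRemote("upload_chk", si)
    def _got(result):
        upload_results, upload_helper = result
        if upload_helper is None:
            # the file was already uploaded; the results carry the hashes
            # needed to build the read-cap
            return upload_results
        # otherwise, hand the ciphertext source to the upload helper
        return upload_helper.callRemote("upload", uploadable)
    d.addCallback(_got)
    return d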
2822
2823class IStatsProducer(Interface):
2824    def get_stats():
2825        """
2826        Return a dictionary with bytes keys representing the names of the
2827        stats to be monitored, and numeric values.
2828        """
2829
2830class FileTooLargeError(Exception):
2831    pass
2832
2833
2834class IValidatedThingProxy(Interface):
2835    def start():
2836        """ Acquire a thing and validate it. Return a deferred that is
2837        eventually fired with self if the thing is valid or errbacked if it
2838        can't be acquired or validated."""
2839
2840
2841class InsufficientVersionError(Exception):
2842    def __init__(self, needed, got):
2843        self.needed = needed
2844        self.got = got
2845
2846    def __repr__(self):
2847        return "InsufficientVersionError(need '%s', got %s)" % (self.needed,
2848                                                                self.got)
2849
2850class EmptyPathnameComponentError(Exception):
2851    """The webapi disallows empty pathname components."""
2852
2853class IConnectionStatus(Interface):
2854    """
2855    I hold information about the 'connectedness' for some reference.
2856    Connections are an illusion, of course: only messages hold any meaning,
2857    and they are fleeting. But for status displays, it is useful to pretend
2858    that 'recently contacted' means a connection is established, and
2859    'recently failed' means it is not.
2860
2861    This object is not 'live': it is created and populated when requested
2862    from the connection manager, and it does not change after that point.
2863    """
2864
2865    connected = Attribute(
2866        """
2867        True if we appear to be connected: we've been successful in
2868        communicating with our target at some point in the past, and we
2869        haven't experienced any errors since then.""")
2870
2871    last_connection_time = Attribute(
2872        """
2873        If connected is True, this is a timestamp (seconds-since-epoch)
2874        when we last transitioned from 'not connected' to 'connected', such
2875        as when a TCP connect() operation completed and subsequent
2876        negotiation was successful. Otherwise it is None.
2877        """)
2878
2879    summary = Attribute(
2880        """
2881        A string with a brief summary of the current status, suitable for
2882        display on an informational page. The more complete text from
2883        last_connection_description would be appropriate for a tool-tip
2884        popup.
2885        """)
2886
2887    last_received_time = Attribute(
2888        """
2889        A timestamp (seconds-since-epoch) describing the last time we heard
2890        anything (including low-level keep-alives or inbound requests) from
2891        the other side.
2892        """)
2893
2894    non_connected_statuses = Attribute(
2895        """
2896        A dictionary, describing all connections that are not (yet)
2897        successful. When connected is True, this will contain only the losing
2898        attempts. When connected is False, this will include all attempts.
2899
2900        This maps a connection description string (for foolscap this is a
2901        connection hint and the handler it is using) to the status string
2902        (pending, connected, refused, or other errors).
2903        """)
2904
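# A hedged sketch of rendering an IConnectionStatus on a status page, using
# only the attributes documented above:
def _example_render_connection_status(cs):
    if cs.connected:
        return "connected since %s: %s" % (cs.last_connection_time, cs.summary)
    lines = ["not connected: %s" % (cs.summary,)]
    for hint, status in cs.non_connected_statuses.items():
        lines.append("  %s: %s" % (hint, status))
    return "\n".join(lines)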
2905
2906
2907class IFoolscapStoragePlugin(IPlugin):
2908    """
2909    An ``IStoragePlugin`` provides client- and server-side implementations of
2910    a Foolscap-based protocol which can be used to store and retrieve data.
2911
2912    Implementations are free to apply access control or authorization policies
2913    to this storage service and doing so is a large part of the motivation for
2914    providing this point of pluggability.
2915
2916    There should be enough information and hook points to support at
2917    least these use-cases:
2918
2919      - anonymous, everything allowed (current default)
2920      - "storage club" / "friend-net" (possibly identity based)
2921      - cryptocurrencies (ideally, paying for each API call)
2922      - anonymous tokens (payment for service, but without identities)
2923    """
2924    name = Attribute(
2925        """
2926        A name for referring to this plugin.  This name is both user-facing
2927        (for example, it is written in configuration files) and machine-facing
2928        (for example, it may be used to construct URLs).  It should be unique
2929        across all plugins for this interface.  Two plugins with the same name
2930        cannot be used in one client.
2931
2932        Because it is used to construct URLs, it is constrained to URL safe
2933        characters (it must be a *segment* as defined by RFC 3986, section
2934        3.3).
2935
2936        :type: ``unicode``
2937        """
2938    )
2939
2940    def get_storage_server(configuration, get_anonymous_storage_server):
2941        """
2942        Get an ``IAnnounceableStorageServer`` provider that gives an announcement
2943        for and an implementation of the server side of the storage protocol.
2944        This will be exposed and offered to clients in the storage server's
2945        announcement.
2946
2947        :param dict configuration: Any configuration given in the section for
2948            this plugin in the node's configuration file.  As an example, the
2949            configuration for the original anonymous-access filesystem-based
2950            storage server might look like::
2951
2952                {u"storedir": u"/foo/bar/storage",
2953                 u"nodeid": u"abcdefg...",
2954                 u"reserved_space": 0,
2955                 u"discard_storage": False,
2956                 u"readonly_storage": False,
2957                 u"expiration_enabled": False,
2958                 u"expiration_mode": u"age",
2959                 u"expiration_override_lease_duration": None,
2960                 u"expiration_cutoff_date": None,
2961                 u"expiration_sharetypes": (u"mutable", u"immutable"),
2962                }
2963
2964        :param get_anonymous_storage_server: A no-argument callable which
2965            returns a single instance of the original, anonymous-access
2966            storage server.  This may be helpful in providing actual storage
2967            implementation behavior for a wrapper-style plugin.  This is also
2968            provided to keep the Python API offered by Tahoe-LAFS to plugin
2969            developers narrow (do not try to find and instantiate the original
2970            storage server yourself; if you want it, call this).
2971
2972        :rtype: ``Deferred`` firing with ``IAnnounceableStorageServer``
2973        """
2974
2975    def get_storage_client(configuration, announcement, get_rref):
2976        """
2977        Get an ``IStorageServer`` provider that implements the client side of the
2978        storage protocol.
2979
2980        :param allmydata.node._Config configuration: A representation of the
2981            configuration for the node into which this plugin has been loaded.
2982
2983        :param dict announcement: The announcement for the corresponding
2984            server portion of this plugin received from a storage server which
2985            is offering it.
2986
2987        :param get_rref: A no-argument callable which returns a
2988            ``foolscap.referenceable.RemoteReference`` which refers to the
2989            server portion of this plugin on the currently active connection,
2990            or ``None`` if no connection has been established yet.
2991
2992        :rtype: ``IStorageServer``
2993        """
2994
2995    def get_client_resource(configuration):
2996        """
2997        Get an ``IResource`` that can be published in the Tahoe-LAFS web interface
2998        to expose information related to this plugin.
2999
3000        :param allmydata.node._Config configuration: A representation of the
3001            configuration for the node into which this plugin has been loaded.
3002
3003        :rtype: ``IResource``
3004        """
3005
3006
3007class IAnnounceableStorageServer(Interface):
3008    announcement = Attribute(
3009        """
3010        Data for an announcement for the associated storage server.
3011
3012        :note: This does not include the storage server nickname nor Foolscap
3013            fURL.  These will be added to the announcement automatically.  It
3014            is not unusual for this announcement to contain no information.
3015            Once the client connects to this server it can use other methods
3016            to query for additional information (eg, in the manner of
3017            ``RIStorageServer.remote_get_version``).  The announcement only
3018            needs to contain information to help the client determine how to
3019            connect.
3020
3021        :type: ``dict`` of JSON-serializable types
3022        """
3023    )
3024
3025    storage_server = Attribute(
3026        """
3027        A Foolscap referenceable object implementing the server side of the
3028        storage protocol.
3029
3030        :type: ``IReferenceable`` provider
3031        """
3032    )
3033
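# A skeletal sketch tying the two plugin interfaces together. Everything
# named "_Example*" is hypothetical; only the method names and the `name`
# attribute come from IFoolscapStoragePlugin/IAnnounceableStorageServer.
from zope.interface import implementer
from twisted.internet.defer import succeed

@implementer(IAnnounceableStorageServer)
class _ExampleAnnounceable(object):
    def __init__(self, announcement, storage_server):
        self.announcement = announcement
        self.storage_server = storage_server

@implementer(IFoolscapStoragePlugin)
class _ExamplePlugin(object):
    name = u"example"

    def get_storage_server(self, configuration, get_anonymous_storage_server):
        # Wrap the anonymous storage server and announce nothing extra.
        server = get_anonymous_storage_server()
        return succeed(_ExampleAnnounceable({}, server))

    def get_storage_client(self, configuration, announcement, get_rref):
        raise NotImplementedError("sketch only: return an IStorageServer")

    def get_client_resource(self, configuration):
        raise NotImplementedError("sketch only: return an IResource")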
3034
3035class IAddressFamily(Interface):
3036    """
3037    Support for one specific address family.
3038
3039    This stretches the definition of address family to include things like Tor
3040    and I2P.
3041    """
3042    def get_listener():
3043        """
3044        Return a string endpoint description or an ``IStreamServerEndpoint``.
3045
3046        This would be named ``get_server_endpoint`` if not for historical
3047        reasons.
3048        """
3049
3050    def get_client_endpoint():
3051        """
3052        Return an ``IStreamClientEndpoint``.
3053        """
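

# A hedged sketch of an IAddressFamily provider for plain TCP; illustrative
# only (the real Tor/I2P providers live elsewhere in Tahoe-LAFS), and the
# fixed destination used by get_client_endpoint() is a stand-in.
from zope.interface import implementer

@implementer(IAddressFamily)
class _ExampleTCPFamily(object):
    def __init__(self, port):
        self._port = port

    def get_listener(self):
        # a string endpoint description is acceptable per the docstring
        return "tcp:%d" % (self._port,)

    def get_client_endpoint(self):
        from twisted.internet import reactor
        from twisted.internet.endpoints import TCP4ClientEndpoint
        return TCP4ClientEndpoint(reactor, "127.0.0.1", self._port)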