// Package calypso implements the LTS functionality of the Calypso paper. It
// implements both the access-control cothority and the secret-management
// cothority: (1) the access-control cothority is implemented using ByzCoin
// with two contracts, `Write` and `Read`; (2) the secret-management cothority
// uses an onet service with methods to set up a Long Term Secret (LTS)
// distributed key and to request a re-encryption.
//
// For more details, see
// https://github.com/dedis/cothority/tree/main/calypso/README.md
//
// There are two contracts implemented by this package:
//
// Contract "calypsoWrite" is used to store a secret in the ledger, so that an
// authorized reader can retrieve it by creating a Read-instance.
//
// Accepted instructions:
//   - spawn:calypsoWrite creates a new write-request from the argument "write"
//   - spawn:calypsoRead creates a new read-request for this write-request
//
// Contract "calypsoRead" is used to create read instances that prove a reader
// has access to a given write instance. They are only spawned by calling Spawn
// on an existing Write instance, with the proposed Read request in the "read"
// argument.
//
// TODO: correctly handle multi-signatures for read requests: to whom should
// the secret be re-encrypted? Perhaps for multi-signatures we only want to
// have ephemeral keys.
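//
// A minimal, illustrative sketch (not taken from this package's docs) of how
// a client might spawn the two contracts with ByzCoin instructions; writeBuf
// and readBuf are assumed to be protobuf-encoded Write and Read structures,
// darcID the DARC controlling access, and writeInstanceID the ID of an
// existing write instance:
//
//	writeSpawn := byzcoin.Instruction{
//		InstanceID: byzcoin.NewInstanceID(darcID),
//		Spawn: &byzcoin.Spawn{
//			ContractID: ContractWriteID,
//			Args:       byzcoin.Arguments{{Name: "write", Value: writeBuf}},
//		},
//	}
//	// A read request is spawned on the existing write instance.
//	readSpawn := byzcoin.Instruction{
//		InstanceID: writeInstanceID,
//		Spawn: &byzcoin.Spawn{
//			ContractID: ContractReadID,
//			Args:       byzcoin.Arguments{{Name: "read", Value: readBuf}},
//		},
//	}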
package calypso
import (
"encoding/binary"
"fmt"
"math"
"net"
"net/http"
"os"
"sync"
"time"
"go.dedis.ch/kyber/v3/sign/schnorr"
"golang.org/x/xerrors"
"go.dedis.ch/cothority/v3"
"go.dedis.ch/cothority/v3/byzcoin"
"go.dedis.ch/cothority/v3/calypso/protocol"
"go.dedis.ch/cothority/v3/darc"
dkgprotocol "go.dedis.ch/cothority/v3/dkg/pedersen"
"go.dedis.ch/cothority/v3/skipchain"
"go.dedis.ch/kyber/v3"
"go.dedis.ch/kyber/v3/share"
dkg "go.dedis.ch/kyber/v3/share/dkg/pedersen"
"go.dedis.ch/kyber/v3/util/key"
"go.dedis.ch/onet/v3"
"go.dedis.ch/onet/v3/log"
"go.dedis.ch/onet/v3/network"
"go.dedis.ch/protobuf"
)
// calypsoID is the ID of the Calypso service; used for tests.
var calypsoID onet.ServiceID
// ServiceName of the secret-management part of Calypso.
const ServiceName = "Calypso"
// propagationTimeout is how long the system waits for the DKG to finish
const propagationTimeout = 20 * time.Second
const calypsoReshareProto = "calypso_reshare_proto"
var allowInsecureAdmin = false
// readMakeAttrInterpreter allows one to register custom MakeAttrInterpreters
// for the read-request verification.
var readMakeAttrInterpreter = make([]makeAttrInterpreterWrapper, 0)
// makeAttrInterpreterWrapper holds the data needed to register a
// MakeAttrInterpreter.
type makeAttrInterpreterWrapper struct {
// name is the corresponding name of the custom attribute.
name string
// interpreter is the function producing the interpreter for the given name.
// We are using a callback to have access to the instance's context.
interpreter func(c ContractWrite, rst byzcoin.ReadOnlyStateTrie,
inst byzcoin.Instruction) func(string) error
}
func init() {
var err error
_, err = onet.GlobalProtocolRegister(calypsoReshareProto, dkgprotocol.NewSetup)
log.ErrFatal(err)
calypsoID, err = onet.RegisterNewService(ServiceName, newService)
log.ErrFatal(err)
network.RegisterMessages(&storage{}, &vData{})
// The loopback check makes Java testing fail, because the Java client
// commands come from outside the Docker container. The Java testing
// Docker container therefore runs with this variable set.
if os.Getenv("COTHORITY_ALLOW_INSECURE_ADMIN") != "" {
log.Warn("COTHORITY_ALLOW_INSECURE_ADMIN is set; Calypso admin actions allowed from the public network.")
allowInsecureAdmin = true
}
err = byzcoin.RegisterGlobalContract(ContractWriteID, contractWriteFromBytes)
if err != nil {
log.ErrFatal(err)
}
err = byzcoin.RegisterGlobalContract(ContractReadID, contractReadFromBytes)
if err != nil {
log.ErrFatal(err)
}
err = byzcoin.RegisterGlobalContract(ContractLongTermSecretID, contractLTSFromBytes)
if err != nil {
log.ErrFatal(err)
}
}
// Service is our calypso-service. It stores all created LTSs.
type Service struct {
*onet.ServiceProcessor
storage *storage
// Genesis blocks are stored here instead of the usual skipchain DB as we
// don't want to override authorized skipchains or related security. The
// blocks are only used to ensure that proofs start with the expected roster.
genesisBlocks map[string]*skipchain.SkipBlock
genesisBlocksLock sync.Mutex
// for use by testing only
afterReshare func()
}
// pubPoly is a serializable version of share.PubPoly
type pubPoly struct {
B kyber.Point
Commits []kyber.Point
}
// vData is sent to all nodes when re-encryption takes place. If Ephemeral
// is non-nil, Signature needs to hold a valid signature from the reader
// in the Proof.
type vData struct {
Proof byzcoin.Proof
Ephemeral kyber.Point
Signature *darc.Signature
}
// AddReadAttrInterpreter adds a new AttrInterpreter that will be evaluated
// during a read request. This function is not thread-safe and should only be
// called from an init().
func AddReadAttrInterpreter(name string, interpreter func(c ContractWrite,
rst byzcoin.ReadOnlyStateTrie, inst byzcoin.Instruction) func(string) error) {
readMakeAttrInterpreter = append(readMakeAttrInterpreter, makeAttrInterpreterWrapper{name, interpreter})
}
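// A minimal usage sketch with a hypothetical attribute name "block_height";
// the returned closure accepts or rejects a read request carrying that
// attribute:
//
//	func init() {
//		AddReadAttrInterpreter("block_height", func(c ContractWrite,
//			rst byzcoin.ReadOnlyStateTrie,
//			inst byzcoin.Instruction) func(string) error {
//			return func(value string) error {
//				// Inspect value and the instruction's context here;
//				// returning nil accepts the read request, an error
//				// rejects it.
//				return nil
//			}
//		})
//	}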
// ProcessClientRequest implements onet.Service. We override the version we
// normally get from embedding onet.ServiceProcessor in order to hook it and
// get a look at the http.Request.
func (s *Service) ProcessClientRequest(req *http.Request, path string, buf []byte) ([]byte, *onet.StreamingTunnel, error) {
if !allowInsecureAdmin && path == "Authorise" {
h, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
return nil, nil, xerrors.Errorf("splitting host port: %v", err)
}
ip := net.ParseIP(h)
if !ip.IsLoopback() {
return nil, nil, xerrors.New("authorise is only allowed on loopback")
}
}
return s.ServiceProcessor.ProcessClientRequest(req, path, buf)
}
// Authorise adds a ByzCoinID to the list of authorized IDs. It can only be
// called from localhost, except if the COTHORITY_ALLOW_INSECURE_ADMIN
// environment variable is set.
//
// Deprecated: please use Authorize.
func (s *Service) Authorise(req *Authorise) (*AuthoriseReply, error) {
if len(req.ByzCoinID) == 0 {
return nil, xerrors.New("empty ByzCoin ID")
}
s.storage.Lock()
bcID := string(req.ByzCoinID)
if _, ok := s.storage.AuthorisedByzCoinIDs[bcID]; ok {
s.storage.Unlock()
return nil, xerrors.New("ByzCoinID already authorised")
}
s.storage.AuthorisedByzCoinIDs[bcID] = true
s.storage.Unlock()
err := s.save()
if err != nil {
return nil, xerrors.Errorf("saving data: %v", err)
}
log.Lvl1("Stored ByzCoinID")
return &AuthoriseReply{}, nil
}
// Authorize adds a ByzCoinID to the list of authorized IDs. It should
// be called by the administrator at the beginning, before any other API calls
// are made. A ByzCoinID that is not authorised will not be allowed to call the
// other APIs.
//
// If the COTHORITY_ALLOW_INSECURE_ADMIN environment variable is set, the
// signature verification is skipped.
func (s *Service) Authorize(req *Authorize) (*AuthorizeReply, error) {
if len(req.ByzCoinID) == 0 {
return nil, xerrors.New("empty ByzCoin ID")
}
if !allowInsecureAdmin {
if len(req.Signature) == 0 {
return nil, xerrors.New("no signature provided")
}
if math.Abs(time.Since(time.Unix(req.Timestamp, 0)).Seconds()) > 60 {
return nil, xerrors.New("signature is too old")
}
msg := append(req.ByzCoinID, make([]byte, 8)...)
binary.LittleEndian.PutUint64(msg[32:], uint64(req.Timestamp))
err := schnorr.Verify(cothority.Suite, s.ServerIdentity().Public, msg, req.Signature)
if err != nil {
return nil, xerrors.Errorf("signature verification failed: %v", err)
}
}
s.storage.Lock()
bcID := string(req.ByzCoinID)
if _, ok := s.storage.AuthorisedByzCoinIDs[bcID]; ok {
s.storage.Unlock()
// This error string is tested against in
// `external/js/cothority/src/calypso/calypso-rpc.ts`, so if you change the
// error-message here, all apps depending on the @dedis/cothority
// npm-package will fail.
return nil, xerrors.New("ByzCoinID already authorised")
}
s.storage.AuthorisedByzCoinIDs[bcID] = true
s.storage.Unlock()
err := s.save()
if err != nil {
return nil, xerrors.Errorf("saving data: %v", err)
}
log.Lvl1("Stored ByzCoinID")
return &AuthorizeReply{}, nil
}
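// signAuthorize is a hypothetical helper, not part of the service API,
// sketching how an administrator produces the signature that Authorize
// verifies above: the 32-byte ByzCoinID concatenated with the timestamp as a
// little-endian uint64, signed with the conode's private key.
func signAuthorize(private kyber.Scalar, byzCoinID []byte,
	timestamp int64) ([]byte, error) {
	msg := append(append([]byte{}, byzCoinID...), make([]byte, 8)...)
	binary.LittleEndian.PutUint64(msg[len(byzCoinID):], uint64(timestamp))
	return schnorr.Sign(cothority.Suite, private, msg)
}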
// CreateLTS takes as input a roster with a list of all nodes that should
// participate in the DKG. Every node will store its private key and wait for
// decryption requests. The LTSID should be the InstanceID.
func (s *Service) CreateLTS(req *CreateLTS) (reply *CreateLTSReply, err error) {
if err := s.verifyProof(&req.Proof); err != nil {
return nil, xerrors.Errorf("verifying proof: %v", err)
}
roster, instID, err := s.getLtsRoster(&req.Proof)
if err != nil {
return nil, xerrors.Errorf("get roster: %v", err)
}
// NOTE: the roster stored in ByzCoin must include this node.
tree := roster.GenerateNaryTreeWithRoot(len(roster.List), s.ServerIdentity())
if tree == nil {
log.Error("cannot create tree with roster", roster.List)
return nil, xerrors.New("error while generating tree")
}
cfg := newLtsConfig{
req.Proof,
}
cfgBuf, err := protobuf.Encode(&cfg)
if err != nil {
return nil, xerrors.Errorf("serializing configuration, %v", err)
}
pi, err := s.CreateProtocol(dkgprotocol.Name, tree)
if err != nil {
return nil, xerrors.Errorf("creating dkg protocol: %v", err)
}
setupDKG := pi.(*dkgprotocol.Setup)
setupDKG.Wait = true
err = setupDKG.SetConfig(&onet.GenericConfig{Data: cfgBuf})
if err != nil {
return nil, xerrors.Errorf("set dkg config: %v", err)
}
setupDKG.KeyPair = s.getKeyPair()
if err := pi.Start(); err != nil {
return nil, xerrors.Errorf("starting dkg protocol: %v", err)
}
log.Lvl3("Started DKG-protocol - waiting for done", len(roster.List))
select {
case <-setupDKG.Finished:
shared, dks, err := setupDKG.SharedSecret()
if err != nil {
return nil, xerrors.Errorf("get aggregate public key: %v", err)
}
reply = &CreateLTSReply{
ByzCoinID: req.Proof.Latest.SkipChainID(),
InstanceID: instID,
X: shared.X,
}
s.storage.Lock()
s.storage.Shared[instID] = shared
s.storage.Polys[instID] = &pubPoly{s.Suite().Point().Base(), dks.Commits}
s.storage.Rosters[instID] = roster
s.storage.Replies[instID] = reply
s.storage.DKS[instID] = dks
s.storage.Unlock()
err = s.save()
if err != nil {
return nil, xerrors.Errorf("save dkg state: %v", err)
}
log.Lvlf2("%v Created LTS with ID: %v, pk %v", s.ServerIdentity(), instID, reply.X)
case <-time.After(propagationTimeout):
return nil, xerrors.New("new-dkg didn't finish in time")
}
return
}
// ReshareLTS starts a request to reshare the LTS. The new roster which holds
// the new secret shares must exist in the proof specified by the request.
// All hosts must be online in this step.
func (s *Service) ReshareLTS(req *ReshareLTS) (*ReshareLTSReply, error) {
// Verify the request
roster, id, err := s.getLtsRoster(&req.Proof)
if err != nil {
return nil, xerrors.Errorf("get roster: %v", err)
}
if err := s.verifyProof(&req.Proof); err != nil {
return nil, xerrors.Errorf("verifying proof: %v", err)
}
// Iterate through the new roster and update the set of valid peers
cl := onet.NewClient(cothority.Suite, ServiceName)
var reply updateValidPeersReply
for _, srv := range roster.List {
err := cl.SendProtobuf(srv, &updateValidPeers{Proof: req.Proof}, &reply)
if err != nil {
return nil, xerrors.Errorf("updating valid peers on %v: %v",
srv, err)
}
}
// Initialise the protocol
setupDKG, err := func() (*dkgprotocol.Setup, error) {
s.storage.Lock()
defer s.storage.Unlock()
// Check that we know the shared secret, otherwise don't do re-sharing
if s.storage.Shared[id] == nil || s.storage.DKS[id] == nil {
return nil, xerrors.New("cannot start resharing without an LTS")
}
// NOTE: the roster stored in ByzCoin must include this node.
tree := roster.GenerateNaryTreeWithRoot(len(roster.List), s.ServerIdentity())
if tree == nil {
return nil, xerrors.New("failed to generate tree " +
"-- root not in roster")
}
cfg := reshareLtsConfig{
Proof: req.Proof,
// We pass the public coefficients out with the protocol,
// because new nodes will need it for their dkg.Config.PublicCoeffs.
Commits: s.storage.DKS[id].Commits,
OldNodes: s.storage.Rosters[id].Publics(),
}
cfgBuf, err := protobuf.Encode(&cfg)
if err != nil {
return nil, xerrors.Errorf("serializing configuration: %v", err)
}
pi, err := s.CreateProtocol(calypsoReshareProto, tree)
if err != nil {
return nil, xerrors.Errorf("creating reshare protocol: %v", err)
}
setupDKG := pi.(*dkgprotocol.Setup)
setupDKG.Wait = true
setupDKG.KeyPair = s.getKeyPair()
err = setupDKG.SetConfig(&onet.GenericConfig{Data: cfgBuf})
if err != nil {
return nil, xerrors.Errorf("setting dkg configuration: %v", err)
}
// Because we are the node starting the resharing protocol, by
// definition, we are inside the old group. (Checked first thing
// in this function.) So we have only Share, not PublicCoeffs.
oldn := len(s.storage.Rosters[id].List)
n := len(roster.List)
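// Both thresholds follow the BFT bound n - (n-1)/3, tolerating up to
// (n-1)/3 faulty nodes: e.g. 4 nodes give a threshold of 3, 7 give 5.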
c := &dkg.Config{
Suite: cothority.Suite,
Longterm: setupDKG.KeyPair.Private,
OldNodes: s.storage.Rosters[id].Publics(),
NewNodes: roster.Publics(),
Share: s.storage.DKS[id],
Threshold: n - (n-1)/3,
OldThreshold: oldn - (oldn-1)/3,
}
setupDKG.NewDKG = func() (*dkg.DistKeyGenerator, error) {
d, err := dkg.NewDistKeyHandler(c)
return d, cothority.ErrorOrNil(err,
"creating new distributed key generator")
}
return setupDKG, nil
}()
if err != nil {
return nil, xerrors.Errorf("initializing dkg: %v", err)
}
if err := setupDKG.Start(); err != nil {
return nil, xerrors.Errorf("starting dkg: %v", err)
}
log.Lvl3(s.ServerIdentity(), "Started resharing DKG-protocol - waiting for done")
var pk kyber.Point
select {
case <-setupDKG.Finished:
shared, dks, err := setupDKG.SharedSecret()
if err != nil {
return nil, xerrors.Errorf("getting shared secret: %v", err)
}
pk = shared.X
s.storage.Lock()
// Check the secret shares are different
if shared.V.Equal(s.storage.Shared[id].V) {
s.storage.Unlock()
return nil, xerrors.New("the reshared secret is the same")
}
// Check the public key remains the same
if !shared.X.Equal(s.storage.Shared[id].X) {
s.storage.Unlock()
return nil, xerrors.New("the reshared public point is different")
}
s.storage.Shared[id] = shared
s.storage.Polys[id] = &pubPoly{s.Suite().Point().Base(), dks.Commits}
s.storage.Rosters[id] = roster
s.storage.DKS[id] = dks
s.storage.Unlock()
err = s.save()
if err != nil {
return nil, xerrors.Errorf("saving dkg state: %v", err)
}
if s.afterReshare != nil {
s.afterReshare()
}
case <-time.After(propagationTimeout):
return nil, xerrors.New("resharing-dkg didn't finish in time")
}
log.Lvl2(s.ServerIdentity(), "resharing protocol finished")
log.Lvlf2("%v Reshared LTS with ID: %v, pk %v", s.ServerIdentity(), id, pk)
return &ReshareLTSReply{}, nil
}
// updateValidPeers is a private service endpoint that sets the valid peers
// according to the roster in the provided proof.
func (s *Service) updateValidPeers(req *updateValidPeers) (
*updateValidPeersReply, error) {
err := s.verifyProof(&req.Proof)
if err != nil {
return nil, xerrors.Errorf("verifying proof: %v", err)
}
newRoster, ltsID, err := s.getLtsRoster(&req.Proof)
if err != nil {
return nil, xerrors.Errorf("retrieving roster: %v", err)
}
s.SetValidPeers(s.NewPeerSetID(ltsID[:]), newRoster.List)
return &updateValidPeersReply{}, nil
}
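// verifyProof checks that the proof's ByzCoin ID has been authorised on this
// conode and that the proof verifies against the genesis block of that
// skipchain.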
func (s *Service) verifyProof(proof *byzcoin.Proof) error {
scID := proof.Latest.SkipChainID()
s.storage.Lock()
defer s.storage.Unlock()
if _, ok := s.storage.AuthorisedByzCoinIDs[string(scID)]; !ok {
return xerrors.New("this ByzCoin ID is not authorised")
}
sb, err := s.fetchGenesisBlock(scID, proof.Latest.Roster)
if err != nil {
return xerrors.Errorf("fetching genesis block: %v", err)
}
return cothority.ErrorOrNil(proof.VerifyFromBlock(sb),
"verifying proof from block")
}
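// fetchGenesisBlock returns the genesis block of the given skipchain,
// fetching it over the network on first use and from the cache afterwards.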
func (s *Service) fetchGenesisBlock(scID skipchain.SkipBlockID, roster *onet.Roster) (*skipchain.SkipBlock, error) {
s.genesisBlocksLock.Lock()
defer s.genesisBlocksLock.Unlock()
sb := s.genesisBlocks[string(scID)]
if sb != nil {
return sb, nil
}
cl := skipchain.NewClient()
sb, err := cl.GetSingleBlock(roster, scID)
if err != nil {
return nil, xerrors.Errorf("getting single block: %v", err)
}
// Genesis block can be reused later on.
s.genesisBlocks[string(scID)] = sb
return sb, nil
}
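// getLtsRoster extracts the LTS roster and instance ID from the
// LtsInstanceInfo stored in the proof.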
func (s *Service) getLtsRoster(proof *byzcoin.Proof) (*onet.Roster, byzcoin.InstanceID, error) {
instanceID, buf, _, _, err := proof.KeyValue()
if err != nil {
return nil, byzcoin.InstanceID{},
xerrors.Errorf("getting keys and values in the proof: %v", err)
}
var info LtsInstanceInfo
err = protobuf.DecodeWithConstructors(buf, &info, network.DefaultConstructors(cothority.Suite))
if err != nil {
return nil, byzcoin.InstanceID{},
xerrors.Errorf("decoding roster: %v", err)
}
return &info.Roster, byzcoin.NewInstanceID(instanceID), nil
}
// DecryptKey takes as input a Read- and a Write-proof. Proofs contain
// everything necessary to verify that a given instance is correct and
// stored in ByzCoin.
// Using the Read- and the Write-instance, this method verifies that the
// requests match and then re-encrypts the secret to the public key given
// in the Read-instance.
func (s *Service) DecryptKey(dkr *DecryptKey) (reply *DecryptKeyReply, err error) {
reply = &DecryptKeyReply{}
log.Lvl2(s.ServerIdentity(), "Re-encrypt the key to the public key of the reader")
var read Read
if err := dkr.Read.VerifyAndDecode(cothority.Suite, ContractReadID, &read); err != nil {
return nil, xerrors.New("didn't get a read instance: " + err.Error())
}
var write Write
if err := dkr.Write.VerifyAndDecode(cothority.Suite, ContractWriteID, &write); err != nil {
return nil, xerrors.New("didn't get a write instance: " + err.Error())
}
if !read.Write.Equal(byzcoin.NewInstanceID(dkr.Write.InclusionProof.Key())) {
return nil, xerrors.New("read doesn't point to passed write")
}
s.storage.Lock()
id := write.LTSID
roster := s.storage.Rosters[id]
if roster == nil {
s.storage.Unlock()
return nil,
xerrors.Errorf("don't know the LTSID '%v' stored in write", id)
}
s.storage.Unlock()
if err = s.verifyProof(&dkr.Read); err != nil {
return nil, xerrors.Errorf(
"read proof cannot be verified to come from scID: %v",
err)
}
if err = s.verifyProof(&dkr.Write); err != nil {
return nil, xerrors.Errorf(
"write proof cannot be verified to come from scID: %v",
err)
}
// Start ocs-protocol to re-encrypt the file's symmetric key under the
// reader's public key.
nodes := len(roster.List)
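// As in the DKG, the threshold tolerates up to (nodes-1)/3 failing nodes:
// e.g. 7 nodes require 5 reencryption shares to recover the key below.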
threshold := nodes - (nodes-1)/3
tree := roster.GenerateNaryTreeWithRoot(nodes, s.ServerIdentity())
pi, err := s.CreateProtocol(protocol.NameOCS, tree)
if err != nil {
return nil, xerrors.Errorf("failed to create ocs-protocol: %v", err)
}
ocsProto := pi.(*protocol.OCS)
ocsProto.U = write.U
verificationData := &vData{
Proof: dkr.Read,
}
ocsProto.Xc = read.Xc
log.Lvlf2("%v Public key is: %s", s.ServerIdentity(), ocsProto.Xc)
ocsProto.VerificationData, err = protobuf.Encode(verificationData)
if err != nil {
return nil,
xerrors.Errorf("couldn't marshal verification data: %v", err)
}
// Make sure everything used from the s.storage structure is copied, so
// there will be no races.
s.storage.Lock()
ocsProto.Shared = s.storage.Shared[id]
pp := s.storage.Polys[id]
reply.X = s.storage.Shared[id].X.Clone()
var commits []kyber.Point
for _, c := range pp.Commits {
commits = append(commits, c.Clone())
}
ocsProto.Poly = share.NewPubPoly(s.Suite(), pp.B.Clone(), commits)
s.storage.Unlock()
log.Lvl3("Starting reencryption protocol")
err = ocsProto.SetConfig(&onet.GenericConfig{Data: id.Slice()})
if err != nil {
return nil,
xerrors.Errorf("failed to set config for ocs-protocol: %v", err)
}
err = ocsProto.Start()
if err != nil {
return nil, xerrors.Errorf("failed to start ocs-protocol: %v", err)
}
if !<-ocsProto.Reencrypted {
return nil, xerrors.New("reencryption got refused")
}
log.Lvl3("Reencryption protocol is done.")
reply.XhatEnc, err = share.RecoverCommit(cothority.Suite, ocsProto.Uis,
threshold, nodes)
if err != nil {
return nil, xerrors.Errorf("failed to recover commit: %v", err)
}
reply.C = write.C
log.Lvl3("Successfully reencrypted the key")
return
}
// GetLTSReply returns the CreateLTSReply message of a previous LTS.
func (s *Service) GetLTSReply(req *GetLTSReply) (*CreateLTSReply, error) {
log.Lvlf2("Getting LTS Reply for ID: %v", req.LTSID)
s.storage.Lock()
defer s.storage.Unlock()
reply, ok := s.storage.Replies[req.LTSID]
if !ok {
return nil, xerrors.Errorf("didn't find this LTS: %v", req.LTSID)
}
return &CreateLTSReply{
ByzCoinID: append([]byte{}, reply.ByzCoinID...),
InstanceID: reply.InstanceID,
X: reply.X.Clone(),
}, nil
}
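// getKeyPair returns this conode's key pair that is dedicated to the
// Calypso service.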
func (s *Service) getKeyPair() *key.Pair {
return &key.Pair{
Public: s.ServerIdentity().ServicePublic(ServiceName),
Private: s.ServerIdentity().ServicePrivate(ServiceName),
}
}
// NewProtocol intercepts the DKG and OCS protocols to configure them and to
// retrieve the shared secrets once they finish.
func (s *Service) NewProtocol(tn *onet.TreeNodeInstance, conf *onet.GenericConfig) (onet.ProtocolInstance, error) {
log.Lvl3(s.ServerIdentity(), tn.ProtocolName(), conf)
switch tn.ProtocolName() {
case dkgprotocol.Name:
var cfg newLtsConfig
if err := protobuf.DecodeWithConstructors(conf.Data, &cfg, network.DefaultConstructors(cothority.Suite)); err != nil {
return nil, xerrors.Errorf("decoding LTS config: %v", err)
}
if err := s.verifyProof(&cfg.Proof); err != nil {
return nil, xerrors.Errorf("verifying proof: %v", err)
}
inst, _, _, _, err := cfg.KeyValue()
if err != nil {
return nil, xerrors.Errorf("getting key value from proof: %v", err)
}
instID := byzcoin.NewInstanceID(inst)
pi, err := dkgprotocol.NewSetup(tn)
if err != nil {
return nil, xerrors.Errorf("error setting up dkg: %v", err)
}
setupDKG := pi.(*dkgprotocol.Setup)
setupDKG.KeyPair = s.getKeyPair()
go func(bcID skipchain.SkipBlockID, id byzcoin.InstanceID) {
<-setupDKG.Finished
shared, dks, err := setupDKG.SharedSecret()
if err != nil {
log.Error(err)
return
}
reply := &CreateLTSReply{
ByzCoinID: bcID,
InstanceID: instID,
X: shared.X,
}
log.Lvlf3("%v got shared %v on inst %v", s.ServerIdentity(), shared, id)
s.storage.Lock()
s.storage.Shared[id] = shared
s.storage.DKS[id] = dks
s.storage.Replies[id] = reply
s.storage.Rosters[id] = tn.Roster()
s.storage.Unlock()
err = s.save()
if err != nil {
log.Error(err)
}
}(cfg.Latest.SkipChainID(), instID)
return pi, nil
case calypsoReshareProto:
// Decode and verify config
var cfg reshareLtsConfig
if err := protobuf.DecodeWithConstructors(conf.Data, &cfg, network.DefaultConstructors(cothority.Suite)); err != nil {
return nil, xerrors.Errorf("decoding config: %v", err)
}
if err := s.verifyProof(&cfg.Proof); err != nil {
return nil, xerrors.Errorf("verifying proof: %v", err)
}
roster, id, err := s.getLtsRoster(&cfg.Proof)
if err != nil {
return nil, xerrors.Errorf("getting lts roster: %v", err)
}
// Set up the protocol
pi, err := dkgprotocol.NewSetup(tn)
if err != nil {
return nil, xerrors.Errorf("setting up dkg protocol: %v", err)
}
setupDKG := pi.(*dkgprotocol.Setup)
setupDKG.KeyPair = s.getKeyPair()
s.storage.Lock()
oldn := len(cfg.OldNodes)
n := len(tn.Roster().List)
c := &dkg.Config{
Suite: cothority.Suite,
Longterm: setupDKG.KeyPair.Private,
NewNodes: tn.Roster().Publics(),
OldNodes: cfg.OldNodes,
Threshold: n - (n-1)/3,
OldThreshold: oldn - (oldn-1)/3,
}
// Set Share and PublicCoeffs according to whether we are an old node or
// a new one; keep the storage lock until the read of DKS is done.
inOld := pointInList(setupDKG.KeyPair.Public, cfg.OldNodes)
if inOld {
c.Share = s.storage.DKS[id]
} else {
c.PublicCoeffs = cfg.Commits
}
s.storage.Unlock()
setupDKG.NewDKG = func() (*dkg.DistKeyGenerator, error) {
d, err := dkg.NewDistKeyHandler(c)
return d, cothority.ErrorOrNil(err,
"creating new distributed key generator")
}
// Wait for DKG in reshare mode to end
go func(id byzcoin.InstanceID) {
// TODO: properly propagate errors during execution of DKG protocol
// (see dedis/cothority#2320)
<-setupDKG.Finished
setValidPeers := func(r *onet.Roster) {
s.SetValidPeers(s.NewPeerSetID(id[:]), r.List)
}
s.storage.Lock()
shared, dks, err := setupDKG.SharedSecret()
if err != nil {
setValidPeers(s.storage.Rosters[id])
s.storage.Unlock()
log.Error(err)
return
}
// If we had an old share, check the new share before saving it.
if s.storage.Shared[id] != nil {
// Check the secret shares are different
if shared.V.Equal(s.storage.Shared[id].V) {
setValidPeers(s.storage.Rosters[id])
s.storage.Unlock()
log.Error("the reshared secret is the same")
return
}
// Check the public key remains the same
if !shared.X.Equal(s.storage.Shared[id].X) {
setValidPeers(s.storage.Rosters[id])
s.storage.Unlock()
log.Error("the reshared public point is different")
return
}
}
s.storage.Shared[id] = shared
s.storage.DKS[id] = dks
s.storage.Rosters[id] = roster
s.storage.Unlock()
err = s.save()
if err != nil {
log.Fatal(err)
}
if s.afterReshare != nil {
s.afterReshare()
}
}(id)
return setupDKG, nil
case protocol.NameOCS:
id := byzcoin.NewInstanceID(conf.Data)
s.storage.Lock()
shared, ok := s.storage.Shared[id]
if !ok {
s.storage.Unlock()
return nil, fmt.Errorf("didn't find LTSID %v", id)
}
shared = shared.Clone()
s.storage.Unlock()
pi, err := protocol.NewOCS(tn)
if err != nil {
return nil, xerrors.Errorf("creating OCS protocol instance: %v", err)
}
ocs := pi.(*protocol.OCS)
ocs.Shared = shared
ocs.Verify = s.verifyReencryption
return ocs, nil
}
return nil, nil
}
func pointInList(p1 kyber.Point, l []kyber.Point) bool {
for _, p2 := range l {
if p2.Equal(p1) {
return true
}
}
return false
}
// verifyReencryption checks that the read and the write instances match.
func (s *Service) verifyReencryption(rc *protocol.Reencrypt) bool {
err := func() error {
var verificationData vData
err := protobuf.DecodeWithConstructors(*rc.VerificationData, &verificationData, network.DefaultConstructors(cothority.Suite))
if err != nil {
return xerrors.Errorf("decoding verification data: %v", err)
}
_, v0, contractID, _, err := verificationData.Proof.KeyValue()
if err != nil {
return xerrors.Errorf("proof cannot return values: %v", err)
}
if contractID != ContractReadID {
return xerrors.New("proof doesn't point to read instance")
}
var r Read
err = protobuf.DecodeWithConstructors(v0, &r, network.DefaultConstructors(cothority.Suite))
if err != nil {
return xerrors.Errorf("couldn't decode read data: %v", err)
}
if verificationData.Ephemeral != nil {
return xerrors.New("ephemeral keys not supported yet")
}
if !r.Xc.Equal(rc.Xc) {
return xerrors.New("wrong reader")
}
return nil
}()
if err != nil {
log.Lvl2(s.ServerIdentity(), "wrong reencryption:", err)
return false
}
return true
}
// newService receives the context that holds information about the node it's
// running on. Saving and loading can be done using the context. The data will
// be stored in memory for tests and simulations, and on disk for real deployments.
func newService(c *onet.Context) (onet.Service, error) {
s := &Service{
ServiceProcessor: onet.NewServiceProcessor(c),
genesisBlocks: make(map[string]*skipchain.SkipBlock),
}
if err := s.RegisterHandlers(s.CreateLTS, s.ReshareLTS, s.DecryptKey,
s.GetLTSReply, s.Authorise, s.Authorize, s.updateValidPeers); err != nil {
return nil, xerrors.New("couldn't register messages")
}
if err := s.tryLoad(); err != nil {
log.Error(err)
return nil, xerrors.Errorf("loading configuration: %v", err)
}
// Initialize the sets of valid peers for all existing LTS
for ltsID, roster := range s.storage.Rosters {
s.SetValidPeers(s.NewPeerSetID(ltsID[:]), roster.List)
}
return s, nil
}