diff --git a/platform/fabric/channel.go b/platform/fabric/channel.go index 2cf022fac..5e68a500f 100644 --- a/platform/fabric/channel.go +++ b/platform/fabric/channel.go @@ -27,15 +27,15 @@ func (c *Channel) Name() string { } func (c *Channel) Vault() *Vault { - return &Vault{ch: c.ch} + return &Vault{txidStore: c.ch.TXIDStore(), vault: c.ch.Vault(), ch: c.ch} } func (c *Channel) Ledger() *Ledger { - return &Ledger{ch: c} + return &Ledger{l: c.ch.Ledger()} } func (c *Channel) MSPManager() *MSPManager { - return &MSPManager{ch: c.ch} + return &MSPManager{ch: c.ch.ChannelMembership()} } func (c *Channel) Committer() *Committer { @@ -43,19 +43,19 @@ func (c *Channel) Committer() *Committer { } func (c *Channel) Finality() *Finality { - return &Finality{ch: c.ch} + return &Finality{finality: c.ch.Finality()} } func (c *Channel) Chaincode(name string) *Chaincode { return &Chaincode{ fns: c.fns, - chaincode: c.ch.Chaincode(name), + chaincode: c.ch.ChaincodeManager().Chaincode(name), EventListener: newEventListener(c.sp, name), } } func (c *Channel) Delivery() *Delivery { - return &Delivery{ch: c} + return &Delivery{delivery: c.ch.Delivery()} } func (c *Channel) MetadataService() *MetadataService { diff --git a/platform/fabric/committer.go b/platform/fabric/committer.go index 36ca4869c..8bcce33a9 100644 --- a/platform/fabric/committer.go +++ b/platform/fabric/committer.go @@ -18,38 +18,38 @@ type TxStatusChangeListener interface { } type Committer struct { - ch driver.Channel + committer driver.Committer subscribers *events.Subscribers } func NewCommitter(ch driver.Channel) *Committer { - return &Committer{ch: ch, subscribers: events.NewSubscribers()} + return &Committer{committer: ch.Committer(), subscribers: events.NewSubscribers()} } // ProcessNamespace registers namespaces that will be committed even if the rwset is not known func (c *Committer) ProcessNamespace(nss ...string) error { - return c.ch.ProcessNamespace(nss...) + return c.committer.ProcessNamespace(nss...) 
} // Status returns a validation code this committer bind to the passed transaction id, plus // a list of dependant transaction ids if they exist. -func (c *Committer) Status(txID string) (ValidationCode, string, []string, error) { - vc, message, deps, err := c.ch.Status(txID) - return ValidationCode(vc), message, deps, err +func (c *Committer) Status(txID string) (ValidationCode, string, error) { + vc, message, err := c.committer.Status(txID) + return ValidationCode(vc), message, err } func (c *Committer) AddStatusReporter(sr driver.StatusReporter) error { - return c.ch.AddStatusReporter(sr) + return c.committer.AddStatusReporter(sr) } // SubscribeTxStatusChanges registers a listener for transaction status changes for the passed transaction id. // If the transaction id is empty, the listener will be called for all transactions. func (c *Committer) SubscribeTxStatusChanges(txID string, listener TxStatusChangeListener) error { - return c.ch.SubscribeTxStatusChanges(txID, listener) + return c.committer.SubscribeTxStatusChanges(txID, listener) } // UnsubscribeTxStatusChanges unregisters a listener for transaction status changes for the passed transaction id. // If the transaction id is empty, the listener will be called for all transactions. 
func (c *Committer) UnsubscribeTxStatusChanges(txID string, listener TxStatusChangeListener) error { - return c.ch.UnsubscribeTxStatusChanges(txID, listener) + return c.committer.UnsubscribeTxStatusChanges(txID, listener) } diff --git a/platform/fabric/config.go b/platform/fabric/config.go index 9f1c6c493..d9291acdd 100644 --- a/platform/fabric/config.go +++ b/platform/fabric/config.go @@ -15,3 +15,7 @@ type ConfigService struct { func (s *ConfigService) GetString(key string) string { return s.confService.GetString(key) } + +func (s *ConfigService) DefaultChannel() string { + return s.confService.DefaultChannel() +} diff --git a/platform/fabric/core/generic/chaincode.go b/platform/fabric/core/generic/chaincode.go index f47b81f50..35ea5030b 100644 --- a/platform/fabric/core/generic/chaincode.go +++ b/platform/fabric/core/generic/chaincode.go @@ -7,12 +7,74 @@ SPDX-License-Identifier: Apache-2.0 package generic import ( + "context" + "sync" + "time" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/chaincode" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" ) +type Broadcaster interface { + Broadcast(context context.Context, blob interface{}) error +} + +type MSPProvider interface { + MSPManager() driver.MSPManager +} + +type ChaincodeManager struct { + NetworkID string + ChannelID string + ConfigService driver.ConfigService + ChannelConfig driver.ChannelConfig + NumRetries uint + RetrySleep time.Duration + LocalMembership driver.LocalMembership + PeerManager chaincode.PeerManager + SignerService driver.SignerService + Broadcaster Broadcaster + Finality driver.Finality + MSPProvider MSPProvider + + // chaincodes + ChaincodesLock sync.RWMutex + Chaincodes map[string]driver.Chaincode +} + +func NewChaincodeManager( + networkID string, + channelID string, + configService driver.ConfigService, + channelConfig driver.ChannelConfig, + numRetries uint, + retrySleep time.Duration, + 
localMembership driver.LocalMembership, + peerManager chaincode.PeerManager, + signerService driver.SignerService, + broadcaster Broadcaster, + finality driver.Finality, + MSPProvider MSPProvider, +) *ChaincodeManager { + return &ChaincodeManager{ + NetworkID: networkID, + ChannelID: channelID, + ConfigService: configService, + ChannelConfig: channelConfig, + NumRetries: numRetries, + RetrySleep: retrySleep, + LocalMembership: localMembership, + PeerManager: peerManager, + SignerService: signerService, + Broadcaster: broadcaster, + Finality: finality, + MSPProvider: MSPProvider, + Chaincodes: map[string]driver.Chaincode{}, + } +} + // Chaincode returns a chaincode handler for the passed chaincode name -func (c *Channel) Chaincode(name string) driver.Chaincode { +func (c *ChaincodeManager) Chaincode(name string) driver.Chaincode { c.ChaincodesLock.RLock() ch, ok := c.Chaincodes[name] if ok { @@ -27,7 +89,17 @@ func (c *Channel) Chaincode(name string) driver.Chaincode { if ok { return ch } - ch = chaincode.NewChaincode(name, c.Network, c) + ch = chaincode.NewChaincode( + name, + c.ConfigService, + c.ChannelConfig, + c.LocalMembership, + c.PeerManager, + c.SignerService, + c.Broadcaster, + c.Finality, + c.MSPProvider, + ) c.Chaincodes[name] = ch return ch } diff --git a/platform/fabric/core/generic/chaincode/chaincode.go b/platform/fabric/core/generic/chaincode/chaincode.go index 7848d441b..e58ef3a2b 100644 --- a/platform/fabric/core/generic/chaincode/chaincode.go +++ b/platform/fabric/core/generic/chaincode/chaincode.go @@ -7,31 +7,81 @@ SPDX-License-Identifier: Apache-2.0 package chaincode import ( + "context" "sync" "time" "github.com/ReneKroon/ttlcache/v2" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" + 
"github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" ) +var logger = flogging.MustGetLogger("fabric-sdk.core.generic.chaincode") + +type PeerManager interface { + NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer.Client, error) +} + +type Broadcaster interface { + Broadcast(context context.Context, blob interface{}) error +} + +type SerializableSigner interface { + Sign(message []byte) ([]byte, error) + + Serialize() ([]byte, error) +} + +type MSPProvider interface { + MSPManager() driver.MSPManager +} + type Chaincode struct { - name string - Network Network - channel Channel - NumRetries uint - RetrySleep time.Duration + name string + NetworkID string + ChannelID string + ConfigService driver.ConfigService + ChannelConfig driver.ChannelConfig + NumRetries uint + RetrySleep time.Duration + LocalMembership driver.LocalMembership + PeerManager PeerManager + SignerService driver.SignerService + Broadcaster Broadcaster + Finality driver.Finality + MSPProvider MSPProvider discoveryResultsCacheLock sync.RWMutex discoveryResultsCache ttlcache.SimpleCache } -func NewChaincode(name string, network Network, channel Channel) *Chaincode { +func NewChaincode( + name string, + networkConfig driver.ConfigService, + channelConfig driver.ChannelConfig, + localMembership driver.LocalMembership, + peerManager PeerManager, + signerService driver.SignerService, + broadcaster Broadcaster, + finality driver.Finality, + MSPProvider MSPProvider, +) *Chaincode { return &Chaincode{ name: name, - Network: network, - channel: channel, - NumRetries: channel.Config().NumRetries, - RetrySleep: channel.Config().RetrySleep, + NetworkID: networkConfig.NetworkName(), + ChannelID: channelConfig.ID(), + ConfigService: networkConfig, + ChannelConfig: channelConfig, + NumRetries: channelConfig.GetNumRetries(), + RetrySleep: channelConfig.GetRetrySleep(), + LocalMembership: localMembership, + PeerManager: peerManager, + SignerService: signerService, + 
Broadcaster: broadcaster, + Finality: finality, + MSPProvider: MSPProvider, discoveryResultsCacheLock: sync.RWMutex{}, discoveryResultsCache: ttlcache.NewCache(), } @@ -54,18 +104,13 @@ func (c *Chaincode) IsAvailable() (bool, error) { } func (c *Chaincode) IsPrivate() bool { - channels, err := c.Network.Config().Channels() - if err != nil { - logger.Error("failed getting channels' configurations [%s]", err) + channel := c.ConfigService.Channel(c.ChannelID) + if channel == nil { return false } - for _, channel := range channels { - if channel.Name == c.channel.Name() { - for _, chaincode := range channel.Chaincodes { - if chaincode.Name == c.name { - return chaincode.Private - } - } + for _, chaincode := range channel.ChaincodeConfigs() { + if chaincode.ID() == c.name { + return chaincode.IsPrivate() } } // Nothing was found diff --git a/platform/fabric/core/generic/chaincode/discovery.go b/platform/fabric/core/generic/chaincode/discovery.go index 0915f2b6f..737a1faf8 100644 --- a/platform/fabric/core/generic/chaincode/discovery.go +++ b/platform/fabric/core/generic/chaincode/discovery.go @@ -35,7 +35,7 @@ func NewDiscovery(chaincode *Chaincode) *Discovery { // set key to the concatenation of chaincode name and version return &Discovery{ chaincode: chaincode, - DefaultTTL: chaincode.channel.Config().DiscoveryDefaultTTLS(), + DefaultTTL: chaincode.ChannelConfig.DiscoveryDefaultTTLS(), } } @@ -50,7 +50,7 @@ func (d *Discovery) GetEndorsers() ([]driver.DiscoveredPeer, error) { response, err := d.Response() // extract endorsers - cr := response.ForChannel(d.chaincode.channel.Name()) + cr := response.ForChannel(d.chaincode.ChannelID) var endorsers discovery.Endorsers switch { case len(d.ImplicitCollections) > 0: @@ -71,13 +71,13 @@ func (d *Discovery) GetEndorsers() ([]driver.DiscoveredPeer, error) { ) } if err != nil { - return nil, errors.WithMessagef(err, "failed getting endorsers for [%s:%s:%s]", d.chaincode.Network.Name(), d.chaincode.channel.Name(), 
d.chaincode.name) + return nil, errors.WithMessagef(err, "failed getting endorsers for [%s:%s:%s]", d.chaincode.NetworkID, d.chaincode.ChannelID, d.chaincode.name) } // prepare result configResult, err := cr.Config() if err != nil { - return nil, errors.WithMessagef(err, "failed getting config for [%s:%s:%s]", d.chaincode.Network.Name(), d.chaincode.channel.Name(), d.chaincode.name) + return nil, errors.WithMessagef(err, "failed getting config for [%s:%s:%s]", d.chaincode.NetworkID, d.chaincode.ChannelID, d.chaincode.name) } return d.toDiscoveredPeers(configResult, endorsers) } @@ -89,11 +89,11 @@ func (d *Discovery) GetPeers() ([]driver.DiscoveredPeer, error) { } // extract peers - cr := response.ForChannel(d.chaincode.channel.Name()) + cr := response.ForChannel(d.chaincode.ChannelID) var peers []*discovery.Peer peers, err = cr.Peers(ccCall(d.chaincode.name)...) if err != nil { - return nil, errors.WithMessagef(err, "failed getting peers for [%s:%s:%s]", d.chaincode.Network.Name(), d.chaincode.channel.Name(), d.chaincode.name) + return nil, errors.WithMessagef(err, "failed getting peers for [%s:%s:%s]", d.chaincode.NetworkID, d.chaincode.ChannelID, d.chaincode.name) } // filter @@ -109,15 +109,15 @@ func (d *Discovery) GetPeers() ([]driver.DiscoveredPeer, error) { // prepare result configResult, err := cr.Config() if err != nil { - return nil, errors.WithMessagef(err, "failed getting config for [%s:%s:%s]", d.chaincode.Network.Name(), d.chaincode.channel.Name(), d.chaincode.name) + return nil, errors.WithMessagef(err, "failed getting config for [%s:%s:%s]", d.chaincode.NetworkID, d.chaincode.ChannelID, d.chaincode.name) } return d.toDiscoveredPeers(configResult, peers) } func (d *Discovery) Response() (discovery.Response, error) { var sb strings.Builder - sb.WriteString(d.chaincode.Network.Name()) - sb.WriteString(d.chaincode.channel.Name()) + sb.WriteString(d.chaincode.NetworkID) + sb.WriteString(d.chaincode.ChannelID) sb.WriteString(d.chaincode.name) for _, 
mspiD := range d.FilterByMSPIDs { sb.WriteString(mspiD) @@ -184,7 +184,7 @@ func (d *Discovery) queryPeers() (discovery.Response, error) { // New discovery request for: // - peers and // - config, - req := discovery.NewRequest().OfChannel(d.chaincode.channel.Name()).AddPeersQuery( + req := discovery.NewRequest().OfChannel(d.chaincode.ChannelID).AddPeersQuery( &peer.ChaincodeCall{Name: d.chaincode.name}, ) req = req.AddConfigQuery() @@ -195,7 +195,7 @@ func (d *Discovery) queryEndorsers() (discovery.Response, error) { // New discovery request for: // - endorsers and // - config, - req, err := discovery.NewRequest().OfChannel(d.chaincode.channel.Name()).AddEndorsersQuery( + req, err := discovery.NewRequest().OfChannel(d.chaincode.ChannelID).AddEndorsersQuery( &peer.ChaincodeInterest{Chaincodes: []*peer.ChaincodeCall{{Name: d.chaincode.name}}}, ) if err != nil { @@ -212,13 +212,13 @@ func (d *Discovery) query(req *discovery.Request) (discovery.Response, error) { pCli.Close() } }() - pc, err := d.chaincode.channel.NewPeerClientForAddress(*d.chaincode.Network.PickPeer(driver.PeerForDiscovery)) + pc, err := d.chaincode.PeerManager.NewPeerClientForAddress(*d.chaincode.ConfigService.PickPeer(driver.PeerForDiscovery)) if err != nil { return nil, err } peerClients = append(peerClients, pc) - signer := d.chaincode.Network.LocalMembership().DefaultSigningIdentity() + signer := d.chaincode.LocalMembership.DefaultSigningIdentity() signerRaw, err := signer.Serialize() if err != nil { return nil, err @@ -231,7 +231,7 @@ func (d *Discovery) query(req *discovery.Request) (discovery.Response, error) { ClientIdentity: signerRaw, ClientTlsCertHash: ClientTLSCertHash, } - timeout, cancel := context.WithTimeout(context.Background(), d.chaincode.channel.Config().DiscoveryTimeout()) + timeout, cancel := context.WithTimeout(context.Background(), d.chaincode.ChannelConfig.DiscoveryTimeout()) defer cancel() cl, err := pc.DiscoveryClient() if err != nil { @@ -284,32 +284,32 @@ func (d 
*Discovery) toDiscoveredPeers(configResult *discovery2.ConfigResult, end func (d *Discovery) ChaincodeVersion() (string, error) { response, err := d.Response() if err != nil { - return "", errors.Wrapf(err, "unable to discover channel information for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.channel.Name()) + return "", errors.Wrapf(err, "unable to discover channel information for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.ChannelID) } - endorsers, err := response.ForChannel(d.chaincode.channel.Name()).Endorsers([]*peer.ChaincodeCall{{ + endorsers, err := response.ForChannel(d.chaincode.ChannelID).Endorsers([]*peer.ChaincodeCall{{ Name: d.chaincode.name, }}, &noFilter{}) if err != nil { - return "", errors.Wrapf(err, "failed to get endorsers for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.channel.Name()) + return "", errors.Wrapf(err, "failed to get endorsers for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.ChannelID) } if len(endorsers) == 0 { - return "", errors.Errorf("no endorsers found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.channel.Name()) + return "", errors.Errorf("no endorsers found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.ChannelID) } stateInfoMessage := endorsers[0].StateInfoMessage if stateInfoMessage == nil { - return "", errors.Errorf("no state info message found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.channel.Name()) + return "", errors.Errorf("no state info message found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.ChannelID) } stateInfo := stateInfoMessage.GetStateInfo() if stateInfo == nil { - return "", errors.Errorf("no state info found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.channel.Name()) + return "", errors.Errorf("no state info found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.ChannelID) } properties := 
stateInfo.GetProperties() if properties == nil { - return "", errors.Errorf("no properties found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.channel.Name()) + return "", errors.Errorf("no properties found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.ChannelID) } chaincodes := properties.Chaincodes if len(chaincodes) == 0 { - return "", errors.Errorf("no chaincode info found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.channel.Name()) + return "", errors.Errorf("no chaincode info found for chaincode [%s] on channel [%s]", d.chaincode.name, d.chaincode.ChannelID) } for _, chaincode := range chaincodes { if chaincode.Name == d.chaincode.name { diff --git a/platform/fabric/core/generic/chaincode/invoke.go b/platform/fabric/core/generic/chaincode/invoke.go index 80c7193a0..b58ce2735 100644 --- a/platform/fabric/core/generic/chaincode/invoke.go +++ b/platform/fabric/core/generic/chaincode/invoke.go @@ -29,8 +29,6 @@ import ( type Invoke struct { Chaincode *Chaincode - Network Network - Channel Channel TxID driver.TxID SignerIdentity view.Identity ChaincodePath string @@ -53,8 +51,6 @@ type Invoke struct { func NewInvoke(chaincode *Chaincode, function string, args ...interface{}) *Invoke { return &Invoke{ Chaincode: chaincode, - Network: chaincode.Network, - Channel: chaincode.channel, ChaincodeName: chaincode.name, Function: function, Args: args, @@ -283,7 +279,7 @@ func (i *Invoke) prepare(query bool) (string, *pb.Proposal, []*pb.ProposalRespon case len(i.EndorsersByConnConfig) != 0: // get a peer client for each connection config for _, config := range i.EndorsersByConnConfig { - peerClient, err := i.Channel.NewPeerClientForAddress(*config) + peerClient, err := i.Chaincode.PeerManager.NewPeerClientForAddress(*config) if err != nil { return "", nil, nil, nil, err } @@ -292,7 +288,7 @@ func (i *Invoke) prepare(query bool) (string, *pb.Proposal, []*pb.ProposalRespon default: if i.EndorsersFromMyOrg && 
len(i.EndorsersMSPIDs) == 0 { // retrieve invoker's MSP-ID - invokerMSPID, err := i.Channel.MSPManager().DeserializeIdentity(i.SignerIdentity) + invokerMSPID, err := i.Chaincode.MSPProvider.MSPManager().DeserializeIdentity(i.SignerIdentity) if err != nil { return "", nil, nil, nil, errors.WithMessagef(err, "failed to deserializer the invoker identity") } @@ -333,9 +329,9 @@ func (i *Invoke) prepare(query bool) (string, *pb.Proposal, []*pb.ProposalRespon // get a peer client for all discovered peers for _, peer := range discoveredPeers { - peerClient, err := i.Channel.NewPeerClientForAddress(grpc.ConnectionConfig{ + peerClient, err := i.Chaincode.PeerManager.NewPeerClientForAddress(grpc.ConnectionConfig{ Address: peer.Endpoint, - TLSEnabled: i.Network.Config().TLSEnabled(), + TLSEnabled: i.Chaincode.ConfigService.TLSEnabled(), TLSRootCertBytes: peer.TLSRootCerts, }) if err != nil { @@ -357,7 +353,7 @@ func (i *Invoke) prepare(query bool) (string, *pb.Proposal, []*pb.ProposalRespon } // load signer - signer, err := i.Network.SignerService().GetSigningIdentity(i.SignerIdentity) + signer, err := i.Chaincode.SignerService.GetSigningIdentity(i.SignerIdentity) if err != nil { return "", nil, nil, nil, err } @@ -396,7 +392,7 @@ func (i *Invoke) prepareProposal(signer SerializableSigner) (*pb.SignedProposal, funcName := "invoke" prop, txID, err := i.createChaincodeProposalWithTxIDAndTransient( common.HeaderType_ENDORSER_TRANSACTION, - i.Channel.Name(), + i.Chaincode.ChannelID, invocation, creator, i.TransientMap) @@ -527,8 +523,8 @@ func (i *Invoke) toBytes(arg interface{}) ([]byte, error) { } func (i *Invoke) broadcast(txID string, env *common.Envelope) error { - if err := i.Network.Broadcast(i.Context, env); err != nil { + if err := i.Chaincode.Broadcaster.Broadcast(i.Context, env); err != nil { return err } - return i.Channel.IsFinal(context.Background(), txID) + return i.Chaincode.Finality.IsFinal(context.Background(), txID) } diff --git 
a/platform/fabric/core/generic/chaincode/support.go b/platform/fabric/core/generic/chaincode/support.go deleted file mode 100644 index 8d28e7096..000000000 --- a/platform/fabric/core/generic/chaincode/support.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package chaincode - -import ( - "context" - - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" -) - -var logger = flogging.MustGetLogger("fabric-sdk.chaincode") - -type SignerProvider interface { - GetSigningIdentity(identity view.Identity) (*view2.SigningIdentity, error) -} - -type SerializableSigner interface { - Sign(message []byte) ([]byte, error) - - Serialize() ([]byte, error) -} - -type Network interface { - Name() string - PickPeer(funcType driver.PeerFunctionType) *grpc.ConnectionConfig - LocalMembership() driver.LocalMembership - // Broadcast sends the passed blob to the ordering service to be ordered - Broadcast(context context.Context, blob interface{}) error - SignerService() driver.SignerService - Config() *config.Config -} - -type Channel interface { - // Name returns the name of the channel - Name() string - - // Config returns the channel configuration - Config() *config.Channel - - // NewPeerClientForAddress creates an instance of a Client using the - // provided peer connection config - NewPeerClientForAddress(cc 
grpc.ConnectionConfig) (peer.Client, error) - - // IsFinal takes in input a transaction id and waits for its confirmation - // with the respect to the passed context that can be used to set a deadline - // for the waiting time. - IsFinal(ctx context.Context, txID string) error - - MSPManager() driver.MSPManager - - Chaincode(name string) driver.Chaincode -} diff --git a/platform/fabric/core/generic/channel.go b/platform/fabric/core/generic/channel.go index bfbfbe271..8299ecc2d 100644 --- a/platform/fabric/core/generic/channel.go +++ b/platform/fabric/core/generic/channel.go @@ -8,32 +8,17 @@ package generic import ( "context" - "os" - "sync" - "time" - "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/proto" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/committer" - config2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" - delivery2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/delivery" - finality2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/finality" - peer2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" - common2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer/common" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/finality" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/membership" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/transaction" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/vault" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 
"github.com/hyperledger-labs/fabric-smart-client/platform/view" - api2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/driver" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/events" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/hash" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/tracing" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" "github.com/hyperledger/fabric-protos-go/common" - "github.com/hyperledger/fabric-protos-go/peer" - "github.com/hyperledger/fabric/common/channelconfig" - discovery "github.com/hyperledger/fabric/discovery/client" - "github.com/hyperledger/fabric/protoutil" "github.com/pkg/errors" ) @@ -42,8 +27,6 @@ const ( GetBlockByNumber string = "GetBlockByNumber" GetTransactionByID string = "GetTransactionByID" GetBlockByTxID string = "GetBlockByTxID" - DefaultNumRetries = 3 - DefaultRetrySleep = 1 * time.Second ) type Delivery interface { @@ -52,41 +35,23 @@ type Delivery interface { } type Channel struct { - SP view2.ServiceProvider - ChannelConfig *config2.Channel - NetworkConfig *config2.Config - Network *Network - ChannelName string - Finality driver.Finality - Vault *vault.Vault - ProcessNamespaces []string - StatusReporters []driver.StatusReporter - ExternalCommitter *committer.ExternalCommitter - ES driver.EnvelopeService - TS driver.EndorserTransactionService - MS driver.MetadataService - DeliveryService Delivery - driver.TXIDStore - RWSetLoader driver.RWSetLoader - - // ResourcesApplyLock is used to serialize calls to CommitConfig and bundle update processing. 
- ResourcesApplyLock sync.Mutex - // ResourcesLock is used to serialize access to resources - ResourcesLock sync.RWMutex - // resources is used to acquire configuration bundle resources. - ChannelResources channelconfig.Resources - - // chaincodes - ChaincodesLock sync.RWMutex - Chaincodes map[string]driver.Chaincode - - // connection pool - ConnCache common2.CachingEndorserPool - - // events - Subscribers *events.Subscribers - EventsSubscriber events.Subscriber - EventsPublisher events.Publisher + ChannelConfig driver.ChannelConfig + ConfigService driver.ConfigService + Network *Network + ChannelName string + FinalityService driver.Finality + VaultService driver.Vault + TXIDStoreService driver.TXIDStore + ES driver.EnvelopeService + TS driver.EndorserTransactionService + MS driver.MetadataService + DeliveryService *DeliveryService + RWSetLoaderService driver.RWSetLoader + LedgerService driver.Ledger + ChannelMembershipService *membership.Service + ChaincodeManagerService driver.ChaincodeManager + CommitterService *committer.Service + PeerManager *PeerManager } func NewChannel(nw driver.FabricNetworkService, name string, quiet bool) (driver.Channel, error) { @@ -94,38 +59,48 @@ func NewChannel(nw driver.FabricNetworkService, name string, quiet bool) (driver sp := network.SP // Channel configuration - channelConfigs, err := network.config.Channels() - if err != nil { - return nil, errors.WithMessagef(err, "failed to get Channel config") - } - var channelConfig *config2.Channel - for _, config := range channelConfigs { - if config.Name == name { - channelConfig = config - break - } - } + channelConfig := network.ConfigService().Channel(name) if channelConfig == nil { - channelConfig = &config2.Channel{ - Name: name, - Default: false, - Quiet: false, - NumRetries: DefaultNumRetries, - RetrySleep: DefaultRetrySleep, - Chaincodes: nil, - } + channelConfig = network.ConfigService().NewDefaultChannelConfig(name) } // Vault - v, txIDStore, err := NewVault(sp, 
network.config, name) + v, txIDStore, err := NewVault(sp, network.configService, name) if err != nil { return nil, err } + // Events + eventsPublisher, err := events.GetPublisher(sp) + if err != nil { + return nil, errors.Wrap(err, "failed to get event publisher") + } + eventsSubscriber, err := events.GetSubscriber(sp) + if err != nil { + return nil, errors.Wrap(err, "failed to get event subscriber") + } + + kvsService := kvs.GetService(sp) + + c := &Channel{ + ChannelName: name, + ConfigService: network.configService, + ChannelConfig: channelConfig, + Network: network, + VaultService: NewVaultService(v), + TXIDStoreService: txIDStore, + ES: transaction.NewEnvelopeService(kvsService, network.Name(), name), + TS: transaction.NewEndorseTransactionService(kvsService, network.Name(), name), + MS: transaction.NewMetadataService(kvsService, network.Name(), name), + PeerManager: NewPeerManager(network.configService, network.LocalMembership().DefaultSigningIdentity()), + } + // Fabric finality - fabricFinality, err := finality2.NewFabricFinality( + fabricFinality, err := finality.NewFabricFinality( name, - network, + network.ConfigService(), + c.PeerManager, + network.LocalMembership().DefaultSigningIdentity(), hash.GetHasher(sp), channelConfig.FinalityWaitTimeout(), ) @@ -133,79 +108,85 @@ func NewChannel(nw driver.FabricNetworkService, name string, quiet bool) (driver return nil, err } - // Committers - externalCommitter, err := committer.GetExternalCommitter(name, sp, v) - if err != nil { - return nil, err - } + c.ChannelMembershipService = membership.NewService() - publisher, err := events.GetPublisher(network.SP) - if err != nil { - return nil, errors.Wrapf(err, "failed to get event publisher") - } + // Committers + c.RWSetLoaderService = NewRWSetLoader( + network.Name(), name, + c.ES, c.TS, network.TransactionManager(), + v, + ) - committerInst, err := committer.New( + c.CommitterService = committer.NewService( + network.configService, channelConfig, - network, + 
c.VaultService, + c.ES, + c.LedgerService, + c.RWSetLoaderService, + c.Network.processorManager, + eventsSubscriber, + eventsPublisher, + c.ChannelMembershipService, + c.Network, fabricFinality, channelConfig.CommitterWaitForEventTimeout(), quiet, tracing.Get(sp).GetTracer(), - publisher, ) - if err != nil { - return nil, err - } - // Delivery - deliveryService, err := delivery2.New(channelConfig, sp, network, func(block *common.Block) (bool, error) { - // commit the block, if an error occurs then retry - err := committerInst.Commit(block) - return false, err - }, txIDStore, channelConfig.CommitterWaitForEventTimeout()) if err != nil { return nil, err } // Finality - fs, err := finality2.NewService(sp, network, channelConfig, committerInst) + c.FinalityService = c.CommitterService + + c.ChaincodeManagerService = NewChaincodeManager( + network.Name(), + name, + network.configService, + channelConfig, + channelConfig.GetNumRetries(), + channelConfig.GetRetrySleep(), + network.localMembership, + c.PeerManager, + network.sigService, + network.Ordering, + c.FinalityService, + c.ChannelMembershipService, + ) + + c.LedgerService = NewLedger( + name, + c.ChaincodeManagerService, + network.localMembership, + network.configService, + ) + + // Delivery + deliveryService, err := NewDeliveryService( + name, + channelConfig, + hash.GetHasher(sp), + network.Name(), + network.LocalMembership(), + network.ConfigService(), + c.PeerManager, + c.LedgerService, + channelConfig.CommitterWaitForEventTimeout(), + txIDStore, + func(block *common.Block) (bool, error) { + // commit the block, if an error occurs then retry + err := c.CommitterService.Commit(block) + return false, err + }, + ) if err != nil { return nil, err } - // Events - eventsPublisher, err := events.GetPublisher(sp) - if err != nil { - return nil, errors.Wrap(err, "failed to get event publisher") - } - eventsSubscriber, err := events.GetSubscriber(sp) - if err != nil { - return nil, errors.Wrap(err, "failed to get event 
subscriber") - } + c.DeliveryService = deliveryService - c := &Channel{ - ChannelName: name, - NetworkConfig: network.config, - ChannelConfig: channelConfig, - Network: network, - Vault: v, - SP: sp, - Finality: fs, - DeliveryService: deliveryService, - ExternalCommitter: externalCommitter, - TXIDStore: txIDStore, - ES: transaction.NewEnvelopeService(sp, network.Name(), name), - TS: transaction.NewEndorseTransactionService(sp, network.Name(), name), - MS: transaction.NewMetadataService(sp, network.Name(), name), - Chaincodes: map[string]driver.Chaincode{}, - EventsPublisher: eventsPublisher, - EventsSubscriber: eventsSubscriber, - Subscribers: events.NewSubscribers(), - } - c.RWSetLoader = NewRWSetLoader( - network.Name(), name, - c.ES, c.TS, network.TransactionManager(), - v, - ) if err := c.Init(); err != nil { return nil, errors.WithMessagef(err, "failed initializing Channel [%s]", name) } @@ -217,264 +198,50 @@ func (c *Channel) Name() string { return c.ChannelName } -func (c *Channel) NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer2.Client, error) { - logger.Debugf("NewPeerClientForAddress [%v]", cc) - return c.ConnCache.NewPeerClientForAddress(cc) -} - -func (c *Channel) IsValid(identity view.Identity) error { - id, err := c.MSPManager().DeserializeIdentity(identity) - if err != nil { - return errors.Wrapf(err, "failed deserializing identity [%s]", identity.String()) - } - - return id.Validate() -} - -func (c *Channel) GetVerifier(identity view.Identity) (api2.Verifier, error) { - id, err := c.MSPManager().DeserializeIdentity(identity) - if err != nil { - return nil, errors.Wrapf(err, "failed deserializing identity [%s]", identity.String()) - } - return id, nil -} - -func (c *Channel) GetClientConfig(tlsRootCerts [][]byte, UseTLS bool) (*grpc.ClientConfig, string, error) { - override := c.NetworkConfig.TLSServerHostOverride() - clientConfig := &grpc.ClientConfig{} - clientConfig.Timeout = c.NetworkConfig.ClientConnTimeout() - if clientConfig.Timeout 
== time.Duration(0) { - clientConfig.Timeout = grpc.DefaultConnectionTimeout - } - - secOpts := grpc.SecureOptions{ - UseTLS: UseTLS, - RequireClientCert: c.NetworkConfig.TLSClientAuthRequired(), - } - if UseTLS { - secOpts.RequireClientCert = false - } - - if secOpts.RequireClientCert { - keyPEM, err := os.ReadFile(c.NetworkConfig.TLSClientKeyFile()) - if err != nil { - return nil, "", errors.WithMessage(err, "unable to load fabric.tls.clientKey.file") - } - secOpts.Key = keyPEM - certPEM, err := os.ReadFile(c.NetworkConfig.TLSClientCertFile()) - if err != nil { - return nil, "", errors.WithMessage(err, "unable to load fabric.tls.clientCert.file") - } - secOpts.Certificate = certPEM - } - clientConfig.SecOpts = secOpts - - if clientConfig.SecOpts.UseTLS { - if len(tlsRootCerts) == 0 { - return nil, "", errors.New("tls root cert file must be set") - } - clientConfig.SecOpts.ServerRootCAs = tlsRootCerts - } - - clientConfig.KaOpts = grpc.KeepaliveOptions{ - ClientInterval: c.NetworkConfig.KeepAliveClientInterval(), - ClientTimeout: c.NetworkConfig.KeepAliveClientTimeout(), - } - - return clientConfig, override, nil +func (c *Channel) Close() error { + c.DeliveryService.Stop() + return c.Vault().Close() } -func (c *Channel) GetTransactionByID(txID string) (driver.ProcessedTransaction, error) { - raw, err := c.Chaincode("qscc").NewInvocation(GetTransactionByID, c.ChannelName, txID).WithSignerIdentity( - c.Network.LocalMembership().DefaultIdentity(), - ).WithEndorsersByConnConfig(c.Network.PickPeer(driver.PeerForQuery)).Query() - if err != nil { - return nil, err - } - - logger.Debugf("got transaction by id [%s] of len [%d]", txID, len(raw)) - - pt := &peer.ProcessedTransaction{} - err = proto.Unmarshal(raw, pt) - if err != nil { - return nil, err - } - return newProcessedTransaction(pt) +func (c *Channel) Vault() driver.Vault { + return c.VaultService } -func (c *Channel) GetBlockNumberByTxID(txID string) (uint64, error) { - res, err := 
c.Chaincode("qscc").NewInvocation(GetBlockByTxID, c.ChannelName, txID).WithSignerIdentity( - c.Network.LocalMembership().DefaultIdentity(), - ).WithEndorsersByConnConfig(c.Network.PickPeer(driver.PeerForQuery)).Query() - if err != nil { - return 0, err - } - - block := &common.Block{} - err = proto.Unmarshal(res, block) - if err != nil { - return 0, err - } - return block.Header.Number, nil +func (c *Channel) Finality() driver.Finality { + return c.FinalityService } -func (c *Channel) Close() error { - c.DeliveryService.Stop() - return c.Vault.Close() +func (c *Channel) Ledger() driver.Ledger { + return c.LedgerService } -func (c *Channel) Config() *config2.Channel { - return c.ChannelConfig +func (c *Channel) Delivery() driver.Delivery { + return c.DeliveryService } -func (c *Channel) DefaultSigner() discovery.Signer { - return c.Network.LocalMembership().DefaultSigningIdentity().Sign +func (c *Channel) ChaincodeManager() driver.ChaincodeManager { + return c.ChaincodeManagerService } -// FetchEnvelope fetches from the ledger and stores the enveloped correspoding to the passed id -func (c *Channel) FetchEnvelope(txID string) ([]byte, error) { - pt, err := c.GetTransactionByID(txID) - if err != nil { - return nil, errors.WithMessagef(err, "failed fetching tx [%s]", txID) - } - if !pt.IsValid() { - return nil, errors.Errorf("fetched tx [%s] should have been valid, instead it is [%s]", txID, peer.TxValidationCode_name[pt.ValidationCode()]) - } - return pt.Envelope(), nil +func (c *Channel) ChannelMembership() driver.ChannelMembership { + return c.ChannelMembershipService } -func (c *Channel) GetRWSetFromEvn(txID string) (driver.RWSet, driver.ProcessTransaction, error) { - return c.RWSetLoader.GetRWSetFromEvn(txID) +func (c *Channel) TXIDStore() driver.TXIDStore { + return c.TXIDStoreService } -func (c *Channel) GetRWSetFromETx(txID string) (driver.RWSet, driver.ProcessTransaction, error) { - return c.RWSetLoader.GetRWSetFromETx(txID) +func (c *Channel) RWSetLoader() 
driver.RWSetLoader { + return c.RWSetLoaderService } -func (c *Channel) GetInspectingRWSetFromEvn(txID string, envelopeRaw []byte) (driver.RWSet, driver.ProcessTransaction, error) { - return c.RWSetLoader.GetInspectingRWSetFromEvn(txID, envelopeRaw) +func (c *Channel) Committer() driver.Committer { + return c.CommitterService } func (c *Channel) Init() error { - if err := c.ReloadConfigTransactions(); err != nil { + if err := c.CommitterService.ReloadConfigTransactions(); err != nil { return errors.WithMessagef(err, "failed reloading config transactions") } - c.ConnCache = common2.CachingEndorserPool{ - Cache: map[string]peer2.Client{}, - ConnCreator: &connCreator{ch: c}, - Signer: c.DefaultSigner(), - } return nil } - -func newPeerClientForClientConfig(signer discovery.Signer, address, override string, clientConfig grpc.ClientConfig) (*common2.PeerClient, error) { - gClient, err := grpc.NewGRPCClient(clientConfig) - if err != nil { - return nil, errors.WithMessage(err, "failed to create Client from config") - } - pClient := &common2.PeerClient{ - Signer: signer, - CommonClient: common2.CommonClient{ - Client: gClient, - Address: address, - Sn: override, - }, - } - return pClient, nil -} - -type processedTransaction struct { - vc int32 - ue *transaction.UnpackedEnvelope - env []byte -} - -func newProcessedTransactionFromEnvelope(env *common.Envelope) (*processedTransaction, int32, error) { - ue, headerType, err := transaction.UnpackEnvelope(env) - if err != nil { - return nil, headerType, err - } - return &processedTransaction{ue: ue}, headerType, nil -} - -func newProcessedTransactionFromEnvelopeRaw(env []byte) (*processedTransaction, error) { - ue, _, err := transaction.UnpackEnvelopeFromBytes(env) - if err != nil { - return nil, err - } - return &processedTransaction{ue: ue, env: env}, nil -} - -func newProcessedTransaction(pt *peer.ProcessedTransaction) (*processedTransaction, error) { - ue, _, err := transaction.UnpackEnvelope(pt.TransactionEnvelope) - if err 
!= nil { - return nil, err - } - env, err := protoutil.Marshal(pt.TransactionEnvelope) - if err != nil { - return nil, err - } - return &processedTransaction{vc: pt.ValidationCode, ue: ue, env: env}, nil -} - -func (p *processedTransaction) TxID() string { - return p.ue.TxID -} - -func (p *processedTransaction) Results() []byte { - return p.ue.Results -} - -func (p *processedTransaction) IsValid() bool { - return p.vc == int32(peer.TxValidationCode_VALID) -} - -func (p *processedTransaction) Envelope() []byte { - return p.env -} - -func (p *processedTransaction) ValidationCode() int32 { - return p.vc -} - -type connCreator struct { - ch *Channel -} - -func (c *connCreator) NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer2.Client, error) { - logger.Debugf("Creating new peer client for address [%s]", cc.Address) - var certs [][]byte - if cc.TLSEnabled { - switch { - case len(cc.TLSRootCertFile) != 0: - logger.Debugf("Loading TLSRootCert from file [%s]", cc.TLSRootCertFile) - caPEM, err := os.ReadFile(cc.TLSRootCertFile) - if err != nil { - logger.Error("unable to load TLS cert from %s", cc.TLSRootCertFile) - return nil, errors.WithMessagef(err, "unable to load TLS cert from %s", cc.TLSRootCertFile) - } - certs = append(certs, caPEM) - case len(cc.TLSRootCertBytes) != 0: - logger.Debugf("Loading TLSRootCert from passed bytes [%s[", cc.TLSRootCertBytes) - certs = cc.TLSRootCertBytes - default: - return nil, errors.New("missing TLSRootCertFile in client config") - } - } - - clientConfig, override, err := c.ch.GetClientConfig(certs, cc.TLSEnabled) - if err != nil { - return nil, err - } - - if len(cc.ServerNameOverride) != 0 { - override = cc.ServerNameOverride - } - - return newPeerClientForClientConfig( - c.ch.DefaultSigner(), - cc.Address, - override, - *clientConfig, - ) -} diff --git a/platform/fabric/core/generic/committer.go b/platform/fabric/core/generic/committer.go deleted file mode 100644 index bedfdc669..000000000 --- 
a/platform/fabric/core/generic/committer.go +++ /dev/null @@ -1,437 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package generic - -import ( - "strings" - - "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/compose" - "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/proto" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/committer" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/events" - "github.com/hyperledger/fabric-protos-go/common" - "github.com/pkg/errors" -) - -func (c *Channel) Status(txID string) (driver.ValidationCode, string, []string, error) { - vc, message, err := c.Vault.Status(txID) - if err != nil { - logger.Errorf("failed to get status of [%s]: %s", txID, err) - return driver.Unknown, "", nil, err - } - if vc == driver.Unknown { - // give it a second chance - if c.EnvelopeService().Exists(txID) { - if err := c.extractStoredEnvelopeToVault(txID); err != nil { - return driver.Unknown, "", nil, errors.WithMessagef(err, "failed to extract stored enveloper for [%s]", txID) - } - vc = driver.Busy - } else { - // check status reporter, if any - for _, reporter := range c.StatusReporters { - externalStatus, externalMessage, _, err := reporter.Status(txID) - if err == nil && externalStatus != driver.Unknown { - vc = externalStatus - message = externalMessage - } - } - } - } - if c.ExternalCommitter == nil { - return vc, message, nil, nil - } - - _, dependantTxIDs, _, err := c.ExternalCommitter.Status(txID) - if err != nil { - logger.Errorf("failed to get external status of [%s]: %s", txID, err) - return driver.Unknown, "", nil, err - } - if vc == driver.Unknown && len(dependantTxIDs) != 0 { - return driver.HasDependencies, "", dependantTxIDs, nil - } - 
return vc, message, dependantTxIDs, nil -} - -func (c *Channel) ProcessNamespace(nss ...string) error { - c.ProcessNamespaces = append(c.ProcessNamespaces, nss...) - return nil -} - -func (c *Channel) GetProcessNamespace() []string { - return c.ProcessNamespaces -} - -func (c *Channel) AddStatusReporter(sr driver.StatusReporter) error { - c.StatusReporters = append(c.StatusReporters, sr) - return nil -} - -func (c *Channel) DiscardTx(txID string, message string) error { - logger.Debugf("discarding transaction [%s] with message [%s]", txID, message) - - defer c.notifyTxStatus(txID, driver.Invalid, message) - vc, _, deps, err := c.Status(txID) - if err != nil { - return errors.WithMessagef(err, "failed getting tx's status in state db [%s]", txID) - } - if vc == driver.Unknown { - // give it a second chance - if c.EnvelopeService().Exists(txID) { - if err := c.extractStoredEnvelopeToVault(txID); err != nil { - return errors.WithMessagef(err, "failed to extract stored enveloper for [%s]", txID) - } - } else { - // check status reporter, if any - found := false - for _, reporter := range c.StatusReporters { - externalStatus, _, _, err := reporter.Status(txID) - if err == nil && externalStatus != driver.Unknown { - found = true - break - } - } - if !found { - logger.Debugf("Discarding transaction [%s] skipped, tx is unknown", txID) - return nil - } - } - } - - if err := c.Vault.DiscardTx(txID, message); err != nil { - logger.Errorf("failed discarding tx [%s] in vault: %s", txID, err) - } - for _, dep := range deps { - if err := c.Vault.DiscardTx(dep, message); err != nil { - logger.Errorf("failed discarding dependant tx [%s] of [%s] in vault: %s", dep, txID, err) - } - } - return nil -} - -func (c *Channel) CommitTX(txID string, block uint64, indexInBlock int, envelope *common.Envelope) (err error) { - logger.Debugf("Committing transaction [%s,%d,%d]", txID, block, indexInBlock) - defer logger.Debugf("Committing transaction [%s,%d,%d] done [%s]", txID, block, 
indexInBlock, err) - defer func() { - if err == nil { - c.notifyTxStatus(txID, driver.Valid, "") - } - }() - - vc, _, deps, err := c.Status(txID) - if err != nil { - return errors.WithMessagef(err, "failed getting tx's status in state db [%s]", txID) - } - switch vc { - case driver.Valid: - // This should generate a panic - logger.Debugf("[%s] is already valid", txID) - return errors.Errorf("[%s] is already valid", txID) - case driver.Invalid: - // This should generate a panic - logger.Debugf("[%s] is invalid", txID) - return errors.Errorf("[%s] is invalid", txID) - case driver.Unknown: - return c.commitUnknown(txID, block, indexInBlock, envelope) - case driver.HasDependencies: - return c.commitDeps(txID, block, indexInBlock) - case driver.Busy: - return c.commit(txID, deps, block, indexInBlock, envelope) - default: - return errors.Errorf("invalid status code [%d] for [%s]", vc, txID) - } -} - -func (c *Channel) SubscribeTxStatusChanges(txID string, listener driver.TxStatusChangeListener) error { - _, topic := compose.CreateTxTopic(c.Network.Name(), c.ChannelName, txID) - l := &TxEventsListener{listener: listener} - logger.Debugf("[%s] Subscribing to transaction status changes", txID) - c.EventsSubscriber.Subscribe(topic, l) - logger.Debugf("[%s] store mapping", txID) - c.Subscribers.Set(topic, listener, l) - logger.Debugf("[%s] Subscribing to transaction status changes done", txID) - return nil -} - -func (c *Channel) UnsubscribeTxStatusChanges(txID string, listener driver.TxStatusChangeListener) error { - _, topic := compose.CreateTxTopic(c.Network.Name(), c.ChannelName, txID) - l, ok := c.Subscribers.Get(topic, listener) - if !ok { - return errors.Errorf("listener not found for txID [%s]", txID) - } - el, ok := l.(events.Listener) - if !ok { - return errors.Errorf("listener not found for txID [%s]", txID) - } - c.Subscribers.Delete(topic, listener) - c.EventsSubscriber.Unsubscribe(topic, el) - return nil -} - -func (c *Channel) commitUnknown(txID string, block 
uint64, indexInBlock int, envelope *common.Envelope) error { - // if an envelope exists for the passed txID, then commit it - if c.EnvelopeService().Exists(txID) { - return c.commitStoredEnvelope(txID, block, indexInBlock) - } - - var envelopeRaw []byte - var err error - if envelope != nil { - // Store it - envelopeRaw, err = proto.Marshal(envelope) - if err != nil { - return errors.WithMessagef(err, "failed to store unknown envelope for [%s]", txID) - } - } else { - // fetch envelope and store it - envelopeRaw, err = c.FetchEnvelope(txID) - if err != nil { - return errors.WithMessagef(err, "failed getting rwset for tx [%s]", txID) - } - } - - // shall we commit this unknown envelope - if ok, err := c.filterUnknownEnvelope(txID, envelopeRaw); err != nil || !ok { - logger.Debugf("[%s] unknown envelope will not be processed [%b,%s]", txID, ok, err) - return nil - } - - if err := c.EnvelopeService().StoreEnvelope(txID, envelopeRaw); err != nil { - return errors.WithMessagef(err, "failed to store unknown envelope for [%s]", txID) - } - rws, _, err := c.RWSetLoader.GetRWSetFromEvn(txID) - if err != nil { - return errors.WithMessagef(err, "failed to get rws from envelope [%s]", txID) - } - rws.Done() - return c.commit(txID, nil, block, indexInBlock, envelope) -} - -func (c *Channel) filterUnknownEnvelope(txID string, envelope []byte) (bool, error) { - rws, _, err := c.RWSetLoader.GetInspectingRWSetFromEvn(txID, envelope) - if err != nil { - return false, errors.WithMessagef(err, "failed to get rws from envelope [%s]", txID) - } - defer rws.Done() - - logger.Debugf("[%s] contains namespaces [%v] or `initialized` key", txID, rws.Namespaces()) - for _, ns := range rws.Namespaces() { - for _, namespace := range c.ProcessNamespaces { - if namespace == ns { - logger.Debugf("[%s] contains namespaces [%v], select it", txID, rws.Namespaces()) - return true, nil - } - } - - // search a read dependency on a key containing "initialized" - for pos := 0; pos < rws.NumReads(ns); pos++ 
{ - k, err := rws.GetReadKeyAt(ns, pos) - if err != nil { - return false, errors.WithMessagef(err, "Error reading key at [%d]", pos) - } - if strings.Contains(k, "initialized") { - logger.Debugf("[%s] contains 'initialized' key [%v] in [%s], select it", txID, ns, rws.Namespaces()) - return true, nil - } - } - } - - status, _, _, _ := c.Status(txID) - return status == driver.Busy, nil -} - -func (c *Channel) commitStoredEnvelope(txID string, block uint64, indexInBlock int) error { - logger.Debugf("found envelope for transaction [%s], committing it...", txID) - if err := c.extractStoredEnvelopeToVault(txID); err != nil { - return err - } - // commit - return c.commitLocal(txID, block, indexInBlock, nil) -} - -func (c *Channel) extractStoredEnvelopeToVault(txID string) error { - rws, _, err := c.RWSetLoader.GetRWSetFromEvn(txID) - if err != nil { - // If another replica of the same node created the RWSet - rws, _, err = c.RWSetLoader.GetRWSetFromETx(txID) - if err != nil { - return errors.WithMessagef(err, "failed to extract rws from envelope and etx [%s]", txID) - } - } - rws.Done() - return nil -} - -func (c *Channel) commit(txID string, deps []string, block uint64, indexInBlock int, envelope *common.Envelope) error { - logger.Debugf("[%s] is known.", txID) - - switch { - case len(deps) != 0: - if err := c.commitExternal(txID, block, indexInBlock); err != nil { - return err - } - default: - if err := c.commitLocal(txID, block, indexInBlock, envelope); err != nil { - return err - } - } - return nil -} - -func (c *Channel) commitDeps(txID string, block uint64, indexInBlock int) error { - // This should not generate a panic if the transaction is deemed invalid - logger.Debugf("[%s] is unknown but have dependencies, commit as multi-shard pvt", txID) - - // Validate and commit - vc, err := c.ExternalCommitter.Validate(txID) - if err != nil { - return errors.WithMessagef(err, "failed validating transaction [%s]", txID) - } - switch vc { - case driver.Valid: - if err := 
c.ExternalCommitter.CommitTX(txID, block, indexInBlock); err != nil { - return errors.WithMessagef(err, "failed committing tx [%s]", txID) - } - return nil - case driver.Invalid: - if err := c.ExternalCommitter.DiscardTX(txID); err != nil { - logger.Errorf("failed committing tx [%s] with err [%s]", txID, err) - } - return nil - } - return nil -} - -func (c *Channel) commitExternal(txID string, block uint64, indexInBlock int) error { - logger.Debugf("[%s] Committing as multi-shard pvt.", txID) - - // Ask for finality - _, _, parties, err := c.ExternalCommitter.Status(txID) - if err != nil { - return errors.Wrapf(err, "failed getting parties for [%s]", txID) - } - if err := c.IsFinalForParties(txID, parties...); err != nil { - return err - } - - // Validate and commit - vc, err := c.ExternalCommitter.Validate(txID) - if err != nil { - return errors.WithMessagef(err, "failed validating transaction [%s]", txID) - } - switch vc { - case driver.Valid: - if err := c.ExternalCommitter.CommitTX(txID, block, indexInBlock); err != nil { - return errors.WithMessagef(err, "failed committing tx [%s]", txID) - } - return nil - case driver.Invalid: - if err := c.ExternalCommitter.DiscardTX(txID); err != nil { - logger.Errorf("failed committing tx [%s] with err [%s]", txID, err) - } - return nil - } - return nil -} - -func (c *Channel) commitLocal(txID string, block uint64, indexInBlock int, envelope *common.Envelope) error { - // This is a normal transaction, validated by Fabric. - // Commit it cause Fabric says it is valid. 
- logger.Debugf("[%s] committing", txID) - - // Match rwsets if envelope is not empty - if envelope != nil { - logger.Debugf("[%s] matching rwsets", txID) - - pt, headerType, err := newProcessedTransactionFromEnvelope(envelope) - if err != nil && headerType == -1 { - logger.Errorf("[%s] failed to unmarshal envelope [%s]", txID, err) - return err - } - if headerType == int32(common.HeaderType_ENDORSER_TRANSACTION) { - if !c.Vault.RWSExists(txID) && c.EnvelopeService().Exists(txID) { - // Then match rwsets - if err := c.extractStoredEnvelopeToVault(txID); err != nil { - return errors.WithMessagef(err, "failed to load stored enveloper into the vault") - } - if err := c.Vault.Match(txID, pt.Results()); err != nil { - logger.Errorf("[%s] rwsets do not match [%s]", txID, err) - return errors.Wrapf(committer.ErrDiscardTX, "[%s] rwsets do not match [%s]", txID, err) - } - } else { - // Store it - envelopeRaw, err := proto.Marshal(envelope) - if err != nil { - return errors.WithMessagef(err, "failed to store unknown envelope for [%s]", txID) - } - if err := c.EnvelopeService().StoreEnvelope(txID, envelopeRaw); err != nil { - return errors.WithMessagef(err, "failed to store unknown envelope for [%s]", txID) - } - rws, _, err := c.RWSetLoader.GetRWSetFromEvn(txID) - if err != nil { - return errors.WithMessagef(err, "failed to get rws from envelope [%s]", txID) - } - rws.Done() - } - } - } - - // Post-Processes - logger.Debugf("[%s] post process rwset", txID) - - if err := c.postProcessTx(txID); err != nil { - // This should generate a panic - return err - } - - // Commit - logger.Debugf("[%s] commit in vault", txID) - if err := c.Vault.CommitTX(txID, block, indexInBlock); err != nil { - // This should generate a panic - return err - } - - return nil -} - -func (c *Channel) postProcessTx(txID string) error { - if err := c.Network.ProcessorManager().ProcessByID(c.ChannelName, txID); err != nil { - // This should generate a panic - return err - } - return nil -} - -func (c 
*Channel) notifyTxStatus(txID string, vc driver.ValidationCode, message string) { - // We publish two events here: - // 1. The first will be caught by the listeners that are listening for any transaction id. - // 2. The second will be caught by the listeners that are listening for the specific transaction id. - sb, topic := compose.CreateTxTopic(c.Network.Name(), c.ChannelName, "") - c.EventsPublisher.Publish(&driver.TransactionStatusChanged{ - ThisTopic: topic, - TxID: txID, - VC: vc, - ValidationMessage: message, - }) - c.EventsPublisher.Publish(&driver.TransactionStatusChanged{ - ThisTopic: compose.AppendAttributesOrPanic(sb, txID), - TxID: txID, - VC: vc, - ValidationMessage: message, - }) -} - -type TxEventsListener struct { - listener driver.TxStatusChangeListener -} - -func (l *TxEventsListener) OnReceive(event events.Event) { - tsc := event.Message().(*driver.TransactionStatusChanged) - if err := l.listener.OnStatusChange(tsc.TxID, int(tsc.VC), tsc.ValidationMessage); err != nil { - logger.Errorf("failed to notify listener for tx [%s] with err [%s]", tsc.TxID, err) - } -} diff --git a/platform/fabric/core/generic/committer/committer.go b/platform/fabric/core/generic/committer/committer.go index 988ff3118..31dadf8d0 100644 --- a/platform/fabric/core/generic/committer/committer.go +++ b/platform/fabric/core/generic/committer/committer.go @@ -9,103 +9,340 @@ package committer import ( "context" "runtime/debug" + "strconv" + "strings" "sync" "time" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" + "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/compose" + "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/proto" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/fabricutils" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/membership" + 
"github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/rwset" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/transaction" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/events" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/tracing" "github.com/hyperledger/fabric-protos-go/common" + "github.com/hyperledger/fabric-protos-go/peer" + "github.com/hyperledger/fabric/bccsp/factory" + "github.com/hyperledger/fabric/common/channelconfig" + "github.com/hyperledger/fabric/common/configtx" "github.com/hyperledger/fabric/protoutil" "github.com/pkg/errors" "go.uber.org/zap/zapcore" ) const ( - ConfigTXPrefix = "configtx_" + channelConfigKey = "CHANNEL_CONFIG_ENV_BYTES" + peerNamespace = "_configtx" + ConfigTXPrefix = "configtx_" ) var ( - logger = flogging.MustGetLogger("fabric-sdk.Committer") + // TODO: introduced due to a race condition in idemix. 
+ commitConfigMutex = &sync.Mutex{} + logger = flogging.MustGetLogger("fabric-sdk.Committer") // ErrDiscardTX this error can be used to signal that a valid transaction should be discarded anyway ErrDiscardTX = errors.New("discard tx") ) -type Finality interface { +type FabricFinality interface { IsFinal(txID string, address string) error } -type Network interface { - Committer(channel string) (driver.Committer, error) - Channel(channel string) (driver.Channel, error) - PickPeer(funcType driver.PeerFunctionType) *grpc.ConnectionConfig - Ledger(channel string) (driver.Ledger, error) - Config() *config.Config +type TransactionHandler = func(block *common.Block, i int, event *TxEvent, env *common.Envelope, chHdr *common.ChannelHeader) error + +type OrderingService interface { + SetConfigOrderers(o channelconfig.Orderer, orderers []*grpc.ConnectionConfig) error } -type TransactionHandler = func(block *common.Block, i int, event *TxEvent, env *common.Envelope, chHdr *common.ChannelHeader) error +type Service struct { + ConfigService driver.ConfigService + ChannelConfig driver.ChannelConfig + + Vault driver.Vault + EnvelopeService driver.EnvelopeService + StatusReporters []driver.StatusReporter + ProcessNamespaces []string + Ledger driver.Ledger + RWSetLoaderService driver.RWSetLoader + ProcessorManager driver.ProcessorManager + MembershipService *membership.Service + OrderingService OrderingService + FabricFinality FabricFinality + Tracer tracing.Tracer + + // events + Subscribers *events.Subscribers + EventsSubscriber events.Subscriber + EventsPublisher events.Publisher -type Committer struct { - Channel string - ChannelConfig *config.Channel - Network Network - Finality Finality WaitForEventTimeout time.Duration - Tracer tracing.Tracer Handlers map[common.HeaderType]TransactionHandler QuietNotifier bool listeners map[string][]chan TxEvent mutex sync.Mutex pollingTimeout time.Duration - publisher events.Publisher } -func New(channelConfig *config.Channel, network 
Network, finality Finality, waitForEventTimeout time.Duration, quiet bool, metrics tracing.Tracer, publisher events.Publisher) (*Committer, error) { - if channelConfig == nil { - return nil, errors.Errorf("expected channel config, got nil") - } - - d := &Committer{ - Channel: channelConfig.Name, +func NewService( + ConfigService driver.ConfigService, + channelConfig driver.ChannelConfig, + vault driver.Vault, + envelopeService driver.EnvelopeService, + ledger driver.Ledger, + RWSetLoaderService driver.RWSetLoader, + processorManager driver.ProcessorManager, + eventsSubscriber events.Subscriber, + eventsPublisher events.Publisher, + ChannelMembershipService *membership.Service, + OrderingService OrderingService, + fabricFinality FabricFinality, + waitForEventTimeout time.Duration, + quiet bool, + metrics tracing.Tracer, +) *Service { + s := &Service{ + ConfigService: ConfigService, ChannelConfig: channelConfig, - Network: network, + Vault: vault, + EnvelopeService: envelopeService, + StatusReporters: []driver.StatusReporter{}, + ProcessNamespaces: []string{}, + Ledger: ledger, + RWSetLoaderService: RWSetLoaderService, + ProcessorManager: processorManager, + MembershipService: ChannelMembershipService, + OrderingService: OrderingService, + Subscribers: events.NewSubscribers(), + EventsSubscriber: eventsSubscriber, + EventsPublisher: eventsPublisher, + FabricFinality: fabricFinality, WaitForEventTimeout: waitForEventTimeout, QuietNotifier: quiet, - listeners: map[string][]chan TxEvent{}, - mutex: sync.Mutex{}, - Finality: finality, - pollingTimeout: channelConfig.CommitterPollingTimeout(), Tracer: metrics, - publisher: publisher, + listeners: map[string][]chan TxEvent{}, Handlers: map[common.HeaderType]TransactionHandler{}, + pollingTimeout: 1 * time.Second, } - d.Handlers[common.HeaderType_CONFIG] = d.HandleConfig - d.Handlers[common.HeaderType_ENDORSER_TRANSACTION] = d.HandleEndorserTransaction - return d, nil + s.Handlers[common.HeaderType_CONFIG] = s.HandleConfig 
+ s.Handlers[common.HeaderType_ENDORSER_TRANSACTION] = s.HandleEndorserTransaction + return s } -// Commit commits the transactions in the block passed as argument -func (c *Committer) Commit(block *common.Block) error { - c.Tracer.StartAt("commit", time.Now()) - for i, tx := range block.Data.Data { +func (c *Service) Status(txID string) (driver.ValidationCode, string, error) { + vc, message, err := c.Vault.Status(txID) + if err != nil { + logger.Errorf("failed to get status of [%s]: %s", txID, err) + return driver.Unknown, "", err + } + if vc == driver.Unknown { + // give it a second chance + if c.EnvelopeService.Exists(txID) { + if err := c.extractStoredEnvelopeToVault(txID); err != nil { + return driver.Unknown, "", errors.WithMessagef(err, "failed to extract stored enveloper for [%s]", txID) + } + vc = driver.Busy + } else { + // check status reporter, if any + for _, reporter := range c.StatusReporters { + externalStatus, externalMessage, _, err := reporter.Status(txID) + if err == nil && externalStatus != driver.Unknown { + vc = externalStatus + message = externalMessage + } + } + } + } + return vc, message, nil +} + +func (c *Service) ProcessNamespace(nss ...string) error { + c.ProcessNamespaces = append(c.ProcessNamespaces, nss...) 
+ return nil +} + +func (c *Service) AddStatusReporter(sr driver.StatusReporter) error { + c.StatusReporters = append(c.StatusReporters, sr) + return nil +} + +func (c *Service) DiscardTx(txID string, message string) error { + logger.Debugf("discarding transaction [%s] with message [%s]", txID, message) + + defer c.notifyTxStatus(txID, driver.Invalid, message) + vc, _, err := c.Status(txID) + if err != nil { + return errors.WithMessagef(err, "failed getting tx's status in state db [%s]", txID) + } + if vc == driver.Unknown { + // give it a second chance + if c.EnvelopeService.Exists(txID) { + if err := c.extractStoredEnvelopeToVault(txID); err != nil { + return errors.WithMessagef(err, "failed to extract stored enveloper for [%s]", txID) + } + } else { + // check status reporter, if any + found := false + for _, reporter := range c.StatusReporters { + externalStatus, _, _, err := reporter.Status(txID) + if err == nil && externalStatus != driver.Unknown { + found = true + break + } + } + if !found { + logger.Debugf("Discarding transaction [%s] skipped, tx is unknown", txID) + return nil + } + } + } + + if err := c.Vault.DiscardTx(txID, message); err != nil { + logger.Errorf("failed discarding tx [%s] in vault: %s", txID, err) + } + return nil +} + +func (c *Service) CommitTX(txID string, block uint64, indexInBlock int, envelope *common.Envelope) (err error) { + logger.Debugf("Committing transaction [%s,%d,%d]", txID, block, indexInBlock) + defer logger.Debugf("Committing transaction [%s,%d,%d] done [%s]", txID, block, indexInBlock, err) + defer func() { + if err == nil { + c.notifyTxStatus(txID, driver.Valid, "") + } + }() + + vc, _, err := c.Status(txID) + if err != nil { + return errors.WithMessagef(err, "failed getting tx's status in state db [%s]", txID) + } + switch vc { + case driver.Valid: + // This should generate a panic + logger.Debugf("[%s] is already valid", txID) + return errors.Errorf("[%s] is already valid", txID) + case driver.Invalid: + // This 
should generate a panic + logger.Debugf("[%s] is invalid", txID) + return errors.Errorf("[%s] is invalid", txID) + case driver.Unknown: + return c.commitUnknown(txID, block, indexInBlock, envelope) + case driver.Busy: + return c.commit(txID, block, indexInBlock, envelope) + default: + return errors.Errorf("invalid status code [%d] for [%s]", vc, txID) + } +} + +func (c *Service) SubscribeTxStatusChanges(txID string, listener driver.TxStatusChangeListener) error { + _, topic := compose.CreateTxTopic(c.ConfigService.NetworkName(), c.ChannelConfig.ID(), txID) + l := &TxEventsListener{listener: listener} + logger.Debugf("[%s] Subscribing to transaction status changes", txID) + c.EventsSubscriber.Subscribe(topic, l) + logger.Debugf("[%s] store mapping", txID) + c.Subscribers.Set(topic, listener, l) + logger.Debugf("[%s] Subscribing to transaction status changes done", txID) + return nil +} + +func (c *Service) UnsubscribeTxStatusChanges(txID string, listener driver.TxStatusChangeListener) error { + _, topic := compose.CreateTxTopic(c.ConfigService.NetworkName(), c.ChannelConfig.ID(), txID) + l, ok := c.Subscribers.Get(topic, listener) + if !ok { + return errors.Errorf("listener not found for txID [%s]", txID) + } + el, ok := l.(events.Listener) + if !ok { + return errors.Errorf("listener not found for txID [%s]", txID) + } + c.Subscribers.Delete(topic, listener) + c.EventsSubscriber.Unsubscribe(topic, el) + return nil +} + +// CommitConfig is used to validate and apply configuration transactions for a Channel. 
+func (c *Service) CommitConfig(blockNumber uint64, raw []byte, env *common.Envelope) error { + commitConfigMutex.Lock() + defer commitConfigMutex.Unlock() + + c.MembershipService.ResourcesApplyLock.Lock() + defer c.MembershipService.ResourcesApplyLock.Unlock() + + if env == nil { + return errors.Errorf("Channel config found nil") + } + + payload, err := protoutil.UnmarshalPayload(env.Payload) + if err != nil { + return errors.Wrapf(err, "cannot get payload from config transaction, block number [%d]", blockNumber) + } + + ctx, err := configtx.UnmarshalConfigEnvelope(payload.Data) + if err != nil { + return errors.Wrapf(err, "error unmarshalling config which passed initial validity checks") + } - env, err := protoutil.UnmarshalEnvelope(tx) + txid := ConfigTXPrefix + strconv.FormatUint(ctx.Config.Sequence, 10) + vc, _, err := c.Vault.Status(txid) + if err != nil { + return errors.Wrapf(err, "failed getting tx's status [%s]", txid) + } + switch vc { + case driver.Valid: + logger.Infof("config block [%s] already committed, skip it.", txid) + return nil + case driver.Unknown: + logger.Infof("config block [%s] not committed, commit it.", txid) + // this is okay + default: + return errors.Errorf("invalid configtx's [%s] status [%d]", txid, vc) + } + + var bundle *channelconfig.Bundle + if c.MembershipService.Resources() == nil { + // set up the genesis block + bundle, err = channelconfig.NewBundle(c.ChannelConfig.ID(), ctx.Config, factory.GetDefault()) if err != nil { - logger.Errorf("Error getting tx from block: %s", err) - return err + return errors.Wrapf(err, "failed to build a new bundle") } - payl, err := protoutil.UnmarshalPayload(env.Payload) + } else { + configTxValidator := c.MembershipService.Resources().ConfigtxValidator() + err := configTxValidator.Validate(ctx) if err != nil { - logger.Errorf("[%s] unmarshal payload failed: %s", c.Channel, err) + return errors.Wrapf(err, "failed to validate config transaction, block number [%d]", blockNumber) + } + + bundle, 
err = channelconfig.NewBundle(configTxValidator.ChannelID(), ctx.Config, factory.GetDefault()) + if err != nil { + return errors.Wrapf(err, "failed to create next bundle") + } + + channelconfig.LogSanityChecks(bundle) + if err := capabilitiesSupported(bundle); err != nil { return err } - chdr, err := protoutil.UnmarshalChannelHeader(payl.Header.ChannelHeader) + } + + if err := c.commitConfig(txid, blockNumber, ctx.Config.Sequence, raw); err != nil { + return errors.Wrapf(err, "failed committing configtx to the vault") + } + + return c.applyBundle(bundle) +} + +// Commit commits the transactions in the block passed as argument +func (c *Service) Commit(block *common.Block) error { + c.Tracer.StartAt("commit", time.Now()) + for i, tx := range block.Data.Data { + + env, _, chdr, err := fabricutils.UnmarshalTx(tx) if err != nil { - logger.Errorf("[%s] unmarshal channel header failed: %s", c.Channel, err) + logger.Errorf("[%s] unmarshal tx failed: %s", c.ChannelConfig.ID(), err) return err } @@ -118,12 +355,12 @@ func (c *Committer) Commit(block *common.Block) error { } } else { if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("[%s] Received unhandled transaction type: %s", c.Channel, chdr.Type) + logger.Debugf("[%s] Received unhandled transaction type: %s", c.ChannelConfig.ID(), chdr.Type) } } c.Tracer.AddEventAt("commit", "end", time.Now()) - c.Notify(event) + c.notify(event) if logger.IsEnabledFor(zapcore.DebugLevel) { logger.Debugf("commit transaction [%s] in filteredBlock [%d]", chdr.TxId, block.Header.Number) } @@ -135,18 +372,13 @@ func (c *Committer) Commit(block *common.Block) error { // IsFinal takes in input a transaction id and waits for its confirmation // with the respect to the passed context that can be used to set a deadline // for the waiting time. 
-func (c *Committer) IsFinal(ctx context.Context, txID string) error { +func (c *Service) IsFinal(ctx context.Context, txID string) error { if logger.IsEnabledFor(zapcore.DebugLevel) { logger.Debugf("Is [%s] final?", txID) } - committer, err := c.Network.Committer(c.Channel) - if err != nil { - return err - } - for iter := 0; iter < c.ChannelConfig.CommitterFinalityNumRetries(); iter++ { - vd, _, deps, err := committer.Status(txID) + vd, _, err := c.Status(txID) if err != nil { logger.Errorf("Is [%s] final? Failed getting transaction status from vault", txID) return errors.WithMessagef(err, "failed getting transaction status from vault [%s]", txID) @@ -160,31 +392,8 @@ func (c *Committer) IsFinal(ctx context.Context, txID string) error { logger.Debugf("Tx [%s] is not valid", txID) return errors.Errorf("transaction [%s] is not valid", txID) case driver.Busy: - logger.Debugf("Tx [%s] is known with deps [%v]", txID, deps) - if len(deps) == 0 { - continue - } - for _, id := range deps { - logger.Debugf("Check finality of dependant transaction [%s]", id) - if err := c.IsFinal(ctx, id); err != nil { - logger.Errorf("Check finality of dependant transaction [%s], failed [%s]", id, err) - return err - } - } - return nil - case driver.HasDependencies: - logger.Debugf("Tx [%s] is unknown with deps [%v]", txID, deps) - if len(deps) == 0 { - return c.Finality.IsFinal(txID, c.Network.PickPeer(driver.PeerForFinality).Address) - } - for _, id := range deps { - logger.Debugf("Check finality of dependant transaction [%s]", id) - if err := c.IsFinal(ctx, id); err != nil { - logger.Errorf("Check finality of dependant transaction [%s], failed [%s]", id, err) - return err - } - } - return nil + logger.Debugf("Tx [%s] is known", txID) + continue case driver.Unknown: if iter <= 1 { logger.Debugf("Tx [%s] is unknown with no deps, wait a bit and retry [%d]", txID, iter) @@ -194,14 +403,14 @@ func (c *Committer) IsFinal(ctx context.Context, txID string) error { if 
logger.IsEnabledFor(zapcore.DebugLevel) { logger.Debugf("Tx [%s] is unknown with no deps, remote check [%d][%s]", txID, iter, debug.Stack()) } - peer := c.Network.PickPeer(driver.PeerForFinality).Address - err := c.Finality.IsFinal(txID, peer) + peer := c.ConfigService.PickPeer(driver.PeerForFinality).Address + err := c.FabricFinality.IsFinal(txID, peer) if err == nil { logger.Debugf("Tx [%s] is final, remote check on [%s]", txID, peer) return nil } - if vd, _, _, err2 := committer.Status(txID); err2 == nil && vd == driver.Unknown { + if vd, _, err2 := c.Status(txID); err2 == nil && vd == driver.Unknown { logger.Debugf("Tx [%s] is not final for remote [%s], return [%s], [%d][%s]", txID, peer, err, vd, err2) return err } @@ -215,7 +424,114 @@ func (c *Committer) IsFinal(ctx context.Context, txID string) error { return c.listenTo(ctx, txID, c.WaitForEventTimeout) } -func (c *Committer) addListener(txid string, ch chan TxEvent) { +func (c *Service) GetProcessNamespace() []string { + return c.ProcessNamespaces +} + +func (c *Service) ReloadConfigTransactions() error { + c.MembershipService.ResourcesApplyLock.Lock() + defer c.MembershipService.ResourcesApplyLock.Unlock() + + qe, err := c.Vault.NewQueryExecutor() + if err != nil { + return errors.WithMessagef(err, "failed getting query executor") + } + defer qe.Done() + + logger.Infof("looking up the latest config block available") + var sequence uint64 = 0 + for { + txID := ConfigTXPrefix + strconv.FormatUint(sequence, 10) + vc, _, err := c.Vault.Status(txID) + if err != nil { + return errors.WithMessagef(err, "failed getting tx's status [%s]", txID) + } + logger.Infof("check config block at txID [%s], status [%v]...", txID, vc) + done := false + switch vc { + case driver.Valid: + logger.Infof("config block available, txID [%s], loading...", txID) + + key, err := rwset.CreateCompositeKey(channelConfigKey, []string{strconv.FormatUint(sequence, 10)}) + if err != nil { + return errors.Wrapf(err, "cannot create configtx rws 
key") + } + envelope, err := qe.GetState(peerNamespace, key) + if err != nil { + return errors.Wrapf(err, "failed setting configtx state in rws") + } + env, err := protoutil.UnmarshalEnvelope(envelope) + if err != nil { + return errors.Wrapf(err, "cannot get payload from config transaction [%s]", txID) + } + payload, err := protoutil.UnmarshalPayload(env.Payload) + if err != nil { + return errors.Wrapf(err, "cannot get payload from config transaction [%s]", txID) + } + ctx, err := configtx.UnmarshalConfigEnvelope(payload.Data) + if err != nil { + return errors.Wrapf(err, "error unmarshalling config which passed initial validity checks [%s]", txID) + } + + var bundle *channelconfig.Bundle + if c.MembershipService.Resources() == nil { + // set up the genesis block + bundle, err = channelconfig.NewBundle(c.ChannelConfig.ID(), ctx.Config, factory.GetDefault()) + if err != nil { + return errors.Wrapf(err, "failed to build a new bundle") + } + } else { + configTxValidator := c.MembershipService.Resources().ConfigtxValidator() + err := configTxValidator.Validate(ctx) + if err != nil { + return errors.Wrapf(err, "failed to validate config transaction [%s]", txID) + } + + bundle, err = channelconfig.NewBundle(configTxValidator.ChannelID(), ctx.Config, factory.GetDefault()) + if err != nil { + return errors.Wrapf(err, "failed to create next bundle") + } + + channelconfig.LogSanityChecks(bundle) + if err := capabilitiesSupported(bundle); err != nil { + return err + } + } + + if err := c.applyBundle(bundle); err != nil { + return err + } + + sequence = sequence + 1 + continue + case driver.Unknown: + if sequence == 0 { + // Give a chance to 1, in certain setting the first block starts with 1 + sequence++ + continue + } + + logger.Infof("config block at txID [%s] unavailable, stop loading", txID) + done = true + default: + return errors.Errorf("invalid configtx's [%s] status [%d]", txID, vc) + } + if done { + logger.Infof("loading config block done") + break + } + } + if 
sequence == 1 { + logger.Infof("no config block available, must start from genesis") + // no configuration block found + return nil + } + logger.Infof("latest config block available at sequence [%d]", sequence-1) + + return nil +} + +func (c *Service) addListener(txid string, ch chan TxEvent) { c.mutex.Lock() defer c.mutex.Unlock() @@ -228,7 +544,7 @@ func (c *Committer) addListener(txid string, ch chan TxEvent) { c.listeners[txid] = ls } -func (c *Committer) deleteListener(txid string, ch chan TxEvent) { +func (c *Service) deleteListener(txid string, ch chan TxEvent) { c.mutex.Lock() defer c.mutex.Unlock() @@ -245,7 +561,7 @@ func (c *Committer) deleteListener(txid string, ch chan TxEvent) { } } -func (c *Committer) Notify(event TxEvent) { +func (c *Service) notify(event TxEvent) { c.mutex.Lock() defer c.mutex.Unlock() @@ -260,24 +576,14 @@ func (c *Committer) Notify(event TxEvent) { for _, listener := range listeners { listener <- event } - - for _, txid := range event.DependantTxIDs { - listeners := c.listeners[txid] - if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("Notify the finality of [%s] (dependant) to [%d] listeners, event: [%v]", txid, len(listeners), event) - } - for _, listener := range listeners { - listener <- event - } - } } // notifyChaincodeListeners notifies the chaincode event to the registered chaincode listeners. 
-func (c *Committer) notifyChaincodeListeners(event *ChaincodeEvent) { - c.publisher.Publish(event) +func (c *Service) notifyChaincodeListeners(event *ChaincodeEvent) { + c.EventsPublisher.Publish(event) } -func (c *Committer) listenTo(ctx context.Context, txid string, timeout time.Duration) error { +func (c *Service) listenTo(ctx context.Context, txid string, timeout time.Duration) error { c.Tracer.Start("committer-listenTo-start") if logger.IsEnabledFor(zapcore.DebugLevel) { @@ -290,10 +596,6 @@ func (c *Committer) listenTo(ctx context.Context, txid string, timeout time.Dura c.addListener(txid, ch) defer c.deleteListener(txid, ch) - committer, err := c.Network.Committer(c.Channel) - if err != nil { - return err - } iterations := int(timeout.Milliseconds() / c.pollingTimeout.Milliseconds()) if iterations == 0 { iterations = 1 @@ -317,7 +619,7 @@ func (c *Committer) listenTo(ctx context.Context, txid string, timeout time.Dura if logger.IsEnabledFor(zapcore.DebugLevel) { logger.Debugf("Got a timeout for finality of [%s], check the status", txid) } - vd, _, _, err := committer.Status(txid) + vd, _, err := c.Status(txid) if err == nil { switch vd { case driver.Valid: @@ -346,3 +648,292 @@ func (c *Committer) listenTo(ctx context.Context, txid string, timeout time.Dura c.Tracer.End("committer-listenTo-end") return errors.Errorf("failed to listen to transaction [%s] for timeout", txid) } + +func (c *Service) commitConfig(txID string, blockNumber uint64, seq uint64, envelope []byte) error { + logger.Infof("[Channel: %s] commit config transaction number [bn:%d][seq:%d]", c.ChannelConfig.ID(), blockNumber, seq) + + rws, err := c.Vault.NewRWSet(txID) + if err != nil { + return errors.Wrapf(err, "cannot create rws for configtx") + } + defer rws.Done() + + key, err := rwset.CreateCompositeKey(channelConfigKey, []string{strconv.FormatUint(seq, 10)}) + if err != nil { + return errors.Wrapf(err, "cannot create configtx rws key") + } + if err := rws.SetState(peerNamespace, key, 
envelope); err != nil { + return errors.Wrapf(err, "failed setting configtx state in rws") + } + rws.Done() + if err := c.CommitTX(txID, blockNumber, 0, nil); err != nil { + if err2 := c.DiscardTx(txID, ""); err2 != nil { + logger.Errorf("failed committing configtx rws [%s]", err2) + } + return errors.Wrapf(err, "failed committing configtx rws") + } + return nil +} + +func (c *Service) commit(txID string, block uint64, indexInBlock int, envelope *common.Envelope) error { + // This is a normal transaction, validated by Fabric. + // Commit it cause Fabric says it is valid. + logger.Debugf("[%s] committing", txID) + + // Match rwsets if envelope is not empty + if envelope != nil { + logger.Debugf("[%s] matching rwsets", txID) + + pt, headerType, err := transaction.NewProcessedTransactionFromEnvelope(envelope) + if err != nil && headerType == -1 { + logger.Errorf("[%s] failed to unmarshal envelope [%s]", txID, err) + return err + } + if headerType == int32(common.HeaderType_ENDORSER_TRANSACTION) { + if !c.Vault.RWSExists(txID) && c.EnvelopeService.Exists(txID) { + // Then match rwsets + if err := c.extractStoredEnvelopeToVault(txID); err != nil { + return errors.WithMessagef(err, "failed to load stored enveloper into the vault") + } + if err := c.Vault.Match(txID, pt.Results()); err != nil { + logger.Errorf("[%s] rwsets do not match [%s]", txID, err) + return errors.Wrapf(ErrDiscardTX, "[%s] rwsets do not match [%s]", txID, err) + } + } else { + // Store it + envelopeRaw, err := proto.Marshal(envelope) + if err != nil { + return errors.WithMessagef(err, "failed to store unknown envelope for [%s]", txID) + } + if err := c.EnvelopeService.StoreEnvelope(txID, envelopeRaw); err != nil { + return errors.WithMessagef(err, "failed to store unknown envelope for [%s]", txID) + } + rws, _, err := c.RWSetLoaderService.GetRWSetFromEvn(txID) + if err != nil { + return errors.WithMessagef(err, "failed to get rws from envelope [%s]", txID) + } + rws.Done() + } + } + } + + // 
Post-Processes + logger.Debugf("[%s] post process rwset", txID) + + if err := c.postProcessTx(txID); err != nil { + // This should generate a panic + return err + } + + // Commit + logger.Debugf("[%s] commit in vault", txID) + if err := c.Vault.CommitTX(txID, block, indexInBlock); err != nil { + // This should generate a panic + return err + } + + return nil +} + +func (c *Service) commitUnknown(txID string, block uint64, indexInBlock int, envelope *common.Envelope) error { + // if an envelope exists for the passed txID, then commit it + if c.EnvelopeService.Exists(txID) { + return c.commitStoredEnvelope(txID, block, indexInBlock) + } + + var envelopeRaw []byte + var err error + if envelope != nil { + // Store it + envelopeRaw, err = proto.Marshal(envelope) + if err != nil { + return errors.WithMessagef(err, "failed to store unknown envelope for [%s]", txID) + } + } else { + // fetch envelope and store it + envelopeRaw, err = c.fetchEnvelope(txID) + if err != nil { + return errors.WithMessagef(err, "failed getting rwset for tx [%s]", txID) + } + } + + // shall we commit this unknown envelope + if ok, err := c.filterUnknownEnvelope(txID, envelopeRaw); err != nil || !ok { + logger.Debugf("[%s] unknown envelope will not be processed [%b,%s]", txID, ok, err) + return nil + } + + if err := c.EnvelopeService.StoreEnvelope(txID, envelopeRaw); err != nil { + return errors.WithMessagef(err, "failed to store unknown envelope for [%s]", txID) + } + rws, _, err := c.RWSetLoaderService.GetRWSetFromEvn(txID) + if err != nil { + return errors.WithMessagef(err, "failed to get rws from envelope [%s]", txID) + } + rws.Done() + return c.commit(txID, block, indexInBlock, envelope) +} + +func (c *Service) commitStoredEnvelope(txID string, block uint64, indexInBlock int) error { + logger.Debugf("found envelope for transaction [%s], committing it...", txID) + if err := c.extractStoredEnvelopeToVault(txID); err != nil { + return err + } + // commit + return c.commit(txID, block, 
indexInBlock, nil) +} + +func (c *Service) applyBundle(bundle *channelconfig.Bundle) error { + c.MembershipService.ResourcesLock.Lock() + defer c.MembershipService.ResourcesLock.Unlock() + c.MembershipService.ChannelResources = bundle + + // update the list of orderers + ordererConfig, exists := c.MembershipService.ChannelResources.OrdererConfig() + if exists { + logger.Debugf("[Channel: %s] Orderer config has changed, updating the list of orderers", c.ChannelConfig.ID()) + + var newOrderers []*grpc.ConnectionConfig + orgs := ordererConfig.Organizations() + for _, org := range orgs { + msp := org.MSP() + var tlsRootCerts [][]byte + tlsRootCerts = append(tlsRootCerts, msp.GetTLSRootCerts()...) + tlsRootCerts = append(tlsRootCerts, msp.GetTLSIntermediateCerts()...) + for _, endpoint := range org.Endpoints() { + logger.Debugf("[Channel: %s] Adding orderer endpoint: [%s:%s:%s]", c.ChannelConfig.ID(), org.Name(), org.MSPID(), endpoint) + // TODO: load from configuration + newOrderers = append(newOrderers, &grpc.ConnectionConfig{ + Address: endpoint, + ConnectionTimeout: 10 * time.Second, + TLSEnabled: true, + TLSRootCertBytes: tlsRootCerts, + }) + } + } + if len(newOrderers) != 0 { + logger.Debugf("[Channel: %s] Updating the list of orderers: (%d) found", c.ChannelConfig.ID(), len(newOrderers)) + if err := c.OrderingService.SetConfigOrderers(ordererConfig, newOrderers); err != nil { + return err + } + } else { + logger.Debugf("[Channel: %s] No orderers found in Channel config", c.ChannelConfig.ID()) + } + } else { + logger.Debugf("no orderer configuration found in Channel config") + } + + return nil +} + +func (c *Service) fetchEnvelope(txID string) ([]byte, error) { + pt, err := c.Ledger.GetTransactionByID(txID) + if err != nil { + return nil, errors.WithMessagef(err, "failed fetching tx [%s]", txID) + } + if !pt.IsValid() { + return nil, errors.Errorf("fetched tx [%s] should have been valid, instead it is [%s]", txID, peer.TxValidationCode_name[pt.ValidationCode()]) + 
} + return pt.Envelope(), nil +} + +func (c *Service) filterUnknownEnvelope(txID string, envelope []byte) (bool, error) { + rws, _, err := c.RWSetLoaderService.GetInspectingRWSetFromEvn(txID, envelope) + if err != nil { + return false, errors.WithMessagef(err, "failed to get rws from envelope [%s]", txID) + } + defer rws.Done() + + logger.Debugf("[%s] contains namespaces [%v] or `initialized` key", txID, rws.Namespaces()) + for _, ns := range rws.Namespaces() { + for _, namespace := range c.ProcessNamespaces { + if namespace == ns { + logger.Debugf("[%s] contains namespaces [%v], select it", txID, rws.Namespaces()) + return true, nil + } + } + + // search a read dependency on a key containing "initialized" + for pos := 0; pos < rws.NumReads(ns); pos++ { + k, err := rws.GetReadKeyAt(ns, pos) + if err != nil { + return false, errors.WithMessagef(err, "Error reading key at [%d]", pos) + } + if strings.Contains(k, "initialized") { + logger.Debugf("[%s] contains 'initialized' key [%v] in [%s], select it", txID, ns, rws.Namespaces()) + return true, nil + } + } + } + + status, _, _ := c.Status(txID) + return status == driver.Busy, nil +} + +func (c *Service) extractStoredEnvelopeToVault(txID string) error { + rws, _, err := c.RWSetLoaderService.GetRWSetFromEvn(txID) + if err != nil { + // If another replica of the same node created the RWSet + rws, _, err = c.RWSetLoaderService.GetRWSetFromETx(txID) + if err != nil { + return errors.WithMessagef(err, "failed to extract rws from envelope and etx [%s]", txID) + } + } + rws.Done() + return nil +} + +func (c *Service) postProcessTx(txID string) error { + if err := c.ProcessorManager.ProcessByID(c.ChannelConfig.ID(), txID); err != nil { + // This should generate a panic + return err + } + return nil +} + +func (c *Service) notifyTxStatus(txID string, vc driver.ValidationCode, message string) { + // We publish two events here: + // 1. The first will be caught by the listeners that are listening for any transaction id. + // 2. 
The second will be caught by the listeners that are listening for the specific transaction id. + sb, topic := compose.CreateTxTopic(c.ConfigService.NetworkName(), c.ChannelConfig.ID(), "") + c.EventsPublisher.Publish(&driver.TransactionStatusChanged{ + ThisTopic: topic, + TxID: txID, + VC: vc, + ValidationMessage: message, + }) + c.EventsPublisher.Publish(&driver.TransactionStatusChanged{ + ThisTopic: compose.AppendAttributesOrPanic(sb, txID), + TxID: txID, + VC: vc, + ValidationMessage: message, + }) +} + +func capabilitiesSupported(res channelconfig.Resources) error { + ac, ok := res.ApplicationConfig() + if !ok { + return errors.Errorf("[Channel %s] does not have application config so is incompatible", res.ConfigtxValidator().ChannelID()) + } + + if err := ac.Capabilities().Supported(); err != nil { + return errors.Wrapf(err, "[Channel %s] incompatible", res.ConfigtxValidator().ChannelID()) + } + + if err := res.ChannelConfig().Capabilities().Supported(); err != nil { + return errors.Wrapf(err, "[Channel %s] incompatible", res.ConfigtxValidator().ChannelID()) + } + + return nil +} + +type TxEventsListener struct { + listener driver.TxStatusChangeListener +} + +func (l *TxEventsListener) OnReceive(event events.Event) { + tsc := event.Message().(*driver.TransactionStatusChanged) + if err := l.listener.OnStatusChange(tsc.TxID, int(tsc.VC), tsc.ValidationMessage); err != nil { + logger.Errorf("failed to notify listener for tx [%s] with err [%s]", tsc.TxID, err) + } +} diff --git a/platform/fabric/core/generic/committer/config.go b/platform/fabric/core/generic/committer/config.go index 2e94ca7c5..d81101056 100644 --- a/platform/fabric/core/generic/committer/config.go +++ b/platform/fabric/core/generic/committer/config.go @@ -12,16 +12,12 @@ import ( "go.uber.org/zap/zapcore" ) -func (c *Committer) HandleConfig(block *common.Block, i int, event *TxEvent, env *common.Envelope, chHdr *common.ChannelHeader) error { +func (c *Service) HandleConfig(block *common.Block, i 
int, event *TxEvent, env *common.Envelope, chHdr *common.ChannelHeader) error { if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("[%s] Config transaction received: %s", c.Channel, chHdr.TxId) + logger.Debugf("[%s] Config transaction received: %s", c.ChannelConfig.ID(), chHdr.TxId) } - committer, err := c.Network.Committer(c.Channel) - if err != nil { - return errors.Wrapf(err, "cannot get Committer for channel [%s]", c.Channel) - } - if err := committer.CommitConfig(block.Header.Number, block.Data.Data[i], env); err != nil { - return errors.Wrapf(err, "cannot commit config envelope for channel [%s]", c.Channel) + if err := c.CommitConfig(block.Header.Number, block.Data.Data[i], env); err != nil { + return errors.Wrapf(err, "cannot commit config envelope for channel [%s]", c.ChannelConfig.ID()) } return nil } diff --git a/platform/fabric/core/generic/committer/driver/driver.go b/platform/fabric/core/generic/committer/driver/driver.go deleted file mode 100644 index fbf553caf..000000000 --- a/platform/fabric/core/generic/committer/driver/driver.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package driver - -import ( - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" -) - -// Vault models a key-value store that can be updated by committing rwsets -type Vault interface { - CommitTX(txid string, block uint64, indexInBloc int) error - DiscardTx(txID string, message string) error -} - -type Committer interface { - Status(txid string) (driver.ValidationCode, []string, []view.Identity, error) - Validate(txid string) (driver.ValidationCode, error) - CommitTX(txid string, block uint64, indexInBloc int) error - DiscardTX(txid string) error -} - -// Driver is the interface that must be implemented by a committer -// driver. -type Driver interface { - // Open returns a new Committer with the respect to the passed vault. - // The name is a string in a driver-specific format. - // The returned Committer is only used by one goroutine at a time. 
- Open(name string, sp view2.ServiceProvider, vault Vault) (Committer, error) -} diff --git a/platform/fabric/core/generic/committer/endorsertx.go b/platform/fabric/core/generic/committer/endorsertx.go index 59943a4b8..97e7e3385 100644 --- a/platform/fabric/core/generic/committer/endorsertx.go +++ b/platform/fabric/core/generic/committer/endorsertx.go @@ -17,9 +17,9 @@ import ( type ValidationFlags []uint8 -func (c *Committer) HandleEndorserTransaction(block *common.Block, i int, event *TxEvent, env *common.Envelope, chHdr *common.ChannelHeader) error { +func (c *Service) HandleEndorserTransaction(block *common.Block, i int, event *TxEvent, env *common.Envelope, chHdr *common.ChannelHeader) error { if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("[%s] Endorser transaction received: %s", c.Channel, chHdr.TxId) + logger.Debugf("[%s] Endorser transaction received: %s", c.ChannelConfig.ID(), chHdr.TxId) } if len(block.Metadata.Metadata) < int(common.BlockMetadataIndex_TRANSACTIONS_FILTER) { return errors.Errorf("block metadata lacks transaction filter") @@ -57,7 +57,7 @@ func (c *Committer) HandleEndorserTransaction(block *common.Block, i int, event } // GetChaincodeEvents reads the chaincode events and notifies the listeners registered to the specific chaincode. -func (c *Committer) GetChaincodeEvents(env *common.Envelope, block *common.Block) error { +func (c *Service) GetChaincodeEvents(env *common.Envelope, block *common.Block) error { chaincodeEvent, err := readChaincodeEvent(env, block.Header.Number) if err != nil { return errors.Wrapf(err, "error reading chaincode event") @@ -73,12 +73,7 @@ func (c *Committer) GetChaincodeEvents(env *common.Envelope, block *common.Block // CommitEndorserTransaction commits the transaction to the vault. // It returns true, if the transaction was already processed, false otherwise. 
-func (c *Committer) CommitEndorserTransaction(txID string, block *common.Block, indexInBlock int, env *common.Envelope, event *TxEvent) (bool, error) { - committer, err := c.Network.Committer(c.Channel) - if err != nil { - return false, errors.Wrapf(err, "cannot get Committer for channel [%s]", c.Channel) - } - +func (c *Service) CommitEndorserTransaction(txID string, block *common.Block, indexInBlock int, env *common.Envelope, event *TxEvent) (bool, error) { blockNum := block.Header.Number if logger.IsEnabledFor(zapcore.DebugLevel) { logger.Debugf("transaction [%s] in block [%d] is valid for fabric, commit!", txID, blockNum) @@ -88,11 +83,10 @@ func (c *Committer) CommitEndorserTransaction(txID string, block *common.Block, event.Block = blockNum event.IndexInBlock = indexInBlock - vc, _, deps, err := committer.Status(txID) + vc, _, err := c.Status(txID) if err != nil { return false, errors.Wrapf(err, "failed getting tx's status [%s]", txID) } - event.DependantTxIDs = append(event.DependantTxIDs, deps...) 
switch vc { case driver.Valid: @@ -110,34 +104,28 @@ func (c *Committer) CommitEndorserTransaction(txID string, block *common.Block, } if block != nil { - if err := committer.CommitTX(event.TxID, event.Block, event.IndexInBlock, env); err != nil { - return false, errors.Wrapf(err, "failed committing transaction [%s] with deps [%v]", txID, deps) + if err := c.CommitTX(event.TxID, event.Block, event.IndexInBlock, env); err != nil { + return false, errors.Wrapf(err, "failed committing transaction [%s]", txID) } return false, nil } - if err := committer.CommitTX(event.TxID, event.Block, event.IndexInBlock, nil); err != nil { - return false, errors.Wrapf(err, "failed committing transaction [%s] with deps [%v]", txID, deps) + if err := c.CommitTX(event.TxID, event.Block, event.IndexInBlock, nil); err != nil { + return false, errors.Wrapf(err, "failed committing transaction [%s]", txID) } return false, nil } // DiscardEndorserTransaction discards the transaction from the vault -func (c *Committer) DiscardEndorserTransaction(txID string, block *common.Block, event *TxEvent) error { - committer, err := c.Network.Committer(c.Channel) - if err != nil { - return errors.Wrapf(err, "cannot get Committer for channel [%s]", c.Channel) - } - +func (c *Service) DiscardEndorserTransaction(txID string, block *common.Block, event *TxEvent) error { blockNum := block.Header.Number if logger.IsEnabledFor(zapcore.DebugLevel) { logger.Debugf("transaction [%s] in block [%d] is not valid for fabric [%s], discard!", txID, blockNum, event.ValidationCode) } - vc, _, deps, err := committer.Status(txID) + vc, _, err := c.Status(txID) if err != nil { return errors.Wrapf(err, "failed getting tx's status [%s]", txID) } - event.DependantTxIDs = append(event.DependantTxIDs, deps...) 
switch vc { case driver.Valid: // TODO: this might be due the fact that there are transactions with the same tx-id, the first is valid, the others are all invalid @@ -149,7 +137,7 @@ func (c *Committer) DiscardEndorserTransaction(txID string, block *common.Block, // Nothing to commit default: event.Err = errors.Errorf("transaction [%s] status is not valid [%d], message [%s]", txID, event.ValidationCode, event.ValidationMessage) - err = committer.DiscardTx(event.TxID, event.ValidationMessage) + err = c.DiscardTx(event.TxID, event.ValidationMessage) if err != nil { logger.Errorf("failed discarding tx in state db with err [%s]", err) } diff --git a/platform/fabric/core/generic/committer/event.go b/platform/fabric/core/generic/committer/event.go index 23b347bd4..daec14b9a 100644 --- a/platform/fabric/core/generic/committer/event.go +++ b/platform/fabric/core/generic/committer/event.go @@ -17,7 +17,6 @@ import ( // TxEvent contains information for token transaction commit type TxEvent struct { TxID string - DependantTxIDs []string Committed bool ValidationCode peer.TxValidationCode ValidationMessage string diff --git a/platform/fabric/core/generic/committer/external.go b/platform/fabric/core/generic/committer/external.go deleted file mode 100644 index c95d551be..000000000 --- a/platform/fabric/core/generic/committer/external.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package committer - -import ( - "sort" - "sync" - - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" - - "github.com/pkg/errors" - - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/committer/driver" - fdriver "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" -) - -var ( - driversMu sync.RWMutex - drivers = make(map[string]driver.Driver) -) - -// Register makes a kvs driver available by the provided name. -// If Register is called twice with the same name or if driver is nil, -// it panics. -func Register(name string, driver driver.Driver) { - driversMu.Lock() - defer driversMu.Unlock() - if driver == nil { - panic("Register driver is nil") - } - if _, dup := drivers[name]; dup { - panic("Register called twice for driver " + name) - } - drivers[name] = driver -} - -// Drivers returns a sorted list of the names of the registered drivers. 
-func Drivers() []string { - driversMu.RLock() - defer driversMu.RUnlock() - list := make([]string, 0, len(drivers)) - for name := range drivers { - list = append(list, name) - } - sort.Strings(list) - return list -} - -type ExternalCommitter struct { - c driver.Committer -} - -func (c *ExternalCommitter) Status(txID string) (fdriver.ValidationCode, []string, []view.Identity, error) { - if c.c == nil { - return fdriver.Unknown, nil, nil, nil - } - return c.c.Status(txID) -} - -func (c *ExternalCommitter) Validate(txid string) (fdriver.ValidationCode, error) { - if c.c == nil { - panic("no external Committer defined, programming error") - } - return c.c.Validate(txid) -} - -func (c *ExternalCommitter) CommitTX(txid string, block uint64, indexInBloc int) error { - if c.c == nil { - panic("no external Committer defined, programming error") - } - return c.c.CommitTX(txid, block, indexInBloc) -} - -func (c *ExternalCommitter) DiscardTX(txid string) error { - if c.c == nil { - panic("no external Committer defined, programming error") - } - return c.c.DiscardTX(txid) -} - -func GetExternalCommitter(name string, sp view2.ServiceProvider, vault driver.Vault) (*ExternalCommitter, error) { - // TODO: support multiple external committers - dNames := Drivers() - if len(dNames) == 0 { - return &ExternalCommitter{c: nil}, nil - } - c, err := drivers[dNames[0]].Open(name, sp, vault) - if err != nil { - return nil, errors.Wrapf(err, "failed opening external Committer [%s]", dNames[0]) - } - return &ExternalCommitter{c: c}, nil -} diff --git a/platform/fabric/core/generic/config/config.go b/platform/fabric/core/generic/config/config.go deleted file mode 100644 index 43f4e4a55..000000000 --- a/platform/fabric/core/generic/config/config.go +++ /dev/null @@ -1,271 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package config - -import ( - "strconv" - "strings" - "time" - - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" - "github.com/pkg/errors" -) - -const ( - DefaultMSPCacheSize = 3 - DefaultBroadcastNumRetries = 3 - VaultPersistenceOptsKey = "vault.persistence.opts" - DefaultOrderingConnectionPoolSize = 10 -) - -var logger = flogging.MustGetLogger("fabric-sdk.core.generic.config") - -// configService models a configuration registry -type configService interface { - // GetString returns the value associated with the key as a string - GetString(key string) string - // GetDuration returns the value associated with the key as a duration - GetDuration(key string) time.Duration - // GetBool returns the value associated with the key as a boolean - GetBool(key string) bool - // IsSet checks to see if the key has been set in any of the data locations - IsSet(key string) bool - // UnmarshalKey takes a single key and unmarshals it into a Struct - UnmarshalKey(key string, rawVal interface{}) error - // GetPath allows configuration strings that specify a (config-file) relative path - GetPath(key string) string - // TranslatePath translates the passed path relative to the config path - TranslatePath(path string) string - // GetInt returns the value associated with the key as an int - GetInt(key string) int -} - -type Config struct { - name string - prefix string - configService configService -} - -func New(configService configService, name string, defaultConfig bool) (*Config, error) { - if configService.IsSet("fabric." 
+ name) { - return &Config{ - name: name, - prefix: name + ".", - configService: configService, - }, nil - } - - if defaultConfig { - return &Config{ - name: name, - prefix: "", - configService: configService, - }, nil - } - - return nil, errors.Errorf("configuration for [%s] not found", name) -} - -func (c *Config) Name() string { - return c.name -} - -func (c *Config) TLSEnabled() bool { - return c.configService.GetBool("fabric." + c.prefix + "tls.enabled") -} - -func (c *Config) TLSClientAuthRequired() bool { - return c.configService.GetBool("fabric." + c.prefix + "tls.clientAuthRequired") -} - -func (c *Config) TLSServerHostOverride() string { - return c.configService.GetString("fabric." + c.prefix + "tls.serverhostoverride") -} - -func (c *Config) ClientConnTimeout() time.Duration { - return c.configService.GetDuration("fabric." + c.prefix + "client.connTimeout") -} - -func (c *Config) TLSClientKeyFile() string { - return c.configService.GetPath("fabric." + c.prefix + "tls.clientKey.file") -} - -func (c *Config) TLSClientCertFile() string { - return c.configService.GetPath("fabric." + c.prefix + "tls.clientCert.file") -} - -func (c *Config) KeepAliveClientInterval() time.Duration { - return c.configService.GetDuration("fabric." + c.prefix + "keepalive.interval") -} - -func (c *Config) KeepAliveClientTimeout() time.Duration { - return c.configService.GetDuration("fabric." 
+ c.prefix + "keepalive.timeout") -} - -func (c *Config) Orderers() ([]*grpc.ConnectionConfig, error) { - var res []*grpc.ConnectionConfig - if err := c.configService.UnmarshalKey("fabric."+c.prefix+"orderers", &res); err != nil { - return nil, err - } - - for _, v := range res { - v.TLSEnabled = c.TLSEnabled() - } - - return res, nil -} - -func (c *Config) Peers() (map[driver.PeerFunctionType][]*grpc.ConnectionConfig, error) { - var connectionConfigs []*grpc.ConnectionConfig - if err := c.configService.UnmarshalKey("fabric."+c.prefix+"peers", &connectionConfigs); err != nil { - return nil, err - } - - res := map[driver.PeerFunctionType][]*grpc.ConnectionConfig{} - for _, v := range connectionConfigs { - v.TLSEnabled = c.TLSEnabled() - if v.TLSDisabled { - v.TLSEnabled = false - } - usage := strings.ToLower(v.Usage) - switch { - case len(usage) == 0: - res[driver.PeerForAnything] = append(res[driver.PeerForAnything], v) - case usage == "delivery": - res[driver.PeerForDelivery] = append(res[driver.PeerForDelivery], v) - case usage == "discovery": - res[driver.PeerForDiscovery] = append(res[driver.PeerForDiscovery], v) - case usage == "finality": - res[driver.PeerForFinality] = append(res[driver.PeerForFinality], v) - case usage == "query": - res[driver.PeerForQuery] = append(res[driver.PeerForQuery], v) - default: - logger.Warn("connection usage [%s] not recognized [%v]", usage, v) - } - } - return res, nil -} - -func (c *Config) Channels() ([]*Channel, error) { - var channels []*Channel - if err := c.configService.UnmarshalKey("fabric."+c.prefix+"channels", &channels); err != nil { - return nil, err - } - for _, channel := range channels { - if err := channel.Verify(); err != nil { - return nil, err - } - } - return channels, nil -} - -func (c *Config) VaultPersistenceType() string { - return c.configService.GetString("fabric." 
+ c.prefix + "vault.persistence.type") -} - -func (c *Config) VaultPersistencePrefix() string { - return VaultPersistenceOptsKey -} - -func (c *Config) VaultTXStoreCacheSize() int { - defaultCacheSize := 100 - v := c.configService.GetString("fabric." + c.prefix + "vault.txidstore.cache.size") - cacheSize, err := strconv.Atoi(v) - if err != nil { - return defaultCacheSize - } - - if cacheSize < 0 { - return defaultCacheSize - } - - return cacheSize -} - -// DefaultMSP returns the default MSP -func (c *Config) DefaultMSP() string { - return c.configService.GetString("fabric." + c.prefix + "defaultMSP") -} - -func (c *Config) MSPs() ([]MSP, error) { - var confs []MSP - if err := c.configService.UnmarshalKey("fabric."+c.prefix+"msps", &confs); err != nil { - return nil, err - } - return confs, nil -} - -// TranslatePath translates the passed path relative to the path from which the configuration has been loaded -func (c *Config) TranslatePath(path string) string { - return c.configService.TranslatePath(path) -} - -func (c *Config) Resolvers() ([]Resolver, error) { - var resolvers []Resolver - if err := c.configService.UnmarshalKey("fabric."+c.prefix+"endpoint.resolvers", &resolvers); err != nil { - return nil, err - } - return resolvers, nil -} - -func (c *Config) GetString(key string) string { - return c.configService.GetString("fabric." + c.prefix + key) -} - -func (c *Config) GetDuration(key string) time.Duration { - return c.configService.GetDuration("fabric." + c.prefix + key) -} - -func (c *Config) GetBool(key string) bool { - return c.configService.GetBool("fabric." + c.prefix + key) -} - -func (c *Config) IsSet(key string) bool { - return c.configService.IsSet("fabric." + c.prefix + key) -} - -func (c *Config) UnmarshalKey(key string, rawVal interface{}) error { - return c.configService.UnmarshalKey("fabric."+c.prefix+key, rawVal) -} - -func (c *Config) GetPath(key string) string { - return c.configService.GetPath("fabric." 
+ c.prefix + key) -} - -func (c *Config) MSPCacheSize() int { - v := c.configService.GetString("fabric." + c.prefix + "mspCacheSize") - if len(v) == 0 { - return DefaultMSPCacheSize - } - i, err := strconv.Atoi(v) - if err != nil { - return DefaultMSPCacheSize - } - return i -} - -func (c *Config) BroadcastNumRetries() int { - v := c.configService.GetInt("fabric." + c.prefix + "ordering.numRetries") - if v == 0 { - return DefaultBroadcastNumRetries - } - return v -} - -func (c *Config) BroadcastRetryInterval() time.Duration { - return c.configService.GetDuration("fabric." + c.prefix + "ordering.retryInterval") -} - -func (c *Config) OrdererConnectionPoolSize() int { - k := "fabric." + c.prefix + "ordering.connectionPoolSize" - if c.configService.IsSet(k) { - return c.configService.GetInt(k) - } - return DefaultOrderingConnectionPoolSize -} diff --git a/platform/fabric/core/generic/config/ds.go b/platform/fabric/core/generic/config/ds.go index ec67f5d92..11d341273 100644 --- a/platform/fabric/core/generic/config/ds.go +++ b/platform/fabric/core/generic/config/ds.go @@ -8,6 +8,8 @@ package config import ( "time" + + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" ) type BCCSP struct { @@ -88,6 +90,14 @@ type Chaincode struct { Private bool `yaml:"Private,omitempty"` } +func (c Chaincode) ID() string { + return c.Name +} + +func (c Chaincode) IsPrivate() bool { + return c.Private +} + type Finality struct { WaitForEventTimeout time.Duration `yaml:"WaitForEventTimeout,omitempty"` ForPartiesWaitTimeout time.Duration `yaml:"ForPartiesWaitTimeout,omitempty"` @@ -134,6 +144,10 @@ func (c *Channel) Verify() error { return nil } +func (c *Channel) ID() string { + return c.Name +} + func (c *Channel) DiscoveryDefaultTTLS() time.Duration { if c.Discovery.Timeout == 0 { return 5 * time.Minute @@ -155,6 +169,14 @@ func (c *Channel) DeliverySleepAfterFailure() time.Duration { return c.Delivery.SleepAfterFailure } +func (c *Channel) 
ChaincodeConfigs() []driver.ChaincodeConfig { + res := make([]driver.ChaincodeConfig, len(c.Chaincodes)) + for i, config := range c.Chaincodes { + res[i] = config + } + return res +} + func (c *Channel) FinalityWaitTimeout() time.Duration { if c.Finality.WaitForEventTimeout == 0 { return 20 * time.Second @@ -197,6 +219,14 @@ func (c *Channel) FinalityForPartiesWaitTimeout() time.Duration { return c.Finality.ForPartiesWaitTimeout } +func (c *Channel) GetNumRetries() uint { + return c.NumRetries +} + +func (c *Channel) GetRetrySleep() time.Duration { + return c.RetrySleep +} + type Network struct { Default bool `yaml:"default,omitempty"` DefaultMSP string `yaml:"defaultMSP"` diff --git a/platform/fabric/core/generic/config/service.go b/platform/fabric/core/generic/config/service.go new file mode 100644 index 000000000..4f1800426 --- /dev/null +++ b/platform/fabric/core/generic/config/service.go @@ -0,0 +1,320 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package config + +import ( + "fmt" + "math/rand" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" + + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" +) + +const ( + defaultMSPCacheSize = 3 + defaultBroadcastNumRetries = 3 + vaultPersistenceOptsKey = "vault.persistence.opts" + defaultOrderingConnectionPoolSize = 10 + defaultNumRetries = 3 + defaultRetrySleep = 1 * time.Second + defaultCacheSize = 100 +) + +var logger = flogging.MustGetLogger("fabric-sdk.core.generic.config") + +var funcTypeMap = map[string]driver.PeerFunctionType{ + "": driver.PeerForAnything, + "delivery": driver.PeerForDelivery, + "discovery": driver.PeerForDiscovery, + "finality": driver.PeerForFinality, + "query": driver.PeerForQuery, +} + 
+type Service struct { + driver.Configuration + name string + prefix string + + configuredOrderers int + orderers []*grpc.ConnectionConfig + peerMapping map[driver.PeerFunctionType][]*grpc.ConnectionConfig + channels map[string]*Channel + defaultChannel string +} + +func NewService(configService driver.Configuration, name string, defaultConfig bool) (*Service, error) { + var prefix string + if configService.IsSet("fabric." + name) { + prefix = name + "." + } + if len(prefix) == 0 && !defaultConfig { + return nil, errors.Errorf("configuration for [%s] not found", name) + } + + tlsEnabled := configService.GetBool(fmt.Sprintf("fabric.%stls.enabled", prefix)) + orderers, err := readItems[*grpc.ConnectionConfig](configService, prefix, "orderers") + if err != nil { + return nil, err + } + for _, v := range orderers { + v.TLSEnabled = tlsEnabled + } + peers, err := readItems[*grpc.ConnectionConfig](configService, prefix, "peers") + if err != nil { + return nil, err + } + peerMapping := createPeerMap(peers, tlsEnabled) + + channels, err := readItems[*Channel](configService, prefix, "channels") + if err != nil { + return nil, err + } + channelMap, defaultChannel, err := createChannelMap(channels) + if err != nil { + return nil, err + } + + return &Service{ + Configuration: configService, + name: name, + prefix: prefix, + configuredOrderers: len(orderers), + orderers: orderers, + peerMapping: peerMapping, + channels: channelMap, + defaultChannel: defaultChannel, + }, nil +} + +func createChannelMap(channels []*Channel) (map[string]*Channel, string, error) { + channelMap := make(map[string]*Channel, len(channels)) + var defaultChannel string + for _, channel := range channels { + if err := channel.Verify(); err != nil { + return nil, "", err + } + channelMap[channel.Name] = channel + if channel.Default { + defaultChannel = channel.Name + } + } + return channelMap, defaultChannel, nil +} + +func createPeerMap(peers []*grpc.ConnectionConfig, tlsEnabled bool) 
map[driver.PeerFunctionType][]*grpc.ConnectionConfig { + peerMapping := map[driver.PeerFunctionType][]*grpc.ConnectionConfig{} + for _, v := range peers { + v.TLSEnabled = tlsEnabled && !v.TLSDisabled + + if funcType, ok := funcTypeMap[strings.ToLower(v.Usage)]; ok { + peerMapping[funcType] = append(peerMapping[funcType], v) + } else { + logger.Warnf("connection usage [%s] not recognized [%v]", v.Usage, v) + } + } + return peerMapping +} + +func readItems[T any](configService driver.Configuration, prefix, key string) ([]T, error) { + var items []T + if err := configService.UnmarshalKey(fmt.Sprintf("fabric.%s%s", prefix, key), &items); err != nil { + return nil, err + } + return items, nil +} + +func (s *Service) NetworkName() string { + return s.name +} + +func (s *Service) TLSEnabled() bool { + return s.GetBool("tls.enabled") +} + +func (s *Service) TLSClientAuthRequired() bool { + return s.GetBool("tls.clientAuthRequired") +} + +func (s *Service) TLSServerHostOverride() string { + return s.GetString("tls.serverhostoverride") +} + +func (s *Service) ClientConnTimeout() time.Duration { + return s.GetDuration("client.connTimeout") +} + +func (s *Service) TLSClientKeyFile() string { + return s.GetPath("tls.clientKey.file") +} + +func (s *Service) TLSClientCertFile() string { + return s.GetPath("tls.clientCert.file") +} + +func (s *Service) KeepAliveClientInterval() time.Duration { + return s.GetDuration("keepalive.interval") +} + +func (s *Service) KeepAliveClientTimeout() time.Duration { + return s.GetDuration("keepalive.timeout") +} + +func (s *Service) NewDefaultChannelConfig(name string) driver.ChannelConfig { + return &Channel{ + Name: name, + Default: false, + Quiet: false, + NumRetries: defaultNumRetries, + RetrySleep: defaultRetrySleep, + Chaincodes: nil, + } +} + +func (s *Service) Orderers() []*grpc.ConnectionConfig { + return s.orderers +} + +func (s *Service) VaultPersistenceType() string { + return s.GetString("vault.persistence.type") +} + +func (s 
*Service) VaultPersistencePrefix() string { + return vaultPersistenceOptsKey +} + +func (s *Service) VaultTXStoreCacheSize() int { + if cacheSize, err := strconv.Atoi(s.GetString("vault.txidstore.cache.size")); err == nil && cacheSize >= 0 { + return cacheSize + } + return defaultCacheSize +} + +// DefaultMSP returns the default MSP +func (s *Service) DefaultMSP() string { + return s.GetString("defaultMSP") +} + +func (s *Service) MSPs() ([]MSP, error) { + var confs []MSP + if err := s.UnmarshalKey("msps", &confs); err != nil { + return nil, err + } + return confs, nil +} + +// TranslatePath translates the passed path relative to the path from which the configuration has been loaded +func (s *Service) TranslatePath(path string) string { + return s.Configuration.TranslatePath(path) +} + +func (s *Service) DefaultChannel() string { + return s.defaultChannel +} + +func (s *Service) ChannelIDs() []string { + channelIDs := make([]string, len(s.channels)) + var i int + for channelID := range s.channels { + channelIDs[i] = channelID + i++ + } + return channelIDs +} + +func (s *Service) Channel(name string) driver.ChannelConfig { + return s.channels[name] +} + +func (s *Service) Resolvers() ([]Resolver, error) { + var resolvers []Resolver + if err := s.UnmarshalKey("endpoint.resolvers", &resolvers); err != nil { + return nil, err + } + return resolvers, nil +} + +func (s *Service) GetString(key string) string { + return s.Configuration.GetString("fabric." + s.prefix + key) +} + +func (s *Service) GetDuration(key string) time.Duration { + return s.Configuration.GetDuration("fabric." + s.prefix + key) +} + +func (s *Service) GetBool(key string) bool { + return s.Configuration.GetBool("fabric." + s.prefix + key) +} + +func (s *Service) IsSet(key string) bool { + return s.Configuration.IsSet("fabric." 
+ s.prefix + key) +} + +func (s *Service) UnmarshalKey(key string, rawVal interface{}) error { + return s.Configuration.UnmarshalKey("fabric."+s.prefix+key, rawVal) +} + +func (s *Service) GetPath(key string) string { + return s.Configuration.GetPath("fabric." + s.prefix + key) +} + +func (s *Service) MSPCacheSize() int { + if cacheSize, err := strconv.Atoi(s.GetString("mspCacheSize")); err == nil { + return cacheSize + } + return defaultMSPCacheSize +} + +func (s *Service) BroadcastNumRetries() int { + if v := s.GetInt("ordering.numRetries"); v != 0 { + return v + } + return defaultBroadcastNumRetries +} + +func (s *Service) BroadcastRetryInterval() time.Duration { + return s.GetDuration("ordering.retryInterval") +} + +func (s *Service) OrdererConnectionPoolSize() int { + if s.IsSet("ordering.connectionPoolSize") { + return s.GetInt("ordering.connectionPoolSize") + } + return defaultOrderingConnectionPoolSize +} + +func (s *Service) SetConfigOrderers(orderers []*grpc.ConnectionConfig) error { + s.orderers = append(s.orderers[:s.configuredOrderers], orderers...) 
+ logger.Debugf("New Orderers [%d]", len(s.orderers)) + + return nil +} + +func (s *Service) PickOrderer() *grpc.ConnectionConfig { + if len(s.orderers) == 0 { + return nil + } + return s.orderers[rand.Intn(len(s.orderers))] +} + +func (s *Service) PickPeer(ft driver.PeerFunctionType) *grpc.ConnectionConfig { + source, ok := s.peerMapping[ft] + if !ok { + source = s.peerMapping[driver.PeerForAnything] + } + return source[rand.Intn(len(source))] +} + +func (s *Service) IsChannelQuiet(name string) bool { + channel, ok := s.channels[name] + return ok && channel.Quiet +} diff --git a/platform/fabric/core/generic/delivery.go b/platform/fabric/core/generic/delivery.go index 15a3ab026..dc4439ffb 100644 --- a/platform/fabric/core/generic/delivery.go +++ b/platform/fabric/core/generic/delivery.go @@ -8,69 +8,134 @@ package generic import ( "context" + "time" + + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/fabricutils" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/transaction" delivery2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/delivery" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/hash" "github.com/hyperledger/fabric-protos-go/common" pb "github.com/hyperledger/fabric-protos-go/peer" - "github.com/hyperledger/fabric/protoutil" ) type ValidationFlags []uint8 -func (c *Channel) StartDelivery(ctx context.Context) error { - c.DeliveryService.Start(ctx) +type DeliveryService struct { + channel string + channelConfig driver.ChannelConfig + hasher hash.Hasher + NetworkName string + LocalMembership driver.LocalMembership + ConfigService driver.ConfigService + PeerManager delivery2.PeerManager + Ledger driver.Ledger + waitForEventTimeout time.Duration + + 
deliveryService *delivery2.Delivery +} + +func NewDeliveryService( + channel string, + channelConfig driver.ChannelConfig, + hasher hash.Hasher, + networkName string, + localMembership driver.LocalMembership, + configService driver.ConfigService, + peerManager delivery2.PeerManager, + ledger driver.Ledger, + waitForEventTimeout time.Duration, + txIDStore driver.TXIDStore, + callback delivery2.Callback, +) (*DeliveryService, error) { + deliveryService, err := delivery2.New( + networkName, + channelConfig, + hasher, + localMembership, + configService, + peerManager, + ledger, + callback, + txIDStore, + channelConfig.CommitterWaitForEventTimeout(), + ) + if err != nil { + return nil, err + } + + return &DeliveryService{ + channel: channel, + channelConfig: channelConfig, + hasher: hasher, + NetworkName: networkName, + LocalMembership: localMembership, + ConfigService: configService, + PeerManager: peerManager, + Ledger: ledger, + waitForEventTimeout: waitForEventTimeout, + deliveryService: deliveryService, + }, nil +} + +func (c *DeliveryService) Start(ctx context.Context) error { + c.deliveryService.Start(ctx) return nil } -func (c *Channel) Scan(ctx context.Context, txID string, callback driver.DeliveryCallback) error { - vault := &fakeVault{txID: txID} - deliveryService, err := delivery2.New(c.ChannelConfig, c.SP, c.Network, func(block *common.Block) (bool, error) { - for i, tx := range block.Data.Data { - validationCode := ValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])[i] +func (c *DeliveryService) Stop() { + c.deliveryService.Stop() +} - if pb.TxValidationCode(validationCode) != pb.TxValidationCode_VALID { - continue - } +func (c *DeliveryService) Scan(ctx context.Context, txID string, callback driver.DeliveryCallback) error { + vault := &fakeVault{txID: txID} + deliveryService, err := delivery2.New( + c.NetworkName, + c.channelConfig, + c.hasher, + c.LocalMembership, + c.ConfigService, + c.PeerManager, + c.Ledger, + 
func(block *common.Block) (bool, error) { + for i, tx := range block.Data.Data { + validationCode := ValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])[i] - env, err := protoutil.UnmarshalEnvelope(tx) - if err != nil { - logger.Errorf("Error getting tx from block: %s", err) - return false, err - } - payload, err := protoutil.UnmarshalPayload(env.Payload) - if err != nil { - logger.Errorf("[%s] unmarshal payload failed: %s", c.ChannelName, err) - return false, err - } - channelHeader, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader) - if err != nil { - logger.Errorf("[%s] unmarshal Channel header failed: %s", c.ChannelName, err) - return false, err - } + if pb.TxValidationCode(validationCode) != pb.TxValidationCode_VALID { + continue + } + _, _, channelHeader, err := fabricutils.UnmarshalTx(tx) + if err != nil { + logger.Errorf("[%s] unmarshal tx failed: %s", c.channel, err) + return false, err + } - if common.HeaderType(channelHeader.Type) != common.HeaderType_ENDORSER_TRANSACTION { - continue - } + if common.HeaderType(channelHeader.Type) != common.HeaderType_ENDORSER_TRANSACTION { + continue + } - ptx, err := newProcessedTransactionFromEnvelopeRaw(tx) - if err != nil { - return false, err - } + ptx, err := transaction.NewProcessedTransactionFromEnvelopeRaw(tx) + if err != nil { + return false, err + } - stop, err := callback(ptx) - if err != nil { - // if an error occurred, stop processing - return false, err - } - if stop { - return true, nil + stop, err := callback(ptx) + if err != nil { + // if an error occurred, stop processing + return false, err + } + if stop { + return true, nil + } + vault.txID = channelHeader.TxId + logger.Debugf("commit transaction [%s] in block [%d]", channelHeader.TxId, block.Header.Number) } - vault.txID = channelHeader.TxId - logger.Debugf("commit transaction [%s] in block [%d]", channelHeader.TxId, block.Header.Number) - } - return false, nil - }, vault, 
c.ChannelConfig.CommitterWaitForEventTimeout()) + return false, nil + }, + vault, + c.channelConfig.CommitterWaitForEventTimeout(), + ) if err != nil { return err } diff --git a/platform/fabric/core/generic/delivery/deliverclient.go b/platform/fabric/core/generic/delivery/deliverclient.go index 59191990c..dfdca0559 100644 --- a/platform/fabric/core/generic/delivery/deliverclient.go +++ b/platform/fabric/core/generic/delivery/deliverclient.go @@ -13,6 +13,7 @@ import ( "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/proto" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" grpc2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" "github.com/hyperledger/fabric-protos-go/common" ab "github.com/hyperledger/fabric-protos-go/orderer" @@ -115,7 +116,7 @@ func (d *deliverClient) Certificate() *tls.Certificate { } // CreateDeliverEnvelope creates a signed envelope with SeekPosition_Newest for block -func CreateDeliverEnvelope(channelID string, signingIdentity SigningIdentity, cert *tls.Certificate, hasher Hasher, start *ab.SeekPosition) (*common.Envelope, error) { +func CreateDeliverEnvelope(channelID string, signingIdentity driver.SigningIdentity, cert *tls.Certificate, hasher Hasher, start *ab.SeekPosition) (*common.Envelope, error) { if logger.IsEnabledFor(zapcore.DebugLevel) { logger.Debugf("create delivery envelope starting from: [%s]", start.String()) } @@ -284,7 +285,7 @@ func CreateHeader(txType common.HeaderType, channelID string, creator []byte, tl } // CreateEnvelope creates a common.Envelope with given tx bytes, header, and SigningIdentity -func CreateEnvelope(data []byte, header *common.Header, signingIdentity SigningIdentity) (*common.Envelope, error) { +func CreateEnvelope(data []byte, header *common.Header, signingIdentity 
driver.SigningIdentity) (*common.Envelope, error) { payload := &common.Payload{ Header: header, Data: data, diff --git a/platform/fabric/core/generic/delivery/delivery.go b/platform/fabric/core/generic/delivery/delivery.go index 4964f8180..9a1be6eda 100644 --- a/platform/fabric/core/generic/delivery/delivery.go +++ b/platform/fabric/core/generic/delivery/delivery.go @@ -12,13 +12,10 @@ import ( "time" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/committer" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/hash" "github.com/hyperledger/fabric-protos-go/common" ab "github.com/hyperledger/fabric-protos-go/orderer" pb "github.com/hyperledger/fabric-protos-go/peer" @@ -49,19 +46,19 @@ type Vault interface { GetLastTxID() (string, error) } -type Network interface { - Name() string - Channel(name string) (driver.Channel, error) - PickPeer(funcType driver.PeerFunctionType) *grpc.ConnectionConfig - LocalMembership() driver.LocalMembership - Config() *config.Config +type PeerManager interface { + NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer.Client, error) } type Delivery struct { channel string - channelConfig *config.Channel - sp view2.ServiceProvider - network Network + channelConfig driver.ChannelConfig + hasher Hasher + NetworkName string + LocalMembership 
driver.LocalMembership + ConfigService driver.ConfigService + PeerManager PeerManager + Ledger driver.Ledger waitForEventTimeout time.Duration callback Callback vault Vault @@ -70,16 +67,31 @@ type Delivery struct { stop chan bool } -func New(channelConfig *config.Channel, sp view2.ServiceProvider, network Network, callback Callback, vault Vault, waitForEventTimeout time.Duration) (*Delivery, error) { +func New( + networkName string, + channelConfig driver.ChannelConfig, + hasher Hasher, + LocalMembership driver.LocalMembership, + ConfigService driver.ConfigService, + PeerManager PeerManager, + Ledger driver.Ledger, + callback Callback, + vault Vault, + waitForEventTimeout time.Duration, +) (*Delivery, error) { if channelConfig == nil { return nil, errors.Errorf("expected channel config, got nil") } d := &Delivery{ - channel: channelConfig.Name, + NetworkName: networkName, + channel: channelConfig.ID(), channelConfig: channelConfig, - sp: sp, - network: network, + hasher: hasher, + LocalMembership: LocalMembership, + ConfigService: ConfigService, + PeerManager: PeerManager, + Ledger: Ledger, waitForEventTimeout: waitForEventTimeout, callback: callback, vault: vault, @@ -115,14 +127,14 @@ func (d *Delivery) Run(ctx context.Context) error { default: if df == nil { if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("deliver service [%s], connecting...", d.network.Name(), d.channel) + logger.Debugf("deliver service [%s], connecting...", d.NetworkName, d.channel) } df, err = d.connect(ctx) if err != nil { - logger.Errorf("failed connecting to delivery service [%s:%s] [%s]. Wait 10 sec before reconnecting", d.network.Name(), d.channel, err) + logger.Errorf("failed connecting to delivery service [%s:%s] [%s]. 
Wait 10 sec before reconnecting", d.NetworkName, d.channel, err) time.Sleep(waitTime) if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("reconnecting to delivery service [%s:%s]", d.network.Name(), d.channel) + logger.Debugf("reconnecting to delivery service [%s:%s]", d.NetworkName, d.channel) } continue } @@ -132,7 +144,7 @@ func (d *Delivery) Run(ctx context.Context) error { if err != nil { df = nil logger.Errorf("delivery service [%s:%s:%s], failed receiving response [%s]", - d.client.Address(), d.network.Name(), d.channel, + d.client.Address(), d.NetworkName, d.channel, errors.WithMessagef(err, "error receiving deliver response from peer %s", d.client.Address())) continue } @@ -141,14 +153,14 @@ func (d *Delivery) Run(ctx context.Context) error { case *pb.DeliverResponse_Block: if r.Block == nil || r.Block.Data == nil || r.Block.Header == nil || r.Block.Metadata == nil { if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("deliver service [%s:%s:%s], received nil block", d.client.Address(), d.network.Name(), d.channel) + logger.Debugf("deliver service [%s:%s:%s], received nil block", d.client.Address(), d.NetworkName, d.channel) } time.Sleep(waitTime) df = nil } if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("delivery service [%s:%s:%s], commit block [%d]", d.client.Address(), d.network.Name(), d.channel, r.Block.Header.Number) + logger.Debugf("delivery service [%s:%s:%s], commit block [%d]", d.client.Address(), d.NetworkName, d.channel, r.Block.Header.Number) } d.lastBlockReceived = r.Block.Header.Number @@ -164,14 +176,14 @@ func (d *Delivery) Run(ctx context.Context) error { case *pb.DeliverResponse_Status: if r.Status == common.Status_NOT_FOUND { df = nil - logger.Warnf("delivery service [%s:%s:%s] status [%s], wait a few seconds before retrying", d.client.Address(), d.network.Name(), d.channel, r.Status) + logger.Warnf("delivery service [%s:%s:%s] status [%s], wait a few seconds before retrying", d.client.Address(), 
d.NetworkName, d.channel, r.Status) time.Sleep(waitTime) } else { - logger.Warnf("delivery service [%s:%s:%s] status [%s]", d.client.Address(), d.network.Name(), d.channel, r.Status) + logger.Warnf("delivery service [%s:%s:%s] status [%s]", d.client.Address(), d.NetworkName, d.channel, r.Status) } default: df = nil - logger.Errorf("delivery service [%s:%s:%s], got [%s]", d.client.Address(), d.network.Name(), d.channel, r) + logger.Errorf("delivery service [%s:%s:%s], got [%s]", d.client.Address(), d.NetworkName, d.channel, r) } } } @@ -181,19 +193,16 @@ func (d *Delivery) connect(ctx context.Context) (DeliverStream, error) { // first cleanup everything d.cleanup() - peerConnConf := d.network.PickPeer(driver.PeerForDelivery) + peerConnConf := d.ConfigService.PickPeer(driver.PeerForDelivery) address := peerConnConf.Address if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("connecting to deliver service at [%s] for [%s:%s]", address, d.network.Name(), d.channel) + logger.Debugf("connecting to deliver service at [%s] for [%s:%s]", address, d.NetworkName, d.channel) } - ch, err := d.network.Channel(d.channel) - if err != nil { - return nil, errors.WithMessagef(err, "failed connecting to channel [%s:%s]", d.network.Name(), d.channel) - } - d.client, err = ch.NewPeerClientForAddress(*peerConnConf) + var err error + d.client, err = d.PeerManager.NewPeerClientForAddress(*peerConnConf) if err != nil { - return nil, errors.WithMessagef(err, "failed creating peer client for address [%s][%s:%s]", address, d.network.Name(), d.channel) + return nil, errors.WithMessagef(err, "failed creating peer client for address [%s][%s:%s]", address, d.NetworkName, d.channel) } deliverClient, err := NewDeliverClient(d.client) if err != nil { @@ -206,9 +215,9 @@ func (d *Delivery) connect(ctx context.Context) (DeliverStream, error) { blockEnvelope, err := CreateDeliverEnvelope( d.channel, - d.network.LocalMembership().DefaultSigningIdentity(), + 
d.LocalMembership.DefaultSigningIdentity(), deliverClient.Certificate(), - hash.GetHasher(d.sp), + d.hasher, d.GetStartPosition(), ) if err != nil { @@ -252,12 +261,7 @@ func (d *Delivery) GetStartPosition() *ab.SeekPosition { if len(lastTxID) != 0 && !strings.HasPrefix(lastTxID, committer.ConfigTXPrefix) { // Retrieve block from Fabric - ch, err := d.network.Channel(d.channel) - if err != nil { - logger.Errorf("failed getting channel [%s], restarting from genesis: [%s]", d.channel, err) - return StartGenesis - } - blockNumber, err := ch.GetBlockNumberByTxID(lastTxID) + blockNumber, err := d.Ledger.GetBlockNumberByTxID(lastTxID) if err != nil { logger.Errorf("failed getting block number for transaction [%s], restart from genesis [%s]", lastTxID, err) return StartGenesis diff --git a/platform/fabric/core/generic/delivery/identity.go b/platform/fabric/core/generic/delivery/identity.go deleted file mode 100644 index a3e6354c2..000000000 --- a/platform/fabric/core/generic/delivery/identity.go +++ /dev/null @@ -1,12 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package delivery - -type SigningIdentity interface { - Serialize() ([]byte, error) - Sign(msg []byte) ([]byte, error) -} diff --git a/platform/fabric/core/generic/driver/driver.go b/platform/fabric/core/generic/driver/driver.go index 5ff68f53f..ee236d32c 100644 --- a/platform/fabric/core/generic/driver/driver.go +++ b/platform/fabric/core/generic/driver/driver.go @@ -7,6 +7,9 @@ SPDX-License-Identifier: Apache-2.0 package driver import ( + "fmt" + "reflect" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" @@ -14,9 +17,12 @@ import ( "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/id" metrics2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/metrics" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp" + mspdriver "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/sig" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" "github.com/hyperledger-labs/fabric-smart-client/platform/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs" metrics3 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/metrics" "github.com/pkg/errors" ) @@ -28,15 +34,18 @@ type Driver struct{} func (d *Driver) New(sp view.ServiceProvider, network string, defaultNetwork bool) (driver.FabricNetworkService, 
error) { logger.Debugf("creating new fabric network service for network [%s]", network) // bridge services - c, err := config.New(view.GetConfigService(sp), network, defaultNetwork) + configService, err := config.NewService(view.GetConfigService(sp), network, defaultNetwork) if err != nil { return nil, err } - sigService := generic.NewSigService(sp) + kvss := kvs.GetService(sp) + + deserializer := sig.NewMultiplexDeserializer() + sigService := sig.NewService(deserializer, kvss) // Endpoint service resolverService, err := endpoint.NewResolverService( - c, + configService, view.GetEndpointService(sp), ) if err != nil { @@ -52,12 +61,13 @@ func (d *Driver) New(sp view.ServiceProvider, network string, defaultNetwork boo // Local MSP Manager mspService := msp.NewLocalMSPManager( - sp, - c, + configService, + kvss, sigService, view.GetEndpointService(sp), view.GetIdentityProvider(sp).DefaultIdentity(), - c.MSPCacheSize(), + deserializer, + configService.MSPCacheSize(), ) if err := mspService.Load(); err != nil { return nil, errors.Wrap(err, "failed loading local msp service") } @@ -71,7 +81,16 @@ func (d *Driver) New(sp view.ServiceProvider, network string, defaultNetwork boo // New Network metrics := metrics2.NewMetrics(metrics3.GetProvider(sp)) - net, err := generic.NewNetwork(sp, network, c, idProvider, mspService, sigService, metrics, generic.NewChannel) + net, err := generic.NewNetwork( + sp, + network, + configService, + idProvider, + mspService, + sigService, + metrics, + generic.NewChannel, + ) if err != nil { return nil, errors.Wrap(err, "failed instantiating fabric service provider") } @@ -85,3 +104,11 @@ func (d *Driver) New(sp view.ServiceProvider, network string, defaultNetwork boo func init() { core.Register("generic", &Driver{}) } + +func DeserializerManager(sp view.ServiceProvider) mspdriver.DeserializerManager { + dm, err := sp.GetService(reflect.TypeOf((*mspdriver.DeserializerManager)(nil))) + if err != nil { + panic(fmt.Sprintf("failed looking up 
deserializer manager [%s]", err)) + } + return dm.(mspdriver.DeserializerManager) +} diff --git a/platform/fabric/core/generic/endpoint/resolver.go b/platform/fabric/core/generic/endpoint/resolver.go index b28005268..7d19fd93c 100644 --- a/platform/fabric/core/generic/endpoint/resolver.go +++ b/platform/fabric/core/generic/endpoint/resolver.go @@ -42,14 +42,19 @@ type Service interface { AddPublicKeyExtractor(extractor view2.PublicKeyExtractor) error } +type Config interface { + Resolvers() ([]config.Resolver, error) + TranslatePath(path string) string +} + type ResolverService struct { - config *config.Config + config Config service Service resolvers []*Resolver } // NewResolverService returns a new instance of the view-sdk endpoint resolverService -func NewResolverService(config *config.Config, service Service) (*ResolverService, error) { +func NewResolverService(config Config, service Service) (*ResolverService, error) { er := &ResolverService{ config: config, service: service, diff --git a/platform/fabric/core/generic/fabricutils/utils.go b/platform/fabric/core/generic/fabricutils/utils.go index 3cdcc355b..d7d7ea00f 100644 --- a/platform/fabric/core/generic/fabricutils/utils.go +++ b/platform/fabric/core/generic/fabricutils/utils.go @@ -28,6 +28,23 @@ type SerializableSigner interface { Serialize() ([]byte, error) } +func UnmarshalTx(tx []byte) (*common.Envelope, *common.Payload, *common.ChannelHeader, error) { + env, err := protoutil.UnmarshalEnvelope(tx) + if err != nil { + + return nil, nil, nil, errors.Wrap(err, "Error getting tx from block") + } + payl, err := protoutil.UnmarshalPayload(env.Payload) + if err != nil { + return nil, nil, nil, errors.Wrap(err, "unmarshal payload failed") + } + chdr, err := protoutil.UnmarshalChannelHeader(payl.Header.ChannelHeader) + if err != nil { + return nil, nil, nil, errors.Wrap(err, "unmarshal channel header failed") + } + return env, payl, chdr, nil +} + // CreateEndorserSignedTX assembles an Envelope message from 
proposal, endorsements, // and a signer. This function should be called by a client when it has // collected enough endorsements for a proposal to create a transaction and diff --git a/platform/fabric/core/generic/finality.go b/platform/fabric/core/generic/finality.go deleted file mode 100644 index e35ea639e..000000000 --- a/platform/fabric/core/generic/finality.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package generic - -import ( - "context" - - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" -) - -func (c *Channel) IsFinal(ctx context.Context, txID string) error { - if ctx == nil { - ctx = context.Background() - } - return c.Finality.IsFinal(ctx, txID) -} - -func (c *Channel) IsFinalForParties(txID string, parties ...view.Identity) error { - return c.Finality.IsFinalForParties(txID, parties...) -} diff --git a/platform/fabric/core/generic/finality/fabric.go b/platform/fabric/core/generic/finality/fabric.go index d8b13cb60..fcdc307f2 100644 --- a/platform/fabric/core/generic/finality/fabric.go +++ b/platform/fabric/core/generic/finality/fabric.go @@ -10,23 +10,20 @@ import ( "context" "time" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/delivery" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" ab "github.com/hyperledger/fabric-protos-go/orderer" "github.com/pkg/errors" "go.uber.org/zap/zapcore" ) -type Network interface { - Name() string - 
PickPeer(driver.PeerFunctionType) *grpc.ConnectionConfig - LocalMembership() driver.LocalMembership - Channel(id string) (driver.Channel, error) - IdentityProvider() driver.IdentityProvider - Config() *config.Config +var logger = flogging.MustGetLogger("fabric-sdk.core") + +type PeerManager interface { + NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer.Client, error) } type Hasher interface { @@ -34,22 +31,33 @@ type Hasher interface { } type FabricFinality struct { - channel string - network Network - hasher Hasher - waitForEventTimeout time.Duration + channel string + ConfigService driver.ConfigService + PeerManager PeerManager + DefaultSigningIdentity driver.SigningIdentity + hasher Hasher + waitForEventTimeout time.Duration } -func NewFabricFinality(channel string, network Network, hasher Hasher, waitForEventTimeout time.Duration) (*FabricFinality, error) { +func NewFabricFinality( + channel string, + ConfigService driver.ConfigService, + PeerManager PeerManager, + DefaultSigningIdentity driver.SigningIdentity, + hasher Hasher, + waitForEventTimeout time.Duration, +) (*FabricFinality, error) { if len(channel) == 0 { return nil, errors.Errorf("expected a channel, got empty string") } d := &FabricFinality{ - channel: channel, - network: network, - hasher: hasher, - waitForEventTimeout: waitForEventTimeout, + channel: channel, + ConfigService: ConfigService, + PeerManager: PeerManager, + DefaultSigningIdentity: DefaultSigningIdentity, + hasher: hasher, + waitForEventTimeout: waitForEventTimeout, } return d, nil @@ -63,11 +71,7 @@ func (d *FabricFinality) IsFinal(txID string, address string) error { var ctx context.Context var cancelFunc context.CancelFunc - ch, err := d.network.Channel(d.channel) - if err != nil { - return errors.WithMessagef(err, "failed connecting to channel [%s]", d.channel) - } - client, err := ch.NewPeerClientForAddress(*d.network.PickPeer(driver.PeerForFinality)) + client, err := 
d.PeerManager.NewPeerClientForAddress(*d.ConfigService.PickPeer(driver.PeerForFinality)) if err != nil { return errors.WithMessagef(err, "failed creating peer client for address [%s]", address) } @@ -87,7 +91,7 @@ func (d *FabricFinality) IsFinal(txID string, address string) error { blockEnvelope, err := delivery.CreateDeliverEnvelope( d.channel, - d.network.LocalMembership().DefaultSigningIdentity(), + d.DefaultSigningIdentity, deliverClient.Certificate(), d.hasher, &ab.SeekPosition{ diff --git a/platform/fabric/core/generic/finality/finality.go b/platform/fabric/core/generic/finality/finality.go deleted file mode 100644 index b56e57f85..000000000 --- a/platform/fabric/core/generic/finality/finality.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package finality - -import ( - "context" - - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" - - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" - "go.uber.org/zap/zapcore" -) - -var logger = flogging.MustGetLogger("fabric-sdk.finality") - -type Config interface { - TLSEnabled() bool -} - -type Committer interface { - // IsFinal takes in input a transaction id and waits for its confirmation. 
- IsFinal(ctx context.Context, txID string) error -} - -type Finality struct { - channel string - network Network - sp view2.ServiceProvider - committer Committer - TLSEnabled bool - channelConfig *config.Channel -} - -func NewService(sp view2.ServiceProvider, network Network, channelConfig *config.Channel, committer Committer) (*Finality, error) { - return &Finality{ - sp: sp, - network: network, - committer: committer, - channel: channelConfig.Name, - channelConfig: channelConfig, - TLSEnabled: true, - }, nil -} - -func (f *Finality) IsFinal(ctx context.Context, txID string) error { - if ctx == nil { - ctx = context.Background() - } - return f.committer.IsFinal(ctx, txID) -} - -func (f *Finality) IsFinalForParties(txID string, parties ...view.Identity) error { - if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("Is [%s] final for parties [%v]?", txID, parties) - } - - for _, party := range parties { - _, err := view2.GetManager(f.sp).InitiateView( - NewIsFinalInitiatorView( - f.network.Config().Name(), f.channel, txID, party, - f.channelConfig.FinalityForPartiesWaitTimeout(), - ), - ) - if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("Is [%s] final on [%s]: [%s]?", txID, party, err) - } - if err != nil { - return err - } - } - return nil -} diff --git a/platform/fabric/core/generic/finality/view.go b/platform/fabric/core/generic/finality/view.go deleted file mode 100644 index 1b18191a7..000000000 --- a/platform/fabric/core/generic/finality/view.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. 
- -SPDX-License-Identifier: Apache-2.0 -*/ - -package finality - -import ( - "time" - - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/session" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" - "github.com/pkg/errors" -) - -type IsFinalRequest struct { - Network string - Channel string - TxID string - Timeout time.Duration -} - -type IsFinalResponse struct { - Err error -} - -type IsFinalInitiatorView struct { - request *IsFinalRequest - recipient view.Identity -} - -func NewIsFinalInitiatorView(network, channel, txID string, recipient view.Identity, timeout time.Duration) *IsFinalInitiatorView { - return &IsFinalInitiatorView{request: &IsFinalRequest{ - Network: network, Channel: channel, TxID: txID, - Timeout: timeout, - }, recipient: recipient} -} - -func (i *IsFinalInitiatorView) Call(context view.Context) (interface{}, error) { - session, err := session.NewJSON(context, i, i.recipient) - if err != nil { - return nil, errors.Wrapf(err, "failed to create session to [%s]", i.recipient) - } - if err := session.Send(i.request); err != nil { - return nil, errors.Wrapf(err, "failed to send request to [%s]", i.recipient) - } - response := &IsFinalResponse{} - if err := session.ReceiveWithTimeout(response, i.request.Timeout); err != nil { - return nil, errors.Wrapf(err, "failed to receive response from [%s]", i.recipient) - } - return nil, response.Err -} diff --git a/platform/fabric/core/generic/id/info_test.go b/platform/fabric/core/generic/id/info_test.go index 8d2208f5a..a5bbf9f5b 100644 --- a/platform/fabric/core/generic/id/info_test.go +++ b/platform/fabric/core/generic/id/info_test.go @@ -10,9 +10,10 @@ import ( "strings" "testing" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/sig" + idemix2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/idemix" x5092 
"github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/x509" - sig2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/core/sig" _ "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/db/driver/memory" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs/mock" @@ -27,13 +28,13 @@ func TestInfoIdemix(t *testing.T) { kvss, err := kvs.NewWithConfig(registry, "memory", "", &mock.ConfigProvider{}) assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - sigService := sig2.NewSignService(registry, nil, kvss) + sigService := sig.NewService(sig.NewMultiplexDeserializer(), kvss) assert.NoError(t, registry.RegisterService(sigService)) config, err := msp2.GetLocalMspConfigWithType("./testdata/idemix", nil, "idemix", "idemix") assert.NoError(t, err) - p, err := idemix2.NewProviderWithEidRhNymPolicy(config, registry) + p, err := idemix2.NewProviderWithEidRhNymPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p) diff --git a/platform/fabric/core/generic/ledger.go b/platform/fabric/core/generic/ledger.go index cf816aab5..5438c5f30 100644 --- a/platform/fabric/core/generic/ledger.go +++ b/platform/fabric/core/generic/ledger.go @@ -8,55 +8,68 @@ package generic import ( "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/proto" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/transaction" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" "github.com/hyperledger/fabric-protos-go/common" "github.com/hyperledger/fabric-protos-go/peer" - "github.com/hyperledger/fabric/protoutil" + "github.com/pkg/errors" ) -// NewRWSet returns a RWSet for this ledger. 
-// A client may obtain more than one such simulator; they are made unique -// by way of the supplied txid -func (c *Channel) NewRWSet(txid string) (driver.RWSet, error) { - return c.Vault.NewRWSet(txid) +type Ledger struct { + ChannelName string + ChaincodeManager driver.ChaincodeManager + LocalMembership driver.LocalMembership + ConfigService driver.ConfigService } -// GetRWSet returns a RWSet for this ledger whose content is unmarshalled -// from the passed bytes. -// A client may obtain more than one such simulator; they are made unique -// by way of the supplied txid -func (c *Channel) GetRWSet(txid string, rwset []byte) (driver.RWSet, error) { - return c.Vault.GetRWSet(txid, rwset) +func NewLedger( + channelName string, + chaincodeManager driver.ChaincodeManager, + localMembership driver.LocalMembership, + configService driver.ConfigService, +) *Ledger { + return &Ledger{ChannelName: channelName, ChaincodeManager: chaincodeManager, LocalMembership: localMembership, ConfigService: configService} } -// GetEphemeralRWSet returns an ephemeral RWSet for this ledger whose content is unmarshalled -// from the passed bytes. -// If namespaces is not empty, the returned RWSet will be filtered by the passed namespaces -func (c *Channel) GetEphemeralRWSet(rwset []byte, namespaces ...string) (driver.RWSet, error) { - return c.Vault.InspectRWSet(rwset, namespaces...) +func (c *Ledger) GetTransactionByID(txID string) (driver.ProcessedTransaction, error) { + pt := &peer.ProcessedTransaction{} + if err := c.queryChaincode(GetTransactionByID, txID, pt); err != nil { + return nil, err + } + return transaction.NewProcessedTransaction(pt) } -// NewQueryExecutor gives handle to a query executor. -// A client can obtain more than one 'QueryExecutor's for parallel execution. 
-// Any synchronization should be performed at the implementation level if required -func (c *Channel) NewQueryExecutor() (driver.QueryExecutor, error) { - return c.Vault.NewQueryExecutor() +func (c *Ledger) GetBlockNumberByTxID(txID string) (uint64, error) { + block := &common.Block{} + if err := c.queryChaincode(GetBlockByTxID, txID, block); err != nil { + return 0, err + } + return block.Header.Number, nil } // GetBlockByNumber fetches a block by number -func (c *Channel) GetBlockByNumber(number uint64) (driver.Block, error) { - res, err := c.Chaincode("qscc").NewInvocation(GetBlockByNumber, c.ChannelName, number).WithSignerIdentity( - c.Network.LocalMembership().DefaultIdentity(), - ).WithEndorsersByConnConfig(c.Network.PickPeer(driver.PeerForQuery)).Query() - if err != nil { +func (c *Ledger) GetBlockByNumber(number uint64) (driver.Block, error) { + block := &common.Block{} + if err := c.queryChaincode(GetBlockByNumber, number, block); err != nil { return nil, err } + return &Block{Block: block}, nil +} - b, err := protoutil.UnmarshalBlock(res) +func (c *Ledger) queryChaincode(function string, param any, result proto.Message) error { + raw, err := c.ChaincodeManager.Chaincode("qscc"). + NewInvocation(function, c.ChannelName, param). + WithSignerIdentity(c.LocalMembership.DefaultIdentity()). + WithEndorsersByConnConfig(c.ConfigService.PickPeer(driver.PeerForQuery)). 
+ Query() if err != nil { - return nil, err + return errors.Wrap(err, "query chaincode failed") + } + + if err := proto.Unmarshal(raw, result); err != nil { + return errors.Wrap(err, "unmarshal failed") } - return &Block{Block: b}, nil + return nil } // Block wraps a Fabric block @@ -75,7 +88,7 @@ func (b *Block) ProcessedTransaction(i int) (driver.ProcessedTransaction, error) if err := proto.Unmarshal(b.Data.Data[i], env); err != nil { return nil, err } - return newProcessedTransaction(&peer.ProcessedTransaction{ + return transaction.NewProcessedTransaction(&peer.ProcessedTransaction{ TransactionEnvelope: env, ValidationCode: int32(b.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER][i]), }) diff --git a/platform/fabric/core/generic/membership.go b/platform/fabric/core/generic/membership.go deleted file mode 100644 index f8df1a9f9..000000000 --- a/platform/fabric/core/generic/membership.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package generic - -import ( - "github.com/hyperledger/fabric/msp" - - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" -) - -// GetMSPIDs retrieves the MSP IDs of the organizations in the current Channel -// configuration. -func (c *Channel) GetMSPIDs() []string { - ac, ok := c.Resources().ApplicationConfig() - if !ok || ac.Organizations() == nil { - return nil - } - - var mspIDs []string - for _, org := range ac.Organizations() { - mspIDs = append(mspIDs, org.MSPID()) - } - - return mspIDs -} - -// MSPManager returns the msp.MSPManager that reflects the current Channel -// configuration. Users should not memoize references to this object.
-func (c *Channel) MSPManager() driver.MSPManager { - return &mspManager{MSPManager: c.Resources().MSPManager()} -} - -type MSPManager interface { - DeserializeIdentity(serializedIdentity []byte) (msp.Identity, error) -} - -type mspManager struct { - MSPManager -} - -func (m *mspManager) DeserializeIdentity(serializedIdentity []byte) (driver.MSPIdentity, error) { - return m.MSPManager.DeserializeIdentity(serializedIdentity) -} diff --git a/platform/fabric/core/generic/membership/membership.go b/platform/fabric/core/generic/membership/membership.go new file mode 100644 index 000000000..a1b27348e --- /dev/null +++ b/platform/fabric/core/generic/membership/membership.go @@ -0,0 +1,90 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package membership + +import ( + "sync" + + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + api2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" + "github.com/hyperledger/fabric/common/channelconfig" + "github.com/hyperledger/fabric/msp" + "github.com/pkg/errors" +) + +type Service struct { + // ResourcesApplyLock is used to serialize calls to CommitConfig and bundle update processing. + ResourcesApplyLock sync.Mutex + // ResourcesLock is used to serialize access to resources + ResourcesLock sync.RWMutex + // resources is used to acquire configuration bundle resources. + ChannelResources channelconfig.Resources +} + +func NewService() *Service { + return &Service{} +} + +// Resources returns the active Channel configuration bundle. 
+func (c *Service) Resources() channelconfig.Resources { + c.ResourcesLock.RLock() + res := c.ChannelResources + c.ResourcesLock.RUnlock() + return res +} + +func (c *Service) IsValid(identity view.Identity) error { + id, err := c.Resources().MSPManager().DeserializeIdentity(identity) + if err != nil { + return errors.Wrapf(err, "failed deserializing identity [%s]", identity.String()) + } + + return id.Validate() +} + +func (c *Service) GetVerifier(identity view.Identity) (api2.Verifier, error) { + id, err := c.Resources().MSPManager().DeserializeIdentity(identity) + if err != nil { + return nil, errors.Wrapf(err, "failed deserializing identity [%s]", identity.String()) + } + return id, nil +} + +// GetMSPIDs retrieves the MSP IDs of the organizations in the current Channel +// configuration. +func (c *Service) GetMSPIDs() []string { + ac, ok := c.Resources().ApplicationConfig() + if !ok || ac.Organizations() == nil { + return nil + } + + var mspIDs []string + for _, org := range ac.Organizations() { + mspIDs = append(mspIDs, org.MSPID()) + } + + return mspIDs +} + +// MSPManager returns the msp.MSPManager that reflects the current Channel +// configuration. Users should not memoize references to this object. 
+func (c *Service) MSPManager() driver.MSPManager { + return &mspManager{FabricMSPManager: c.Resources().MSPManager()} +} + +type FabricMSPManager interface { + DeserializeIdentity(serializedIdentity []byte) (msp.Identity, error) +} + +type mspManager struct { + FabricMSPManager +} + +func (m *mspManager) DeserializeIdentity(serializedIdentity []byte) (driver.MSPIdentity, error) { + return m.FabricMSPManager.DeserializeIdentity(serializedIdentity) +} diff --git a/platform/fabric/core/generic/msp/driver/driver.go b/platform/fabric/core/generic/msp/driver/driver.go index 792bedff1..0330ed4f9 100644 --- a/platform/fabric/core/generic/msp/driver/driver.go +++ b/platform/fabric/core/generic/msp/driver/driver.go @@ -8,10 +8,8 @@ package driver import ( "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" - fdriver "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/core/sig" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/sig" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) @@ -19,24 +17,28 @@ type MSP struct { Name string `yaml:"name,omitempty"` Type string `yaml:"type,omitempty"` EnrollmentID string - GetIdentity fdriver.GetIdentityFunc + GetIdentity driver.GetIdentityFunc } type Config interface { - Name() string + NetworkName() string DefaultMSP() string MSPs() ([]config.MSP, error) TranslatePath(path string) string } type SignerService interface { - RegisterSigner(identity view.Identity, signer fdriver.Signer, verifier fdriver.Verifier) error + 
RegisterSigner(identity view.Identity, signer driver.Signer, verifier driver.Verifier) error + IsMe(id view.Identity) bool } type BinderService interface { Bind(longTerm view.Identity, ephemeral view.Identity) error + GetIdentity(label string, pkiID []byte) (view.Identity, error) } +//go:generate counterfeiter -o mock/config_provider.go -fake-name ConfigProvider . ConfigProvider + type ConfigProvider interface { driver.ConfigService } @@ -47,11 +49,10 @@ type DeserializerManager interface { type Manager interface { AddDeserializer(deserializer sig.Deserializer) - AddMSP(name string, mspType string, enrollmentID string, idGetter fdriver.GetIdentityFunc) + AddMSP(name string, mspType string, enrollmentID string, idGetter driver.GetIdentityFunc) Config() Config DefaultMSP() string SignerService() SignerService - ServiceProvider() view2.ServiceProvider CacheSize() int SetDefaultIdentity(id string, defaultIdentity view.Identity, defaultSigningIdentity SigningIdentity) } diff --git a/platform/fabric/core/generic/msp/driver/mock/config_provider.go b/platform/fabric/core/generic/msp/driver/mock/config_provider.go new file mode 100644 index 000000000..2c65b8a7e --- /dev/null +++ b/platform/fabric/core/generic/msp/driver/mock/config_provider.go @@ -0,0 +1,2383 @@ +// Code generated by counterfeiter. DO NOT EDIT. 
+package mock + +import ( + "sync" + "time" + + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/driver" + drivera "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" +) + +type ConfigProvider struct { + BroadcastNumRetriesStub func() int + broadcastNumRetriesMutex sync.RWMutex + broadcastNumRetriesArgsForCall []struct { + } + broadcastNumRetriesReturns struct { + result1 int + } + broadcastNumRetriesReturnsOnCall map[int]struct { + result1 int + } + BroadcastRetryIntervalStub func() time.Duration + broadcastRetryIntervalMutex sync.RWMutex + broadcastRetryIntervalArgsForCall []struct { + } + broadcastRetryIntervalReturns struct { + result1 time.Duration + } + broadcastRetryIntervalReturnsOnCall map[int]struct { + result1 time.Duration + } + ChannelStub func(string) drivera.ChannelConfig + channelMutex sync.RWMutex + channelArgsForCall []struct { + arg1 string + } + channelReturns struct { + result1 drivera.ChannelConfig + } + channelReturnsOnCall map[int]struct { + result1 drivera.ChannelConfig + } + ChannelIDsStub func() []string + channelIDsMutex sync.RWMutex + channelIDsArgsForCall []struct { + } + channelIDsReturns struct { + result1 []string + } + channelIDsReturnsOnCall map[int]struct { + result1 []string + } + ClientConnTimeoutStub func() time.Duration + clientConnTimeoutMutex sync.RWMutex + clientConnTimeoutArgsForCall []struct { + } + clientConnTimeoutReturns struct { + result1 time.Duration + } + clientConnTimeoutReturnsOnCall map[int]struct { + result1 time.Duration + } + ConfigFileUsedStub func() string + configFileUsedMutex sync.RWMutex + configFileUsedArgsForCall []struct { + } + configFileUsedReturns struct { + result1 string + } + configFileUsedReturnsOnCall map[int]struct { + result1 string + } + DefaultChannelStub func() string + defaultChannelMutex sync.RWMutex + 
defaultChannelArgsForCall []struct { + } + defaultChannelReturns struct { + result1 string + } + defaultChannelReturnsOnCall map[int]struct { + result1 string + } + GetBoolStub func(string) bool + getBoolMutex sync.RWMutex + getBoolArgsForCall []struct { + arg1 string + } + getBoolReturns struct { + result1 bool + } + getBoolReturnsOnCall map[int]struct { + result1 bool + } + GetDurationStub func(string) time.Duration + getDurationMutex sync.RWMutex + getDurationArgsForCall []struct { + arg1 string + } + getDurationReturns struct { + result1 time.Duration + } + getDurationReturnsOnCall map[int]struct { + result1 time.Duration + } + GetIntStub func(string) int + getIntMutex sync.RWMutex + getIntArgsForCall []struct { + arg1 string + } + getIntReturns struct { + result1 int + } + getIntReturnsOnCall map[int]struct { + result1 int + } + GetPathStub func(string) string + getPathMutex sync.RWMutex + getPathArgsForCall []struct { + arg1 string + } + getPathReturns struct { + result1 string + } + getPathReturnsOnCall map[int]struct { + result1 string + } + GetStringStub func(string) string + getStringMutex sync.RWMutex + getStringArgsForCall []struct { + arg1 string + } + getStringReturns struct { + result1 string + } + getStringReturnsOnCall map[int]struct { + result1 string + } + GetStringSliceStub func(string) []string + getStringSliceMutex sync.RWMutex + getStringSliceArgsForCall []struct { + arg1 string + } + getStringSliceReturns struct { + result1 []string + } + getStringSliceReturnsOnCall map[int]struct { + result1 []string + } + IsChannelQuietStub func(string) bool + isChannelQuietMutex sync.RWMutex + isChannelQuietArgsForCall []struct { + arg1 string + } + isChannelQuietReturns struct { + result1 bool + } + isChannelQuietReturnsOnCall map[int]struct { + result1 bool + } + IsSetStub func(string) bool + isSetMutex sync.RWMutex + isSetArgsForCall []struct { + arg1 string + } + isSetReturns struct { + result1 bool + } + isSetReturnsOnCall map[int]struct { + result1 
bool + } + KeepAliveClientIntervalStub func() time.Duration + keepAliveClientIntervalMutex sync.RWMutex + keepAliveClientIntervalArgsForCall []struct { + } + keepAliveClientIntervalReturns struct { + result1 time.Duration + } + keepAliveClientIntervalReturnsOnCall map[int]struct { + result1 time.Duration + } + KeepAliveClientTimeoutStub func() time.Duration + keepAliveClientTimeoutMutex sync.RWMutex + keepAliveClientTimeoutArgsForCall []struct { + } + keepAliveClientTimeoutReturns struct { + result1 time.Duration + } + keepAliveClientTimeoutReturnsOnCall map[int]struct { + result1 time.Duration + } + NetworkNameStub func() string + networkNameMutex sync.RWMutex + networkNameArgsForCall []struct { + } + networkNameReturns struct { + result1 string + } + networkNameReturnsOnCall map[int]struct { + result1 string + } + NewDefaultChannelConfigStub func(string) drivera.ChannelConfig + newDefaultChannelConfigMutex sync.RWMutex + newDefaultChannelConfigArgsForCall []struct { + arg1 string + } + newDefaultChannelConfigReturns struct { + result1 drivera.ChannelConfig + } + newDefaultChannelConfigReturnsOnCall map[int]struct { + result1 drivera.ChannelConfig + } + OrdererConnectionPoolSizeStub func() int + ordererConnectionPoolSizeMutex sync.RWMutex + ordererConnectionPoolSizeArgsForCall []struct { + } + ordererConnectionPoolSizeReturns struct { + result1 int + } + ordererConnectionPoolSizeReturnsOnCall map[int]struct { + result1 int + } + OrderersStub func() []*grpc.ConnectionConfig + orderersMutex sync.RWMutex + orderersArgsForCall []struct { + } + orderersReturns struct { + result1 []*grpc.ConnectionConfig + } + orderersReturnsOnCall map[int]struct { + result1 []*grpc.ConnectionConfig + } + PickOrdererStub func() *grpc.ConnectionConfig + pickOrdererMutex sync.RWMutex + pickOrdererArgsForCall []struct { + } + pickOrdererReturns struct { + result1 *grpc.ConnectionConfig + } + pickOrdererReturnsOnCall map[int]struct { + result1 *grpc.ConnectionConfig + } + PickPeerStub 
func(drivera.PeerFunctionType) *grpc.ConnectionConfig + pickPeerMutex sync.RWMutex + pickPeerArgsForCall []struct { + arg1 drivera.PeerFunctionType + } + pickPeerReturns struct { + result1 *grpc.ConnectionConfig + } + pickPeerReturnsOnCall map[int]struct { + result1 *grpc.ConnectionConfig + } + SetConfigOrderersStub func([]*grpc.ConnectionConfig) error + setConfigOrderersMutex sync.RWMutex + setConfigOrderersArgsForCall []struct { + arg1 []*grpc.ConnectionConfig + } + setConfigOrderersReturns struct { + result1 error + } + setConfigOrderersReturnsOnCall map[int]struct { + result1 error + } + TLSClientAuthRequiredStub func() bool + tLSClientAuthRequiredMutex sync.RWMutex + tLSClientAuthRequiredArgsForCall []struct { + } + tLSClientAuthRequiredReturns struct { + result1 bool + } + tLSClientAuthRequiredReturnsOnCall map[int]struct { + result1 bool + } + TLSClientCertFileStub func() string + tLSClientCertFileMutex sync.RWMutex + tLSClientCertFileArgsForCall []struct { + } + tLSClientCertFileReturns struct { + result1 string + } + tLSClientCertFileReturnsOnCall map[int]struct { + result1 string + } + TLSClientKeyFileStub func() string + tLSClientKeyFileMutex sync.RWMutex + tLSClientKeyFileArgsForCall []struct { + } + tLSClientKeyFileReturns struct { + result1 string + } + tLSClientKeyFileReturnsOnCall map[int]struct { + result1 string + } + TLSEnabledStub func() bool + tLSEnabledMutex sync.RWMutex + tLSEnabledArgsForCall []struct { + } + tLSEnabledReturns struct { + result1 bool + } + tLSEnabledReturnsOnCall map[int]struct { + result1 bool + } + TLSServerHostOverrideStub func() string + tLSServerHostOverrideMutex sync.RWMutex + tLSServerHostOverrideArgsForCall []struct { + } + tLSServerHostOverrideReturns struct { + result1 string + } + tLSServerHostOverrideReturnsOnCall map[int]struct { + result1 string + } + TranslatePathStub func(string) string + translatePathMutex sync.RWMutex + translatePathArgsForCall []struct { + arg1 string + } + translatePathReturns struct { + 
result1 string + } + translatePathReturnsOnCall map[int]struct { + result1 string + } + UnmarshalKeyStub func(string, interface{}) error + unmarshalKeyMutex sync.RWMutex + unmarshalKeyArgsForCall []struct { + arg1 string + arg2 interface{} + } + unmarshalKeyReturns struct { + result1 error + } + unmarshalKeyReturnsOnCall map[int]struct { + result1 error + } + VaultPersistencePrefixStub func() string + vaultPersistencePrefixMutex sync.RWMutex + vaultPersistencePrefixArgsForCall []struct { + } + vaultPersistencePrefixReturns struct { + result1 string + } + vaultPersistencePrefixReturnsOnCall map[int]struct { + result1 string + } + VaultPersistenceTypeStub func() string + vaultPersistenceTypeMutex sync.RWMutex + vaultPersistenceTypeArgsForCall []struct { + } + vaultPersistenceTypeReturns struct { + result1 string + } + vaultPersistenceTypeReturnsOnCall map[int]struct { + result1 string + } + VaultTXStoreCacheSizeStub func() int + vaultTXStoreCacheSizeMutex sync.RWMutex + vaultTXStoreCacheSizeArgsForCall []struct { + } + vaultTXStoreCacheSizeReturns struct { + result1 int + } + vaultTXStoreCacheSizeReturnsOnCall map[int]struct { + result1 int + } + invocations map[string][][]interface{} + invocationsMutex sync.RWMutex +} + +func (fake *ConfigProvider) BroadcastNumRetries() int { + fake.broadcastNumRetriesMutex.Lock() + ret, specificReturn := fake.broadcastNumRetriesReturnsOnCall[len(fake.broadcastNumRetriesArgsForCall)] + fake.broadcastNumRetriesArgsForCall = append(fake.broadcastNumRetriesArgsForCall, struct { + }{}) + stub := fake.BroadcastNumRetriesStub + fakeReturns := fake.broadcastNumRetriesReturns + fake.recordInvocation("BroadcastNumRetries", []interface{}{}) + fake.broadcastNumRetriesMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) BroadcastNumRetriesCallCount() int { + fake.broadcastNumRetriesMutex.RLock() + defer 
fake.broadcastNumRetriesMutex.RUnlock() + return len(fake.broadcastNumRetriesArgsForCall) +} + +func (fake *ConfigProvider) BroadcastNumRetriesCalls(stub func() int) { + fake.broadcastNumRetriesMutex.Lock() + defer fake.broadcastNumRetriesMutex.Unlock() + fake.BroadcastNumRetriesStub = stub +} + +func (fake *ConfigProvider) BroadcastNumRetriesReturns(result1 int) { + fake.broadcastNumRetriesMutex.Lock() + defer fake.broadcastNumRetriesMutex.Unlock() + fake.BroadcastNumRetriesStub = nil + fake.broadcastNumRetriesReturns = struct { + result1 int + }{result1} +} + +func (fake *ConfigProvider) BroadcastNumRetriesReturnsOnCall(i int, result1 int) { + fake.broadcastNumRetriesMutex.Lock() + defer fake.broadcastNumRetriesMutex.Unlock() + fake.BroadcastNumRetriesStub = nil + if fake.broadcastNumRetriesReturnsOnCall == nil { + fake.broadcastNumRetriesReturnsOnCall = make(map[int]struct { + result1 int + }) + } + fake.broadcastNumRetriesReturnsOnCall[i] = struct { + result1 int + }{result1} +} + +func (fake *ConfigProvider) BroadcastRetryInterval() time.Duration { + fake.broadcastRetryIntervalMutex.Lock() + ret, specificReturn := fake.broadcastRetryIntervalReturnsOnCall[len(fake.broadcastRetryIntervalArgsForCall)] + fake.broadcastRetryIntervalArgsForCall = append(fake.broadcastRetryIntervalArgsForCall, struct { + }{}) + stub := fake.BroadcastRetryIntervalStub + fakeReturns := fake.broadcastRetryIntervalReturns + fake.recordInvocation("BroadcastRetryInterval", []interface{}{}) + fake.broadcastRetryIntervalMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) BroadcastRetryIntervalCallCount() int { + fake.broadcastRetryIntervalMutex.RLock() + defer fake.broadcastRetryIntervalMutex.RUnlock() + return len(fake.broadcastRetryIntervalArgsForCall) +} + +func (fake *ConfigProvider) BroadcastRetryIntervalCalls(stub func() time.Duration) { + 
fake.broadcastRetryIntervalMutex.Lock() + defer fake.broadcastRetryIntervalMutex.Unlock() + fake.BroadcastRetryIntervalStub = stub +} + +func (fake *ConfigProvider) BroadcastRetryIntervalReturns(result1 time.Duration) { + fake.broadcastRetryIntervalMutex.Lock() + defer fake.broadcastRetryIntervalMutex.Unlock() + fake.BroadcastRetryIntervalStub = nil + fake.broadcastRetryIntervalReturns = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) BroadcastRetryIntervalReturnsOnCall(i int, result1 time.Duration) { + fake.broadcastRetryIntervalMutex.Lock() + defer fake.broadcastRetryIntervalMutex.Unlock() + fake.BroadcastRetryIntervalStub = nil + if fake.broadcastRetryIntervalReturnsOnCall == nil { + fake.broadcastRetryIntervalReturnsOnCall = make(map[int]struct { + result1 time.Duration + }) + } + fake.broadcastRetryIntervalReturnsOnCall[i] = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) Channel(arg1 string) drivera.ChannelConfig { + fake.channelMutex.Lock() + ret, specificReturn := fake.channelReturnsOnCall[len(fake.channelArgsForCall)] + fake.channelArgsForCall = append(fake.channelArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.ChannelStub + fakeReturns := fake.channelReturns + fake.recordInvocation("Channel", []interface{}{arg1}) + fake.channelMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) ChannelCallCount() int { + fake.channelMutex.RLock() + defer fake.channelMutex.RUnlock() + return len(fake.channelArgsForCall) +} + +func (fake *ConfigProvider) ChannelCalls(stub func(string) drivera.ChannelConfig) { + fake.channelMutex.Lock() + defer fake.channelMutex.Unlock() + fake.ChannelStub = stub +} + +func (fake *ConfigProvider) ChannelArgsForCall(i int) string { + fake.channelMutex.RLock() + defer fake.channelMutex.RUnlock() + argsForCall := fake.channelArgsForCall[i] + return 
argsForCall.arg1 +} + +func (fake *ConfigProvider) ChannelReturns(result1 drivera.ChannelConfig) { + fake.channelMutex.Lock() + defer fake.channelMutex.Unlock() + fake.ChannelStub = nil + fake.channelReturns = struct { + result1 drivera.ChannelConfig + }{result1} +} + +func (fake *ConfigProvider) ChannelReturnsOnCall(i int, result1 drivera.ChannelConfig) { + fake.channelMutex.Lock() + defer fake.channelMutex.Unlock() + fake.ChannelStub = nil + if fake.channelReturnsOnCall == nil { + fake.channelReturnsOnCall = make(map[int]struct { + result1 drivera.ChannelConfig + }) + } + fake.channelReturnsOnCall[i] = struct { + result1 drivera.ChannelConfig + }{result1} +} + +func (fake *ConfigProvider) ChannelIDs() []string { + fake.channelIDsMutex.Lock() + ret, specificReturn := fake.channelIDsReturnsOnCall[len(fake.channelIDsArgsForCall)] + fake.channelIDsArgsForCall = append(fake.channelIDsArgsForCall, struct { + }{}) + stub := fake.ChannelIDsStub + fakeReturns := fake.channelIDsReturns + fake.recordInvocation("ChannelIDs", []interface{}{}) + fake.channelIDsMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) ChannelIDsCallCount() int { + fake.channelIDsMutex.RLock() + defer fake.channelIDsMutex.RUnlock() + return len(fake.channelIDsArgsForCall) +} + +func (fake *ConfigProvider) ChannelIDsCalls(stub func() []string) { + fake.channelIDsMutex.Lock() + defer fake.channelIDsMutex.Unlock() + fake.ChannelIDsStub = stub +} + +func (fake *ConfigProvider) ChannelIDsReturns(result1 []string) { + fake.channelIDsMutex.Lock() + defer fake.channelIDsMutex.Unlock() + fake.ChannelIDsStub = nil + fake.channelIDsReturns = struct { + result1 []string + }{result1} +} + +func (fake *ConfigProvider) ChannelIDsReturnsOnCall(i int, result1 []string) { + fake.channelIDsMutex.Lock() + defer fake.channelIDsMutex.Unlock() + fake.ChannelIDsStub = nil + if fake.channelIDsReturnsOnCall == nil { 
+ fake.channelIDsReturnsOnCall = make(map[int]struct { + result1 []string + }) + } + fake.channelIDsReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *ConfigProvider) ClientConnTimeout() time.Duration { + fake.clientConnTimeoutMutex.Lock() + ret, specificReturn := fake.clientConnTimeoutReturnsOnCall[len(fake.clientConnTimeoutArgsForCall)] + fake.clientConnTimeoutArgsForCall = append(fake.clientConnTimeoutArgsForCall, struct { + }{}) + stub := fake.ClientConnTimeoutStub + fakeReturns := fake.clientConnTimeoutReturns + fake.recordInvocation("ClientConnTimeout", []interface{}{}) + fake.clientConnTimeoutMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) ClientConnTimeoutCallCount() int { + fake.clientConnTimeoutMutex.RLock() + defer fake.clientConnTimeoutMutex.RUnlock() + return len(fake.clientConnTimeoutArgsForCall) +} + +func (fake *ConfigProvider) ClientConnTimeoutCalls(stub func() time.Duration) { + fake.clientConnTimeoutMutex.Lock() + defer fake.clientConnTimeoutMutex.Unlock() + fake.ClientConnTimeoutStub = stub +} + +func (fake *ConfigProvider) ClientConnTimeoutReturns(result1 time.Duration) { + fake.clientConnTimeoutMutex.Lock() + defer fake.clientConnTimeoutMutex.Unlock() + fake.ClientConnTimeoutStub = nil + fake.clientConnTimeoutReturns = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) ClientConnTimeoutReturnsOnCall(i int, result1 time.Duration) { + fake.clientConnTimeoutMutex.Lock() + defer fake.clientConnTimeoutMutex.Unlock() + fake.ClientConnTimeoutStub = nil + if fake.clientConnTimeoutReturnsOnCall == nil { + fake.clientConnTimeoutReturnsOnCall = make(map[int]struct { + result1 time.Duration + }) + } + fake.clientConnTimeoutReturnsOnCall[i] = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) ConfigFileUsed() string { + fake.configFileUsedMutex.Lock() + ret, 
specificReturn := fake.configFileUsedReturnsOnCall[len(fake.configFileUsedArgsForCall)] + fake.configFileUsedArgsForCall = append(fake.configFileUsedArgsForCall, struct { + }{}) + stub := fake.ConfigFileUsedStub + fakeReturns := fake.configFileUsedReturns + fake.recordInvocation("ConfigFileUsed", []interface{}{}) + fake.configFileUsedMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) ConfigFileUsedCallCount() int { + fake.configFileUsedMutex.RLock() + defer fake.configFileUsedMutex.RUnlock() + return len(fake.configFileUsedArgsForCall) +} + +func (fake *ConfigProvider) ConfigFileUsedCalls(stub func() string) { + fake.configFileUsedMutex.Lock() + defer fake.configFileUsedMutex.Unlock() + fake.ConfigFileUsedStub = stub +} + +func (fake *ConfigProvider) ConfigFileUsedReturns(result1 string) { + fake.configFileUsedMutex.Lock() + defer fake.configFileUsedMutex.Unlock() + fake.ConfigFileUsedStub = nil + fake.configFileUsedReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) ConfigFileUsedReturnsOnCall(i int, result1 string) { + fake.configFileUsedMutex.Lock() + defer fake.configFileUsedMutex.Unlock() + fake.ConfigFileUsedStub = nil + if fake.configFileUsedReturnsOnCall == nil { + fake.configFileUsedReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.configFileUsedReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) DefaultChannel() string { + fake.defaultChannelMutex.Lock() + ret, specificReturn := fake.defaultChannelReturnsOnCall[len(fake.defaultChannelArgsForCall)] + fake.defaultChannelArgsForCall = append(fake.defaultChannelArgsForCall, struct { + }{}) + stub := fake.DefaultChannelStub + fakeReturns := fake.defaultChannelReturns + fake.recordInvocation("DefaultChannel", []interface{}{}) + fake.defaultChannelMutex.Unlock() + if stub != nil { + return stub() + } + if 
specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) DefaultChannelCallCount() int { + fake.defaultChannelMutex.RLock() + defer fake.defaultChannelMutex.RUnlock() + return len(fake.defaultChannelArgsForCall) +} + +func (fake *ConfigProvider) DefaultChannelCalls(stub func() string) { + fake.defaultChannelMutex.Lock() + defer fake.defaultChannelMutex.Unlock() + fake.DefaultChannelStub = stub +} + +func (fake *ConfigProvider) DefaultChannelReturns(result1 string) { + fake.defaultChannelMutex.Lock() + defer fake.defaultChannelMutex.Unlock() + fake.DefaultChannelStub = nil + fake.defaultChannelReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) DefaultChannelReturnsOnCall(i int, result1 string) { + fake.defaultChannelMutex.Lock() + defer fake.defaultChannelMutex.Unlock() + fake.DefaultChannelStub = nil + if fake.defaultChannelReturnsOnCall == nil { + fake.defaultChannelReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.defaultChannelReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) GetBool(arg1 string) bool { + fake.getBoolMutex.Lock() + ret, specificReturn := fake.getBoolReturnsOnCall[len(fake.getBoolArgsForCall)] + fake.getBoolArgsForCall = append(fake.getBoolArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.GetBoolStub + fakeReturns := fake.getBoolReturns + fake.recordInvocation("GetBool", []interface{}{arg1}) + fake.getBoolMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) GetBoolCallCount() int { + fake.getBoolMutex.RLock() + defer fake.getBoolMutex.RUnlock() + return len(fake.getBoolArgsForCall) +} + +func (fake *ConfigProvider) GetBoolCalls(stub func(string) bool) { + fake.getBoolMutex.Lock() + defer fake.getBoolMutex.Unlock() + fake.GetBoolStub = stub +} + +func (fake *ConfigProvider) 
GetBoolArgsForCall(i int) string { + fake.getBoolMutex.RLock() + defer fake.getBoolMutex.RUnlock() + argsForCall := fake.getBoolArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) GetBoolReturns(result1 bool) { + fake.getBoolMutex.Lock() + defer fake.getBoolMutex.Unlock() + fake.GetBoolStub = nil + fake.getBoolReturns = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) GetBoolReturnsOnCall(i int, result1 bool) { + fake.getBoolMutex.Lock() + defer fake.getBoolMutex.Unlock() + fake.GetBoolStub = nil + if fake.getBoolReturnsOnCall == nil { + fake.getBoolReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.getBoolReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) GetDuration(arg1 string) time.Duration { + fake.getDurationMutex.Lock() + ret, specificReturn := fake.getDurationReturnsOnCall[len(fake.getDurationArgsForCall)] + fake.getDurationArgsForCall = append(fake.getDurationArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.GetDurationStub + fakeReturns := fake.getDurationReturns + fake.recordInvocation("GetDuration", []interface{}{arg1}) + fake.getDurationMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) GetDurationCallCount() int { + fake.getDurationMutex.RLock() + defer fake.getDurationMutex.RUnlock() + return len(fake.getDurationArgsForCall) +} + +func (fake *ConfigProvider) GetDurationCalls(stub func(string) time.Duration) { + fake.getDurationMutex.Lock() + defer fake.getDurationMutex.Unlock() + fake.GetDurationStub = stub +} + +func (fake *ConfigProvider) GetDurationArgsForCall(i int) string { + fake.getDurationMutex.RLock() + defer fake.getDurationMutex.RUnlock() + argsForCall := fake.getDurationArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) GetDurationReturns(result1 time.Duration) { + 
fake.getDurationMutex.Lock() + defer fake.getDurationMutex.Unlock() + fake.GetDurationStub = nil + fake.getDurationReturns = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) GetDurationReturnsOnCall(i int, result1 time.Duration) { + fake.getDurationMutex.Lock() + defer fake.getDurationMutex.Unlock() + fake.GetDurationStub = nil + if fake.getDurationReturnsOnCall == nil { + fake.getDurationReturnsOnCall = make(map[int]struct { + result1 time.Duration + }) + } + fake.getDurationReturnsOnCall[i] = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) GetInt(arg1 string) int { + fake.getIntMutex.Lock() + ret, specificReturn := fake.getIntReturnsOnCall[len(fake.getIntArgsForCall)] + fake.getIntArgsForCall = append(fake.getIntArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.GetIntStub + fakeReturns := fake.getIntReturns + fake.recordInvocation("GetInt", []interface{}{arg1}) + fake.getIntMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) GetIntCallCount() int { + fake.getIntMutex.RLock() + defer fake.getIntMutex.RUnlock() + return len(fake.getIntArgsForCall) +} + +func (fake *ConfigProvider) GetIntCalls(stub func(string) int) { + fake.getIntMutex.Lock() + defer fake.getIntMutex.Unlock() + fake.GetIntStub = stub +} + +func (fake *ConfigProvider) GetIntArgsForCall(i int) string { + fake.getIntMutex.RLock() + defer fake.getIntMutex.RUnlock() + argsForCall := fake.getIntArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) GetIntReturns(result1 int) { + fake.getIntMutex.Lock() + defer fake.getIntMutex.Unlock() + fake.GetIntStub = nil + fake.getIntReturns = struct { + result1 int + }{result1} +} + +func (fake *ConfigProvider) GetIntReturnsOnCall(i int, result1 int) { + fake.getIntMutex.Lock() + defer fake.getIntMutex.Unlock() + fake.GetIntStub = nil + if 
fake.getIntReturnsOnCall == nil { + fake.getIntReturnsOnCall = make(map[int]struct { + result1 int + }) + } + fake.getIntReturnsOnCall[i] = struct { + result1 int + }{result1} +} + +func (fake *ConfigProvider) GetPath(arg1 string) string { + fake.getPathMutex.Lock() + ret, specificReturn := fake.getPathReturnsOnCall[len(fake.getPathArgsForCall)] + fake.getPathArgsForCall = append(fake.getPathArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.GetPathStub + fakeReturns := fake.getPathReturns + fake.recordInvocation("GetPath", []interface{}{arg1}) + fake.getPathMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) GetPathCallCount() int { + fake.getPathMutex.RLock() + defer fake.getPathMutex.RUnlock() + return len(fake.getPathArgsForCall) +} + +func (fake *ConfigProvider) GetPathCalls(stub func(string) string) { + fake.getPathMutex.Lock() + defer fake.getPathMutex.Unlock() + fake.GetPathStub = stub +} + +func (fake *ConfigProvider) GetPathArgsForCall(i int) string { + fake.getPathMutex.RLock() + defer fake.getPathMutex.RUnlock() + argsForCall := fake.getPathArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) GetPathReturns(result1 string) { + fake.getPathMutex.Lock() + defer fake.getPathMutex.Unlock() + fake.GetPathStub = nil + fake.getPathReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) GetPathReturnsOnCall(i int, result1 string) { + fake.getPathMutex.Lock() + defer fake.getPathMutex.Unlock() + fake.GetPathStub = nil + if fake.getPathReturnsOnCall == nil { + fake.getPathReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getPathReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) GetString(arg1 string) string { + fake.getStringMutex.Lock() + ret, specificReturn := fake.getStringReturnsOnCall[len(fake.getStringArgsForCall)] + 
fake.getStringArgsForCall = append(fake.getStringArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.GetStringStub + fakeReturns := fake.getStringReturns + fake.recordInvocation("GetString", []interface{}{arg1}) + fake.getStringMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) GetStringCallCount() int { + fake.getStringMutex.RLock() + defer fake.getStringMutex.RUnlock() + return len(fake.getStringArgsForCall) +} + +func (fake *ConfigProvider) GetStringCalls(stub func(string) string) { + fake.getStringMutex.Lock() + defer fake.getStringMutex.Unlock() + fake.GetStringStub = stub +} + +func (fake *ConfigProvider) GetStringArgsForCall(i int) string { + fake.getStringMutex.RLock() + defer fake.getStringMutex.RUnlock() + argsForCall := fake.getStringArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) GetStringReturns(result1 string) { + fake.getStringMutex.Lock() + defer fake.getStringMutex.Unlock() + fake.GetStringStub = nil + fake.getStringReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) GetStringReturnsOnCall(i int, result1 string) { + fake.getStringMutex.Lock() + defer fake.getStringMutex.Unlock() + fake.GetStringStub = nil + if fake.getStringReturnsOnCall == nil { + fake.getStringReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.getStringReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) GetStringSlice(arg1 string) []string { + fake.getStringSliceMutex.Lock() + ret, specificReturn := fake.getStringSliceReturnsOnCall[len(fake.getStringSliceArgsForCall)] + fake.getStringSliceArgsForCall = append(fake.getStringSliceArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.GetStringSliceStub + fakeReturns := fake.getStringSliceReturns + fake.recordInvocation("GetStringSlice", []interface{}{arg1}) + 
fake.getStringSliceMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) GetStringSliceCallCount() int { + fake.getStringSliceMutex.RLock() + defer fake.getStringSliceMutex.RUnlock() + return len(fake.getStringSliceArgsForCall) +} + +func (fake *ConfigProvider) GetStringSliceCalls(stub func(string) []string) { + fake.getStringSliceMutex.Lock() + defer fake.getStringSliceMutex.Unlock() + fake.GetStringSliceStub = stub +} + +func (fake *ConfigProvider) GetStringSliceArgsForCall(i int) string { + fake.getStringSliceMutex.RLock() + defer fake.getStringSliceMutex.RUnlock() + argsForCall := fake.getStringSliceArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) GetStringSliceReturns(result1 []string) { + fake.getStringSliceMutex.Lock() + defer fake.getStringSliceMutex.Unlock() + fake.GetStringSliceStub = nil + fake.getStringSliceReturns = struct { + result1 []string + }{result1} +} + +func (fake *ConfigProvider) GetStringSliceReturnsOnCall(i int, result1 []string) { + fake.getStringSliceMutex.Lock() + defer fake.getStringSliceMutex.Unlock() + fake.GetStringSliceStub = nil + if fake.getStringSliceReturnsOnCall == nil { + fake.getStringSliceReturnsOnCall = make(map[int]struct { + result1 []string + }) + } + fake.getStringSliceReturnsOnCall[i] = struct { + result1 []string + }{result1} +} + +func (fake *ConfigProvider) IsChannelQuiet(arg1 string) bool { + fake.isChannelQuietMutex.Lock() + ret, specificReturn := fake.isChannelQuietReturnsOnCall[len(fake.isChannelQuietArgsForCall)] + fake.isChannelQuietArgsForCall = append(fake.isChannelQuietArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.IsChannelQuietStub + fakeReturns := fake.isChannelQuietReturns + fake.recordInvocation("IsChannelQuiet", []interface{}{arg1}) + fake.isChannelQuietMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return 
ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) IsChannelQuietCallCount() int { + fake.isChannelQuietMutex.RLock() + defer fake.isChannelQuietMutex.RUnlock() + return len(fake.isChannelQuietArgsForCall) +} + +func (fake *ConfigProvider) IsChannelQuietCalls(stub func(string) bool) { + fake.isChannelQuietMutex.Lock() + defer fake.isChannelQuietMutex.Unlock() + fake.IsChannelQuietStub = stub +} + +func (fake *ConfigProvider) IsChannelQuietArgsForCall(i int) string { + fake.isChannelQuietMutex.RLock() + defer fake.isChannelQuietMutex.RUnlock() + argsForCall := fake.isChannelQuietArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) IsChannelQuietReturns(result1 bool) { + fake.isChannelQuietMutex.Lock() + defer fake.isChannelQuietMutex.Unlock() + fake.IsChannelQuietStub = nil + fake.isChannelQuietReturns = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) IsChannelQuietReturnsOnCall(i int, result1 bool) { + fake.isChannelQuietMutex.Lock() + defer fake.isChannelQuietMutex.Unlock() + fake.IsChannelQuietStub = nil + if fake.isChannelQuietReturnsOnCall == nil { + fake.isChannelQuietReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.isChannelQuietReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) IsSet(arg1 string) bool { + fake.isSetMutex.Lock() + ret, specificReturn := fake.isSetReturnsOnCall[len(fake.isSetArgsForCall)] + fake.isSetArgsForCall = append(fake.isSetArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.IsSetStub + fakeReturns := fake.isSetReturns + fake.recordInvocation("IsSet", []interface{}{arg1}) + fake.isSetMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) IsSetCallCount() int { + fake.isSetMutex.RLock() + defer fake.isSetMutex.RUnlock() + return len(fake.isSetArgsForCall) +} + +func (fake 
*ConfigProvider) IsSetCalls(stub func(string) bool) { + fake.isSetMutex.Lock() + defer fake.isSetMutex.Unlock() + fake.IsSetStub = stub +} + +func (fake *ConfigProvider) IsSetArgsForCall(i int) string { + fake.isSetMutex.RLock() + defer fake.isSetMutex.RUnlock() + argsForCall := fake.isSetArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) IsSetReturns(result1 bool) { + fake.isSetMutex.Lock() + defer fake.isSetMutex.Unlock() + fake.IsSetStub = nil + fake.isSetReturns = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) IsSetReturnsOnCall(i int, result1 bool) { + fake.isSetMutex.Lock() + defer fake.isSetMutex.Unlock() + fake.IsSetStub = nil + if fake.isSetReturnsOnCall == nil { + fake.isSetReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.isSetReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) KeepAliveClientInterval() time.Duration { + fake.keepAliveClientIntervalMutex.Lock() + ret, specificReturn := fake.keepAliveClientIntervalReturnsOnCall[len(fake.keepAliveClientIntervalArgsForCall)] + fake.keepAliveClientIntervalArgsForCall = append(fake.keepAliveClientIntervalArgsForCall, struct { + }{}) + stub := fake.KeepAliveClientIntervalStub + fakeReturns := fake.keepAliveClientIntervalReturns + fake.recordInvocation("KeepAliveClientInterval", []interface{}{}) + fake.keepAliveClientIntervalMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) KeepAliveClientIntervalCallCount() int { + fake.keepAliveClientIntervalMutex.RLock() + defer fake.keepAliveClientIntervalMutex.RUnlock() + return len(fake.keepAliveClientIntervalArgsForCall) +} + +func (fake *ConfigProvider) KeepAliveClientIntervalCalls(stub func() time.Duration) { + fake.keepAliveClientIntervalMutex.Lock() + defer fake.keepAliveClientIntervalMutex.Unlock() + fake.KeepAliveClientIntervalStub = stub +} + +func 
(fake *ConfigProvider) KeepAliveClientIntervalReturns(result1 time.Duration) { + fake.keepAliveClientIntervalMutex.Lock() + defer fake.keepAliveClientIntervalMutex.Unlock() + fake.KeepAliveClientIntervalStub = nil + fake.keepAliveClientIntervalReturns = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) KeepAliveClientIntervalReturnsOnCall(i int, result1 time.Duration) { + fake.keepAliveClientIntervalMutex.Lock() + defer fake.keepAliveClientIntervalMutex.Unlock() + fake.KeepAliveClientIntervalStub = nil + if fake.keepAliveClientIntervalReturnsOnCall == nil { + fake.keepAliveClientIntervalReturnsOnCall = make(map[int]struct { + result1 time.Duration + }) + } + fake.keepAliveClientIntervalReturnsOnCall[i] = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) KeepAliveClientTimeout() time.Duration { + fake.keepAliveClientTimeoutMutex.Lock() + ret, specificReturn := fake.keepAliveClientTimeoutReturnsOnCall[len(fake.keepAliveClientTimeoutArgsForCall)] + fake.keepAliveClientTimeoutArgsForCall = append(fake.keepAliveClientTimeoutArgsForCall, struct { + }{}) + stub := fake.KeepAliveClientTimeoutStub + fakeReturns := fake.keepAliveClientTimeoutReturns + fake.recordInvocation("KeepAliveClientTimeout", []interface{}{}) + fake.keepAliveClientTimeoutMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) KeepAliveClientTimeoutCallCount() int { + fake.keepAliveClientTimeoutMutex.RLock() + defer fake.keepAliveClientTimeoutMutex.RUnlock() + return len(fake.keepAliveClientTimeoutArgsForCall) +} + +func (fake *ConfigProvider) KeepAliveClientTimeoutCalls(stub func() time.Duration) { + fake.keepAliveClientTimeoutMutex.Lock() + defer fake.keepAliveClientTimeoutMutex.Unlock() + fake.KeepAliveClientTimeoutStub = stub +} + +func (fake *ConfigProvider) KeepAliveClientTimeoutReturns(result1 time.Duration) { + 
fake.keepAliveClientTimeoutMutex.Lock() + defer fake.keepAliveClientTimeoutMutex.Unlock() + fake.KeepAliveClientTimeoutStub = nil + fake.keepAliveClientTimeoutReturns = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) KeepAliveClientTimeoutReturnsOnCall(i int, result1 time.Duration) { + fake.keepAliveClientTimeoutMutex.Lock() + defer fake.keepAliveClientTimeoutMutex.Unlock() + fake.KeepAliveClientTimeoutStub = nil + if fake.keepAliveClientTimeoutReturnsOnCall == nil { + fake.keepAliveClientTimeoutReturnsOnCall = make(map[int]struct { + result1 time.Duration + }) + } + fake.keepAliveClientTimeoutReturnsOnCall[i] = struct { + result1 time.Duration + }{result1} +} + +func (fake *ConfigProvider) NetworkName() string { + fake.networkNameMutex.Lock() + ret, specificReturn := fake.networkNameReturnsOnCall[len(fake.networkNameArgsForCall)] + fake.networkNameArgsForCall = append(fake.networkNameArgsForCall, struct { + }{}) + stub := fake.NetworkNameStub + fakeReturns := fake.networkNameReturns + fake.recordInvocation("NetworkName", []interface{}{}) + fake.networkNameMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) NetworkNameCallCount() int { + fake.networkNameMutex.RLock() + defer fake.networkNameMutex.RUnlock() + return len(fake.networkNameArgsForCall) +} + +func (fake *ConfigProvider) NetworkNameCalls(stub func() string) { + fake.networkNameMutex.Lock() + defer fake.networkNameMutex.Unlock() + fake.NetworkNameStub = stub +} + +func (fake *ConfigProvider) NetworkNameReturns(result1 string) { + fake.networkNameMutex.Lock() + defer fake.networkNameMutex.Unlock() + fake.NetworkNameStub = nil + fake.networkNameReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) NetworkNameReturnsOnCall(i int, result1 string) { + fake.networkNameMutex.Lock() + defer fake.networkNameMutex.Unlock() + fake.NetworkNameStub 
= nil + if fake.networkNameReturnsOnCall == nil { + fake.networkNameReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.networkNameReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) NewDefaultChannelConfig(arg1 string) drivera.ChannelConfig { + fake.newDefaultChannelConfigMutex.Lock() + ret, specificReturn := fake.newDefaultChannelConfigReturnsOnCall[len(fake.newDefaultChannelConfigArgsForCall)] + fake.newDefaultChannelConfigArgsForCall = append(fake.newDefaultChannelConfigArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.NewDefaultChannelConfigStub + fakeReturns := fake.newDefaultChannelConfigReturns + fake.recordInvocation("NewDefaultChannelConfig", []interface{}{arg1}) + fake.newDefaultChannelConfigMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) NewDefaultChannelConfigCallCount() int { + fake.newDefaultChannelConfigMutex.RLock() + defer fake.newDefaultChannelConfigMutex.RUnlock() + return len(fake.newDefaultChannelConfigArgsForCall) +} + +func (fake *ConfigProvider) NewDefaultChannelConfigCalls(stub func(string) drivera.ChannelConfig) { + fake.newDefaultChannelConfigMutex.Lock() + defer fake.newDefaultChannelConfigMutex.Unlock() + fake.NewDefaultChannelConfigStub = stub +} + +func (fake *ConfigProvider) NewDefaultChannelConfigArgsForCall(i int) string { + fake.newDefaultChannelConfigMutex.RLock() + defer fake.newDefaultChannelConfigMutex.RUnlock() + argsForCall := fake.newDefaultChannelConfigArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) NewDefaultChannelConfigReturns(result1 drivera.ChannelConfig) { + fake.newDefaultChannelConfigMutex.Lock() + defer fake.newDefaultChannelConfigMutex.Unlock() + fake.NewDefaultChannelConfigStub = nil + fake.newDefaultChannelConfigReturns = struct { + result1 drivera.ChannelConfig + }{result1} +} + +func (fake 
*ConfigProvider) NewDefaultChannelConfigReturnsOnCall(i int, result1 drivera.ChannelConfig) { + fake.newDefaultChannelConfigMutex.Lock() + defer fake.newDefaultChannelConfigMutex.Unlock() + fake.NewDefaultChannelConfigStub = nil + if fake.newDefaultChannelConfigReturnsOnCall == nil { + fake.newDefaultChannelConfigReturnsOnCall = make(map[int]struct { + result1 drivera.ChannelConfig + }) + } + fake.newDefaultChannelConfigReturnsOnCall[i] = struct { + result1 drivera.ChannelConfig + }{result1} +} + +func (fake *ConfigProvider) OrdererConnectionPoolSize() int { + fake.ordererConnectionPoolSizeMutex.Lock() + ret, specificReturn := fake.ordererConnectionPoolSizeReturnsOnCall[len(fake.ordererConnectionPoolSizeArgsForCall)] + fake.ordererConnectionPoolSizeArgsForCall = append(fake.ordererConnectionPoolSizeArgsForCall, struct { + }{}) + stub := fake.OrdererConnectionPoolSizeStub + fakeReturns := fake.ordererConnectionPoolSizeReturns + fake.recordInvocation("OrdererConnectionPoolSize", []interface{}{}) + fake.ordererConnectionPoolSizeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) OrdererConnectionPoolSizeCallCount() int { + fake.ordererConnectionPoolSizeMutex.RLock() + defer fake.ordererConnectionPoolSizeMutex.RUnlock() + return len(fake.ordererConnectionPoolSizeArgsForCall) +} + +func (fake *ConfigProvider) OrdererConnectionPoolSizeCalls(stub func() int) { + fake.ordererConnectionPoolSizeMutex.Lock() + defer fake.ordererConnectionPoolSizeMutex.Unlock() + fake.OrdererConnectionPoolSizeStub = stub +} + +func (fake *ConfigProvider) OrdererConnectionPoolSizeReturns(result1 int) { + fake.ordererConnectionPoolSizeMutex.Lock() + defer fake.ordererConnectionPoolSizeMutex.Unlock() + fake.OrdererConnectionPoolSizeStub = nil + fake.ordererConnectionPoolSizeReturns = struct { + result1 int + }{result1} +} + +func (fake *ConfigProvider) 
OrdererConnectionPoolSizeReturnsOnCall(i int, result1 int) { + fake.ordererConnectionPoolSizeMutex.Lock() + defer fake.ordererConnectionPoolSizeMutex.Unlock() + fake.OrdererConnectionPoolSizeStub = nil + if fake.ordererConnectionPoolSizeReturnsOnCall == nil { + fake.ordererConnectionPoolSizeReturnsOnCall = make(map[int]struct { + result1 int + }) + } + fake.ordererConnectionPoolSizeReturnsOnCall[i] = struct { + result1 int + }{result1} +} + +func (fake *ConfigProvider) Orderers() []*grpc.ConnectionConfig { + fake.orderersMutex.Lock() + ret, specificReturn := fake.orderersReturnsOnCall[len(fake.orderersArgsForCall)] + fake.orderersArgsForCall = append(fake.orderersArgsForCall, struct { + }{}) + stub := fake.OrderersStub + fakeReturns := fake.orderersReturns + fake.recordInvocation("Orderers", []interface{}{}) + fake.orderersMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) OrderersCallCount() int { + fake.orderersMutex.RLock() + defer fake.orderersMutex.RUnlock() + return len(fake.orderersArgsForCall) +} + +func (fake *ConfigProvider) OrderersCalls(stub func() []*grpc.ConnectionConfig) { + fake.orderersMutex.Lock() + defer fake.orderersMutex.Unlock() + fake.OrderersStub = stub +} + +func (fake *ConfigProvider) OrderersReturns(result1 []*grpc.ConnectionConfig) { + fake.orderersMutex.Lock() + defer fake.orderersMutex.Unlock() + fake.OrderersStub = nil + fake.orderersReturns = struct { + result1 []*grpc.ConnectionConfig + }{result1} +} + +func (fake *ConfigProvider) OrderersReturnsOnCall(i int, result1 []*grpc.ConnectionConfig) { + fake.orderersMutex.Lock() + defer fake.orderersMutex.Unlock() + fake.OrderersStub = nil + if fake.orderersReturnsOnCall == nil { + fake.orderersReturnsOnCall = make(map[int]struct { + result1 []*grpc.ConnectionConfig + }) + } + fake.orderersReturnsOnCall[i] = struct { + result1 []*grpc.ConnectionConfig + }{result1} +} + +func 
(fake *ConfigProvider) PickOrderer() *grpc.ConnectionConfig { + fake.pickOrdererMutex.Lock() + ret, specificReturn := fake.pickOrdererReturnsOnCall[len(fake.pickOrdererArgsForCall)] + fake.pickOrdererArgsForCall = append(fake.pickOrdererArgsForCall, struct { + }{}) + stub := fake.PickOrdererStub + fakeReturns := fake.pickOrdererReturns + fake.recordInvocation("PickOrderer", []interface{}{}) + fake.pickOrdererMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) PickOrdererCallCount() int { + fake.pickOrdererMutex.RLock() + defer fake.pickOrdererMutex.RUnlock() + return len(fake.pickOrdererArgsForCall) +} + +func (fake *ConfigProvider) PickOrdererCalls(stub func() *grpc.ConnectionConfig) { + fake.pickOrdererMutex.Lock() + defer fake.pickOrdererMutex.Unlock() + fake.PickOrdererStub = stub +} + +func (fake *ConfigProvider) PickOrdererReturns(result1 *grpc.ConnectionConfig) { + fake.pickOrdererMutex.Lock() + defer fake.pickOrdererMutex.Unlock() + fake.PickOrdererStub = nil + fake.pickOrdererReturns = struct { + result1 *grpc.ConnectionConfig + }{result1} +} + +func (fake *ConfigProvider) PickOrdererReturnsOnCall(i int, result1 *grpc.ConnectionConfig) { + fake.pickOrdererMutex.Lock() + defer fake.pickOrdererMutex.Unlock() + fake.PickOrdererStub = nil + if fake.pickOrdererReturnsOnCall == nil { + fake.pickOrdererReturnsOnCall = make(map[int]struct { + result1 *grpc.ConnectionConfig + }) + } + fake.pickOrdererReturnsOnCall[i] = struct { + result1 *grpc.ConnectionConfig + }{result1} +} + +func (fake *ConfigProvider) PickPeer(arg1 drivera.PeerFunctionType) *grpc.ConnectionConfig { + fake.pickPeerMutex.Lock() + ret, specificReturn := fake.pickPeerReturnsOnCall[len(fake.pickPeerArgsForCall)] + fake.pickPeerArgsForCall = append(fake.pickPeerArgsForCall, struct { + arg1 drivera.PeerFunctionType + }{arg1}) + stub := fake.PickPeerStub + fakeReturns := fake.pickPeerReturns 
+ fake.recordInvocation("PickPeer", []interface{}{arg1}) + fake.pickPeerMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) PickPeerCallCount() int { + fake.pickPeerMutex.RLock() + defer fake.pickPeerMutex.RUnlock() + return len(fake.pickPeerArgsForCall) +} + +func (fake *ConfigProvider) PickPeerCalls(stub func(drivera.PeerFunctionType) *grpc.ConnectionConfig) { + fake.pickPeerMutex.Lock() + defer fake.pickPeerMutex.Unlock() + fake.PickPeerStub = stub +} + +func (fake *ConfigProvider) PickPeerArgsForCall(i int) drivera.PeerFunctionType { + fake.pickPeerMutex.RLock() + defer fake.pickPeerMutex.RUnlock() + argsForCall := fake.pickPeerArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) PickPeerReturns(result1 *grpc.ConnectionConfig) { + fake.pickPeerMutex.Lock() + defer fake.pickPeerMutex.Unlock() + fake.PickPeerStub = nil + fake.pickPeerReturns = struct { + result1 *grpc.ConnectionConfig + }{result1} +} + +func (fake *ConfigProvider) PickPeerReturnsOnCall(i int, result1 *grpc.ConnectionConfig) { + fake.pickPeerMutex.Lock() + defer fake.pickPeerMutex.Unlock() + fake.PickPeerStub = nil + if fake.pickPeerReturnsOnCall == nil { + fake.pickPeerReturnsOnCall = make(map[int]struct { + result1 *grpc.ConnectionConfig + }) + } + fake.pickPeerReturnsOnCall[i] = struct { + result1 *grpc.ConnectionConfig + }{result1} +} + +func (fake *ConfigProvider) SetConfigOrderers(arg1 []*grpc.ConnectionConfig) error { + var arg1Copy []*grpc.ConnectionConfig + if arg1 != nil { + arg1Copy = make([]*grpc.ConnectionConfig, len(arg1)) + copy(arg1Copy, arg1) + } + fake.setConfigOrderersMutex.Lock() + ret, specificReturn := fake.setConfigOrderersReturnsOnCall[len(fake.setConfigOrderersArgsForCall)] + fake.setConfigOrderersArgsForCall = append(fake.setConfigOrderersArgsForCall, struct { + arg1 []*grpc.ConnectionConfig + }{arg1Copy}) + stub := 
fake.SetConfigOrderersStub + fakeReturns := fake.setConfigOrderersReturns + fake.recordInvocation("SetConfigOrderers", []interface{}{arg1Copy}) + fake.setConfigOrderersMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) SetConfigOrderersCallCount() int { + fake.setConfigOrderersMutex.RLock() + defer fake.setConfigOrderersMutex.RUnlock() + return len(fake.setConfigOrderersArgsForCall) +} + +func (fake *ConfigProvider) SetConfigOrderersCalls(stub func([]*grpc.ConnectionConfig) error) { + fake.setConfigOrderersMutex.Lock() + defer fake.setConfigOrderersMutex.Unlock() + fake.SetConfigOrderersStub = stub +} + +func (fake *ConfigProvider) SetConfigOrderersArgsForCall(i int) []*grpc.ConnectionConfig { + fake.setConfigOrderersMutex.RLock() + defer fake.setConfigOrderersMutex.RUnlock() + argsForCall := fake.setConfigOrderersArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) SetConfigOrderersReturns(result1 error) { + fake.setConfigOrderersMutex.Lock() + defer fake.setConfigOrderersMutex.Unlock() + fake.SetConfigOrderersStub = nil + fake.setConfigOrderersReturns = struct { + result1 error + }{result1} +} + +func (fake *ConfigProvider) SetConfigOrderersReturnsOnCall(i int, result1 error) { + fake.setConfigOrderersMutex.Lock() + defer fake.setConfigOrderersMutex.Unlock() + fake.SetConfigOrderersStub = nil + if fake.setConfigOrderersReturnsOnCall == nil { + fake.setConfigOrderersReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.setConfigOrderersReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *ConfigProvider) TLSClientAuthRequired() bool { + fake.tLSClientAuthRequiredMutex.Lock() + ret, specificReturn := fake.tLSClientAuthRequiredReturnsOnCall[len(fake.tLSClientAuthRequiredArgsForCall)] + fake.tLSClientAuthRequiredArgsForCall = append(fake.tLSClientAuthRequiredArgsForCall, struct { + }{}) + stub 
:= fake.TLSClientAuthRequiredStub + fakeReturns := fake.tLSClientAuthRequiredReturns + fake.recordInvocation("TLSClientAuthRequired", []interface{}{}) + fake.tLSClientAuthRequiredMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) TLSClientAuthRequiredCallCount() int { + fake.tLSClientAuthRequiredMutex.RLock() + defer fake.tLSClientAuthRequiredMutex.RUnlock() + return len(fake.tLSClientAuthRequiredArgsForCall) +} + +func (fake *ConfigProvider) TLSClientAuthRequiredCalls(stub func() bool) { + fake.tLSClientAuthRequiredMutex.Lock() + defer fake.tLSClientAuthRequiredMutex.Unlock() + fake.TLSClientAuthRequiredStub = stub +} + +func (fake *ConfigProvider) TLSClientAuthRequiredReturns(result1 bool) { + fake.tLSClientAuthRequiredMutex.Lock() + defer fake.tLSClientAuthRequiredMutex.Unlock() + fake.TLSClientAuthRequiredStub = nil + fake.tLSClientAuthRequiredReturns = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) TLSClientAuthRequiredReturnsOnCall(i int, result1 bool) { + fake.tLSClientAuthRequiredMutex.Lock() + defer fake.tLSClientAuthRequiredMutex.Unlock() + fake.TLSClientAuthRequiredStub = nil + if fake.tLSClientAuthRequiredReturnsOnCall == nil { + fake.tLSClientAuthRequiredReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLSClientAuthRequiredReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) TLSClientCertFile() string { + fake.tLSClientCertFileMutex.Lock() + ret, specificReturn := fake.tLSClientCertFileReturnsOnCall[len(fake.tLSClientCertFileArgsForCall)] + fake.tLSClientCertFileArgsForCall = append(fake.tLSClientCertFileArgsForCall, struct { + }{}) + stub := fake.TLSClientCertFileStub + fakeReturns := fake.tLSClientCertFileReturns + fake.recordInvocation("TLSClientCertFile", []interface{}{}) + fake.tLSClientCertFileMutex.Unlock() + if stub != nil { + return stub() + } + if 
specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) TLSClientCertFileCallCount() int { + fake.tLSClientCertFileMutex.RLock() + defer fake.tLSClientCertFileMutex.RUnlock() + return len(fake.tLSClientCertFileArgsForCall) +} + +func (fake *ConfigProvider) TLSClientCertFileCalls(stub func() string) { + fake.tLSClientCertFileMutex.Lock() + defer fake.tLSClientCertFileMutex.Unlock() + fake.TLSClientCertFileStub = stub +} + +func (fake *ConfigProvider) TLSClientCertFileReturns(result1 string) { + fake.tLSClientCertFileMutex.Lock() + defer fake.tLSClientCertFileMutex.Unlock() + fake.TLSClientCertFileStub = nil + fake.tLSClientCertFileReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) TLSClientCertFileReturnsOnCall(i int, result1 string) { + fake.tLSClientCertFileMutex.Lock() + defer fake.tLSClientCertFileMutex.Unlock() + fake.TLSClientCertFileStub = nil + if fake.tLSClientCertFileReturnsOnCall == nil { + fake.tLSClientCertFileReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.tLSClientCertFileReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) TLSClientKeyFile() string { + fake.tLSClientKeyFileMutex.Lock() + ret, specificReturn := fake.tLSClientKeyFileReturnsOnCall[len(fake.tLSClientKeyFileArgsForCall)] + fake.tLSClientKeyFileArgsForCall = append(fake.tLSClientKeyFileArgsForCall, struct { + }{}) + stub := fake.TLSClientKeyFileStub + fakeReturns := fake.tLSClientKeyFileReturns + fake.recordInvocation("TLSClientKeyFile", []interface{}{}) + fake.tLSClientKeyFileMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) TLSClientKeyFileCallCount() int { + fake.tLSClientKeyFileMutex.RLock() + defer fake.tLSClientKeyFileMutex.RUnlock() + return len(fake.tLSClientKeyFileArgsForCall) +} + +func (fake *ConfigProvider) 
TLSClientKeyFileCalls(stub func() string) { + fake.tLSClientKeyFileMutex.Lock() + defer fake.tLSClientKeyFileMutex.Unlock() + fake.TLSClientKeyFileStub = stub +} + +func (fake *ConfigProvider) TLSClientKeyFileReturns(result1 string) { + fake.tLSClientKeyFileMutex.Lock() + defer fake.tLSClientKeyFileMutex.Unlock() + fake.TLSClientKeyFileStub = nil + fake.tLSClientKeyFileReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) TLSClientKeyFileReturnsOnCall(i int, result1 string) { + fake.tLSClientKeyFileMutex.Lock() + defer fake.tLSClientKeyFileMutex.Unlock() + fake.TLSClientKeyFileStub = nil + if fake.tLSClientKeyFileReturnsOnCall == nil { + fake.tLSClientKeyFileReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.tLSClientKeyFileReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) TLSEnabled() bool { + fake.tLSEnabledMutex.Lock() + ret, specificReturn := fake.tLSEnabledReturnsOnCall[len(fake.tLSEnabledArgsForCall)] + fake.tLSEnabledArgsForCall = append(fake.tLSEnabledArgsForCall, struct { + }{}) + stub := fake.TLSEnabledStub + fakeReturns := fake.tLSEnabledReturns + fake.recordInvocation("TLSEnabled", []interface{}{}) + fake.tLSEnabledMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) TLSEnabledCallCount() int { + fake.tLSEnabledMutex.RLock() + defer fake.tLSEnabledMutex.RUnlock() + return len(fake.tLSEnabledArgsForCall) +} + +func (fake *ConfigProvider) TLSEnabledCalls(stub func() bool) { + fake.tLSEnabledMutex.Lock() + defer fake.tLSEnabledMutex.Unlock() + fake.TLSEnabledStub = stub +} + +func (fake *ConfigProvider) TLSEnabledReturns(result1 bool) { + fake.tLSEnabledMutex.Lock() + defer fake.tLSEnabledMutex.Unlock() + fake.TLSEnabledStub = nil + fake.tLSEnabledReturns = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) TLSEnabledReturnsOnCall(i int, 
result1 bool) { + fake.tLSEnabledMutex.Lock() + defer fake.tLSEnabledMutex.Unlock() + fake.TLSEnabledStub = nil + if fake.tLSEnabledReturnsOnCall == nil { + fake.tLSEnabledReturnsOnCall = make(map[int]struct { + result1 bool + }) + } + fake.tLSEnabledReturnsOnCall[i] = struct { + result1 bool + }{result1} +} + +func (fake *ConfigProvider) TLSServerHostOverride() string { + fake.tLSServerHostOverrideMutex.Lock() + ret, specificReturn := fake.tLSServerHostOverrideReturnsOnCall[len(fake.tLSServerHostOverrideArgsForCall)] + fake.tLSServerHostOverrideArgsForCall = append(fake.tLSServerHostOverrideArgsForCall, struct { + }{}) + stub := fake.TLSServerHostOverrideStub + fakeReturns := fake.tLSServerHostOverrideReturns + fake.recordInvocation("TLSServerHostOverride", []interface{}{}) + fake.tLSServerHostOverrideMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) TLSServerHostOverrideCallCount() int { + fake.tLSServerHostOverrideMutex.RLock() + defer fake.tLSServerHostOverrideMutex.RUnlock() + return len(fake.tLSServerHostOverrideArgsForCall) +} + +func (fake *ConfigProvider) TLSServerHostOverrideCalls(stub func() string) { + fake.tLSServerHostOverrideMutex.Lock() + defer fake.tLSServerHostOverrideMutex.Unlock() + fake.TLSServerHostOverrideStub = stub +} + +func (fake *ConfigProvider) TLSServerHostOverrideReturns(result1 string) { + fake.tLSServerHostOverrideMutex.Lock() + defer fake.tLSServerHostOverrideMutex.Unlock() + fake.TLSServerHostOverrideStub = nil + fake.tLSServerHostOverrideReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) TLSServerHostOverrideReturnsOnCall(i int, result1 string) { + fake.tLSServerHostOverrideMutex.Lock() + defer fake.tLSServerHostOverrideMutex.Unlock() + fake.TLSServerHostOverrideStub = nil + if fake.tLSServerHostOverrideReturnsOnCall == nil { + fake.tLSServerHostOverrideReturnsOnCall = 
make(map[int]struct { + result1 string + }) + } + fake.tLSServerHostOverrideReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) TranslatePath(arg1 string) string { + fake.translatePathMutex.Lock() + ret, specificReturn := fake.translatePathReturnsOnCall[len(fake.translatePathArgsForCall)] + fake.translatePathArgsForCall = append(fake.translatePathArgsForCall, struct { + arg1 string + }{arg1}) + stub := fake.TranslatePathStub + fakeReturns := fake.translatePathReturns + fake.recordInvocation("TranslatePath", []interface{}{arg1}) + fake.translatePathMutex.Unlock() + if stub != nil { + return stub(arg1) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) TranslatePathCallCount() int { + fake.translatePathMutex.RLock() + defer fake.translatePathMutex.RUnlock() + return len(fake.translatePathArgsForCall) +} + +func (fake *ConfigProvider) TranslatePathCalls(stub func(string) string) { + fake.translatePathMutex.Lock() + defer fake.translatePathMutex.Unlock() + fake.TranslatePathStub = stub +} + +func (fake *ConfigProvider) TranslatePathArgsForCall(i int) string { + fake.translatePathMutex.RLock() + defer fake.translatePathMutex.RUnlock() + argsForCall := fake.translatePathArgsForCall[i] + return argsForCall.arg1 +} + +func (fake *ConfigProvider) TranslatePathReturns(result1 string) { + fake.translatePathMutex.Lock() + defer fake.translatePathMutex.Unlock() + fake.TranslatePathStub = nil + fake.translatePathReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) TranslatePathReturnsOnCall(i int, result1 string) { + fake.translatePathMutex.Lock() + defer fake.translatePathMutex.Unlock() + fake.TranslatePathStub = nil + if fake.translatePathReturnsOnCall == nil { + fake.translatePathReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.translatePathReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake 
*ConfigProvider) UnmarshalKey(arg1 string, arg2 interface{}) error { + fake.unmarshalKeyMutex.Lock() + ret, specificReturn := fake.unmarshalKeyReturnsOnCall[len(fake.unmarshalKeyArgsForCall)] + fake.unmarshalKeyArgsForCall = append(fake.unmarshalKeyArgsForCall, struct { + arg1 string + arg2 interface{} + }{arg1, arg2}) + stub := fake.UnmarshalKeyStub + fakeReturns := fake.unmarshalKeyReturns + fake.recordInvocation("UnmarshalKey", []interface{}{arg1, arg2}) + fake.unmarshalKeyMutex.Unlock() + if stub != nil { + return stub(arg1, arg2) + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) UnmarshalKeyCallCount() int { + fake.unmarshalKeyMutex.RLock() + defer fake.unmarshalKeyMutex.RUnlock() + return len(fake.unmarshalKeyArgsForCall) +} + +func (fake *ConfigProvider) UnmarshalKeyCalls(stub func(string, interface{}) error) { + fake.unmarshalKeyMutex.Lock() + defer fake.unmarshalKeyMutex.Unlock() + fake.UnmarshalKeyStub = stub +} + +func (fake *ConfigProvider) UnmarshalKeyArgsForCall(i int) (string, interface{}) { + fake.unmarshalKeyMutex.RLock() + defer fake.unmarshalKeyMutex.RUnlock() + argsForCall := fake.unmarshalKeyArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *ConfigProvider) UnmarshalKeyReturns(result1 error) { + fake.unmarshalKeyMutex.Lock() + defer fake.unmarshalKeyMutex.Unlock() + fake.UnmarshalKeyStub = nil + fake.unmarshalKeyReturns = struct { + result1 error + }{result1} +} + +func (fake *ConfigProvider) UnmarshalKeyReturnsOnCall(i int, result1 error) { + fake.unmarshalKeyMutex.Lock() + defer fake.unmarshalKeyMutex.Unlock() + fake.UnmarshalKeyStub = nil + if fake.unmarshalKeyReturnsOnCall == nil { + fake.unmarshalKeyReturnsOnCall = make(map[int]struct { + result1 error + }) + } + fake.unmarshalKeyReturnsOnCall[i] = struct { + result1 error + }{result1} +} + +func (fake *ConfigProvider) VaultPersistencePrefix() string { + fake.vaultPersistencePrefixMutex.Lock() + ret, 
specificReturn := fake.vaultPersistencePrefixReturnsOnCall[len(fake.vaultPersistencePrefixArgsForCall)] + fake.vaultPersistencePrefixArgsForCall = append(fake.vaultPersistencePrefixArgsForCall, struct { + }{}) + stub := fake.VaultPersistencePrefixStub + fakeReturns := fake.vaultPersistencePrefixReturns + fake.recordInvocation("VaultPersistencePrefix", []interface{}{}) + fake.vaultPersistencePrefixMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) VaultPersistencePrefixCallCount() int { + fake.vaultPersistencePrefixMutex.RLock() + defer fake.vaultPersistencePrefixMutex.RUnlock() + return len(fake.vaultPersistencePrefixArgsForCall) +} + +func (fake *ConfigProvider) VaultPersistencePrefixCalls(stub func() string) { + fake.vaultPersistencePrefixMutex.Lock() + defer fake.vaultPersistencePrefixMutex.Unlock() + fake.VaultPersistencePrefixStub = stub +} + +func (fake *ConfigProvider) VaultPersistencePrefixReturns(result1 string) { + fake.vaultPersistencePrefixMutex.Lock() + defer fake.vaultPersistencePrefixMutex.Unlock() + fake.VaultPersistencePrefixStub = nil + fake.vaultPersistencePrefixReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) VaultPersistencePrefixReturnsOnCall(i int, result1 string) { + fake.vaultPersistencePrefixMutex.Lock() + defer fake.vaultPersistencePrefixMutex.Unlock() + fake.VaultPersistencePrefixStub = nil + if fake.vaultPersistencePrefixReturnsOnCall == nil { + fake.vaultPersistencePrefixReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.vaultPersistencePrefixReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) VaultPersistenceType() string { + fake.vaultPersistenceTypeMutex.Lock() + ret, specificReturn := fake.vaultPersistenceTypeReturnsOnCall[len(fake.vaultPersistenceTypeArgsForCall)] + fake.vaultPersistenceTypeArgsForCall = 
append(fake.vaultPersistenceTypeArgsForCall, struct { + }{}) + stub := fake.VaultPersistenceTypeStub + fakeReturns := fake.vaultPersistenceTypeReturns + fake.recordInvocation("VaultPersistenceType", []interface{}{}) + fake.vaultPersistenceTypeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) VaultPersistenceTypeCallCount() int { + fake.vaultPersistenceTypeMutex.RLock() + defer fake.vaultPersistenceTypeMutex.RUnlock() + return len(fake.vaultPersistenceTypeArgsForCall) +} + +func (fake *ConfigProvider) VaultPersistenceTypeCalls(stub func() string) { + fake.vaultPersistenceTypeMutex.Lock() + defer fake.vaultPersistenceTypeMutex.Unlock() + fake.VaultPersistenceTypeStub = stub +} + +func (fake *ConfigProvider) VaultPersistenceTypeReturns(result1 string) { + fake.vaultPersistenceTypeMutex.Lock() + defer fake.vaultPersistenceTypeMutex.Unlock() + fake.VaultPersistenceTypeStub = nil + fake.vaultPersistenceTypeReturns = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) VaultPersistenceTypeReturnsOnCall(i int, result1 string) { + fake.vaultPersistenceTypeMutex.Lock() + defer fake.vaultPersistenceTypeMutex.Unlock() + fake.VaultPersistenceTypeStub = nil + if fake.vaultPersistenceTypeReturnsOnCall == nil { + fake.vaultPersistenceTypeReturnsOnCall = make(map[int]struct { + result1 string + }) + } + fake.vaultPersistenceTypeReturnsOnCall[i] = struct { + result1 string + }{result1} +} + +func (fake *ConfigProvider) VaultTXStoreCacheSize() int { + fake.vaultTXStoreCacheSizeMutex.Lock() + ret, specificReturn := fake.vaultTXStoreCacheSizeReturnsOnCall[len(fake.vaultTXStoreCacheSizeArgsForCall)] + fake.vaultTXStoreCacheSizeArgsForCall = append(fake.vaultTXStoreCacheSizeArgsForCall, struct { + }{}) + stub := fake.VaultTXStoreCacheSizeStub + fakeReturns := fake.vaultTXStoreCacheSizeReturns + fake.recordInvocation("VaultTXStoreCacheSize", 
[]interface{}{}) + fake.vaultTXStoreCacheSizeMutex.Unlock() + if stub != nil { + return stub() + } + if specificReturn { + return ret.result1 + } + return fakeReturns.result1 +} + +func (fake *ConfigProvider) VaultTXStoreCacheSizeCallCount() int { + fake.vaultTXStoreCacheSizeMutex.RLock() + defer fake.vaultTXStoreCacheSizeMutex.RUnlock() + return len(fake.vaultTXStoreCacheSizeArgsForCall) +} + +func (fake *ConfigProvider) VaultTXStoreCacheSizeCalls(stub func() int) { + fake.vaultTXStoreCacheSizeMutex.Lock() + defer fake.vaultTXStoreCacheSizeMutex.Unlock() + fake.VaultTXStoreCacheSizeStub = stub +} + +func (fake *ConfigProvider) VaultTXStoreCacheSizeReturns(result1 int) { + fake.vaultTXStoreCacheSizeMutex.Lock() + defer fake.vaultTXStoreCacheSizeMutex.Unlock() + fake.VaultTXStoreCacheSizeStub = nil + fake.vaultTXStoreCacheSizeReturns = struct { + result1 int + }{result1} +} + +func (fake *ConfigProvider) VaultTXStoreCacheSizeReturnsOnCall(i int, result1 int) { + fake.vaultTXStoreCacheSizeMutex.Lock() + defer fake.vaultTXStoreCacheSizeMutex.Unlock() + fake.VaultTXStoreCacheSizeStub = nil + if fake.vaultTXStoreCacheSizeReturnsOnCall == nil { + fake.vaultTXStoreCacheSizeReturnsOnCall = make(map[int]struct { + result1 int + }) + } + fake.vaultTXStoreCacheSizeReturnsOnCall[i] = struct { + result1 int + }{result1} +} + +func (fake *ConfigProvider) Invocations() map[string][][]interface{} { + fake.invocationsMutex.RLock() + defer fake.invocationsMutex.RUnlock() + fake.broadcastNumRetriesMutex.RLock() + defer fake.broadcastNumRetriesMutex.RUnlock() + fake.broadcastRetryIntervalMutex.RLock() + defer fake.broadcastRetryIntervalMutex.RUnlock() + fake.channelMutex.RLock() + defer fake.channelMutex.RUnlock() + fake.channelIDsMutex.RLock() + defer fake.channelIDsMutex.RUnlock() + fake.clientConnTimeoutMutex.RLock() + defer fake.clientConnTimeoutMutex.RUnlock() + fake.configFileUsedMutex.RLock() + defer fake.configFileUsedMutex.RUnlock() + fake.defaultChannelMutex.RLock() + defer 
fake.defaultChannelMutex.RUnlock() + fake.getBoolMutex.RLock() + defer fake.getBoolMutex.RUnlock() + fake.getDurationMutex.RLock() + defer fake.getDurationMutex.RUnlock() + fake.getIntMutex.RLock() + defer fake.getIntMutex.RUnlock() + fake.getPathMutex.RLock() + defer fake.getPathMutex.RUnlock() + fake.getStringMutex.RLock() + defer fake.getStringMutex.RUnlock() + fake.getStringSliceMutex.RLock() + defer fake.getStringSliceMutex.RUnlock() + fake.isChannelQuietMutex.RLock() + defer fake.isChannelQuietMutex.RUnlock() + fake.isSetMutex.RLock() + defer fake.isSetMutex.RUnlock() + fake.keepAliveClientIntervalMutex.RLock() + defer fake.keepAliveClientIntervalMutex.RUnlock() + fake.keepAliveClientTimeoutMutex.RLock() + defer fake.keepAliveClientTimeoutMutex.RUnlock() + fake.networkNameMutex.RLock() + defer fake.networkNameMutex.RUnlock() + fake.newDefaultChannelConfigMutex.RLock() + defer fake.newDefaultChannelConfigMutex.RUnlock() + fake.ordererConnectionPoolSizeMutex.RLock() + defer fake.ordererConnectionPoolSizeMutex.RUnlock() + fake.orderersMutex.RLock() + defer fake.orderersMutex.RUnlock() + fake.pickOrdererMutex.RLock() + defer fake.pickOrdererMutex.RUnlock() + fake.pickPeerMutex.RLock() + defer fake.pickPeerMutex.RUnlock() + fake.setConfigOrderersMutex.RLock() + defer fake.setConfigOrderersMutex.RUnlock() + fake.tLSClientAuthRequiredMutex.RLock() + defer fake.tLSClientAuthRequiredMutex.RUnlock() + fake.tLSClientCertFileMutex.RLock() + defer fake.tLSClientCertFileMutex.RUnlock() + fake.tLSClientKeyFileMutex.RLock() + defer fake.tLSClientKeyFileMutex.RUnlock() + fake.tLSEnabledMutex.RLock() + defer fake.tLSEnabledMutex.RUnlock() + fake.tLSServerHostOverrideMutex.RLock() + defer fake.tLSServerHostOverrideMutex.RUnlock() + fake.translatePathMutex.RLock() + defer fake.translatePathMutex.RUnlock() + fake.unmarshalKeyMutex.RLock() + defer fake.unmarshalKeyMutex.RUnlock() + fake.vaultPersistencePrefixMutex.RLock() + defer fake.vaultPersistencePrefixMutex.RUnlock() + 
fake.vaultPersistenceTypeMutex.RLock() + defer fake.vaultPersistenceTypeMutex.RUnlock() + fake.vaultTXStoreCacheSizeMutex.RLock() + defer fake.vaultTXStoreCacheSizeMutex.RUnlock() + copiedInvocations := map[string][][]interface{}{} + for key, value := range fake.invocations { + copiedInvocations[key] = value + } + return copiedInvocations +} + +func (fake *ConfigProvider) recordInvocation(key string, args []interface{}) { + fake.invocationsMutex.Lock() + defer fake.invocationsMutex.Unlock() + if fake.invocations == nil { + fake.invocations = map[string][][]interface{}{} + } + if fake.invocations[key] == nil { + fake.invocations[key] = [][]interface{}{} + } + fake.invocations[key] = append(fake.invocations[key], args) +} + +var _ driver.ConfigProvider = new(ConfigProvider) diff --git a/platform/fabric/core/generic/msp/idemix/loader.go b/platform/fabric/core/generic/msp/idemix/loader.go index 2100b3a0b..dac3c3354 100644 --- a/platform/fabric/core/generic/msp/idemix/loader.go +++ b/platform/fabric/core/generic/msp/idemix/loader.go @@ -20,14 +20,17 @@ const ( MSPType = "idemix" ) -type IdentityLoader struct{} +type IdentityLoader struct { + KVS KVS + SignerService driver.SignerService +} func (i *IdentityLoader) Load(manager driver.Manager, c config.MSP) error { conf, err := msp2.GetLocalMspConfigWithType(manager.Config().TranslatePath(c.Path), nil, c.MSPID, c.MSPType) if err != nil { return errors.Wrapf(err, "failed reading idemix msp configuration from [%s]", manager.Config().TranslatePath(c.Path)) } - provider, err := NewProviderWithAnyPolicy(conf, manager.ServiceProvider()) + provider, err := NewProviderWithAnyPolicy(conf, i.KVS, i.SignerService) if err != nil { return errors.Wrapf(err, "failed instantiating idemix msp provider from [%s]", manager.Config().TranslatePath(c.Path)) } diff --git a/platform/fabric/core/generic/msp/idemix/provider.go b/platform/fabric/core/generic/msp/idemix/provider.go index f1897503f..e79ae383f 100644 --- 
a/platform/fabric/core/generic/msp/idemix/provider.go +++ b/platform/fabric/core/generic/msp/idemix/provider.go @@ -8,25 +8,21 @@ package idemix import ( "fmt" - "reflect" "strconv" - "go.uber.org/zap/zapcore" - "github.com/IBM/idemix" bccsp "github.com/IBM/idemix/bccsp/types" "github.com/IBM/idemix/idemixmsp" math "github.com/IBM/mathlib" "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/proto" - driver2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/driver" + mspdriver "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/hash" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs" "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" m "github.com/hyperledger/fabric-protos-go/msp" "github.com/pkg/errors" + "go.uber.org/zap/zapcore" ) var logger = flogging.MustGetLogger("fabric-sdk.msp.idemix") @@ -40,65 +36,59 @@ const ( Any bccsp.SignatureType = 100 ) -type SignerService interface { - RegisterSigner(identity view.Identity, signer driver.Signer, verifier driver.Verifier) error -} - -func GetSignerService(ctx view2.ServiceProvider) SignerService { - s, err := ctx.GetService(reflect.TypeOf((*SignerService)(nil))) - if err != nil { - panic(err) - } - return s.(SignerService) +type KVS interface { + Exists(id string) bool + Put(id string, state interface{}) error 
+ Get(id string, state interface{}) error } type Provider struct { *Idemix userKey bccsp.Key conf idemixmsp.IdemixMSPConfig - SignerService SignerService + SignerService mspdriver.SignerService sigType bccsp.SignatureType verType bccsp.VerificationType } -func NewProviderWithEidRhNymPolicy(conf1 *m.MSPConfig, sp view2.ServiceProvider) (*Provider, error) { - return NewProviderWithSigType(conf1, sp, bccsp.EidNymRhNym) +func NewProviderWithEidRhNymPolicy(conf1 *m.MSPConfig, KVS KVS, sp mspdriver.SignerService) (*Provider, error) { + return NewProviderWithSigType(conf1, KVS, sp, bccsp.EidNymRhNym) } -func NewProviderWithStandardPolicy(conf1 *m.MSPConfig, sp view2.ServiceProvider) (*Provider, error) { - return NewProviderWithSigType(conf1, sp, bccsp.Standard) +func NewProviderWithStandardPolicy(conf1 *m.MSPConfig, KVS KVS, sp mspdriver.SignerService) (*Provider, error) { + return NewProviderWithSigType(conf1, KVS, sp, bccsp.Standard) } -func NewProviderWithAnyPolicy(conf1 *m.MSPConfig, sp view2.ServiceProvider) (*Provider, error) { - return NewProviderWithSigType(conf1, sp, Any) +func NewProviderWithAnyPolicy(conf1 *m.MSPConfig, KVS KVS, sp mspdriver.SignerService) (*Provider, error) { + return NewProviderWithSigType(conf1, KVS, sp, Any) } -func NewProviderWithAnyPolicyAndCurve(conf1 *m.MSPConfig, sp view2.ServiceProvider, curveID math.CurveID) (*Provider, error) { - cryptoProvider, err := NewKSVBCCSP(kvs.GetService(sp), curveID, false) +func NewProviderWithAnyPolicyAndCurve(conf1 *m.MSPConfig, KVS KVS, sp mspdriver.SignerService, curveID math.CurveID) (*Provider, error) { + cryptoProvider, err := NewKSVBCCSP(KVS, curveID, false) if err != nil { return nil, err } - return NewProvider(conf1, GetSignerService(sp), Any, cryptoProvider) + return NewProvider(conf1, sp, Any, cryptoProvider) } -func NewProviderWithSigType(conf1 *m.MSPConfig, sp view2.ServiceProvider, sigType bccsp.SignatureType) (*Provider, error) { - cryptoProvider, err := NewKSVBCCSP(kvs.GetService(sp), 
math.FP256BN_AMCL, false) +func NewProviderWithSigType(conf1 *m.MSPConfig, KVS KVS, sp mspdriver.SignerService, sigType bccsp.SignatureType) (*Provider, error) { + cryptoProvider, err := NewKSVBCCSP(KVS, math.FP256BN_AMCL, false) if err != nil { return nil, err } - return NewProvider(conf1, GetSignerService(sp), sigType, cryptoProvider) + return NewProvider(conf1, sp, sigType, cryptoProvider) } -func NewProviderWithSigTypeAncCurve(conf1 *m.MSPConfig, sp view2.ServiceProvider, sigType bccsp.SignatureType, curveID math.CurveID) (*Provider, error) { - cryptoProvider, err := NewKSVBCCSP(kvs.GetService(sp), curveID, false) +func NewProviderWithSigTypeAncCurve(conf1 *m.MSPConfig, KVS KVS, sp mspdriver.SignerService, sigType bccsp.SignatureType, curveID math.CurveID) (*Provider, error) { + cryptoProvider, err := NewKSVBCCSP(KVS, curveID, false) if err != nil { return nil, err } - return NewProvider(conf1, GetSignerService(sp), sigType, cryptoProvider) + return NewProvider(conf1, sp, sigType, cryptoProvider) } -func NewProvider(conf1 *m.MSPConfig, signerService SignerService, sigType bccsp.SignatureType, cryptoProvider bccsp.BCCSP) (*Provider, error) { +func NewProvider(conf1 *m.MSPConfig, signerService mspdriver.SignerService, sigType bccsp.SignatureType, cryptoProvider bccsp.BCCSP) (*Provider, error) { logger.Debugf("Setting up Idemix-based MSP instance") if conf1 == nil { @@ -215,7 +205,7 @@ func NewProvider(conf1 *m.MSPConfig, signerService SignerService, sigType bccsp. 
}, nil } -func (p *Provider) Identity(opts *driver2.IdentityOptions) (view.Identity, []byte, error) { +func (p *Provider) Identity(opts *driver.IdentityOptions) (view.Identity, []byte, error) { // Derive NymPublicKey nymKey, err := p.Csp.KeyDeriv( p.userKey, diff --git a/platform/fabric/core/generic/msp/idemix/provider_test.go b/platform/fabric/core/generic/msp/idemix/provider_test.go index 88bdae53a..8bef2b2db 100644 --- a/platform/fabric/core/generic/msp/idemix/provider_test.go +++ b/platform/fabric/core/generic/msp/idemix/provider_test.go @@ -13,8 +13,8 @@ import ( bccsp "github.com/IBM/idemix/bccsp/types" math "github.com/IBM/mathlib" idemix2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/idemix" + sig2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/sig" driver2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - sig2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/core/sig" _ "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/db/driver/memory" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs/mock" @@ -29,21 +29,21 @@ func TestProvider(t *testing.T) { kvss, err := kvs.NewWithConfig(registry, "memory", "", &mock.ConfigProvider{}) assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - sigService := sig2.NewSignService(registry, nil, kvss) + sigService := sig2.NewService(sig2.NewMultiplexDeserializer(), kvss) assert.NoError(t, registry.RegisterService(sigService)) config, err := msp2.GetLocalMspConfigWithType("./testdata/idemix", nil, "idemix", "idemix") assert.NoError(t, err) - p, err := idemix2.NewProviderWithEidRhNymPolicy(config, registry) + p, err := 
idemix2.NewProviderWithEidRhNymPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p) - p, err = idemix2.NewProviderWithSigType(config, registry, bccsp.Standard) + p, err = idemix2.NewProviderWithSigType(config, kvss, sigService, bccsp.Standard) assert.NoError(t, err) assert.NotNil(t, p) - p, err = idemix2.NewProviderWithSigType(config, registry, bccsp.EidNymRhNym) + p, err = idemix2.NewProviderWithSigType(config, kvss, sigService, bccsp.EidNymRhNym) assert.NoError(t, err) assert.NotNil(t, p) } @@ -54,13 +54,13 @@ func TestIdentityWithEidRhNymPolicy(t *testing.T) { kvss, err := kvs.NewWithConfig(registry, "memory", "", &mock.ConfigProvider{}) assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - sigService := sig2.NewSignService(registry, nil, kvss) + sigService := sig2.NewService(sig2.NewMultiplexDeserializer(), kvss) assert.NoError(t, registry.RegisterService(sigService)) config, err := msp2.GetLocalMspConfigWithType("./testdata/idemix", nil, "idemix", "idemix") assert.NoError(t, err) - p, err := idemix2.NewProviderWithEidRhNymPolicy(config, registry) + p, err := idemix2.NewProviderWithEidRhNymPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p) @@ -86,7 +86,7 @@ func TestIdentityWithEidRhNymPolicy(t *testing.T) { assert.NoError(t, err) assert.NoError(t, verifier.Verify([]byte("hello world!!!"), sigma)) - p, err = idemix2.NewProviderWithAnyPolicy(config, registry) + p, err = idemix2.NewProviderWithAnyPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p) @@ -119,13 +119,13 @@ func TestIdentityStandard(t *testing.T) { kvss, err := kvs.NewWithConfig(registry, "memory", "", &mock.ConfigProvider{}) assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - sigService := sig2.NewSignService(registry, nil, kvss) + sigService := sig2.NewService(sig2.NewMultiplexDeserializer(), kvss) assert.NoError(t, registry.RegisterService(sigService)) config, err := 
msp2.GetLocalMspConfigWithType("./testdata/idemix", nil, "idemix", "idemix") assert.NoError(t, err) - p, err := idemix2.NewProviderWithSigType(config, registry, bccsp.Standard) + p, err := idemix2.NewProviderWithSigType(config, kvss, sigService, bccsp.Standard) assert.NoError(t, err) assert.NotNil(t, p) @@ -143,7 +143,7 @@ func TestIdentityStandard(t *testing.T) { assert.NoError(t, err) assert.NoError(t, verifier.Verify([]byte("hello world!!!"), sigma)) - p, err = idemix2.NewProviderWithStandardPolicy(config, registry) + p, err = idemix2.NewProviderWithStandardPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p) @@ -161,7 +161,7 @@ func TestIdentityStandard(t *testing.T) { assert.NoError(t, err) assert.NoError(t, verifier.Verify([]byte("hello world!!!"), sigma)) - p, err = idemix2.NewProviderWithAnyPolicy(config, registry) + p, err = idemix2.NewProviderWithAnyPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p) @@ -186,18 +186,18 @@ func TestAuditWithEidRhNymPolicy(t *testing.T) { kvss, err := kvs.NewWithConfig(registry, "memory", "", &mock.ConfigProvider{}) assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - sigService := sig2.NewSignService(registry, nil, kvss) + sigService := sig2.NewService(sig2.NewMultiplexDeserializer(), kvss) assert.NoError(t, registry.RegisterService(sigService)) config, err := msp2.GetLocalMspConfigWithType("./testdata/idemix", nil, "idemix", "idemix") assert.NoError(t, err) - p, err := idemix2.NewProviderWithEidRhNymPolicy(config, registry) + p, err := idemix2.NewProviderWithEidRhNymPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p) config, err = msp2.GetLocalMspConfigWithType("./testdata/idemix2", nil, "idemix", "idemix") assert.NoError(t, err) - p2, err := idemix2.NewProviderWithEidRhNymPolicy(config, registry) + p2, err := idemix2.NewProviderWithEidRhNymPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p2) @@ -228,18 
+228,18 @@ func TestProvider_DeserializeSigner(t *testing.T) { kvss, err := kvs.NewWithConfig(registry, "memory", "", &mock.ConfigProvider{}) assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - sigService := sig2.NewSignService(registry, nil, kvss) + sigService := sig2.NewService(sig2.NewMultiplexDeserializer(), kvss) assert.NoError(t, registry.RegisterService(sigService)) config, err := msp2.GetLocalMspConfigWithType("./testdata/sameissuer/idemix", nil, "idemix", "idemix") assert.NoError(t, err) - p, err := idemix2.NewProviderWithEidRhNymPolicy(config, registry) + p, err := idemix2.NewProviderWithEidRhNymPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p) config, err = msp2.GetLocalMspConfigWithType("./testdata/sameissuer/idemix2", nil, "idemix", "idemix") assert.NoError(t, err) - p2, err := idemix2.NewProviderWithEidRhNymPolicy(config, registry) + p2, err := idemix2.NewProviderWithEidRhNymPolicy(config, kvss, sigService) assert.NoError(t, err) assert.NotNil(t, p2) @@ -266,8 +266,7 @@ func TestProvider_DeserializeSigner(t *testing.T) { assert.NoError(t, err) // this must work - des, err := sig2.NewMultiplexDeserializer(registry) - assert.NoError(t, err) + des := sig2.NewMultiplexDeserializer() des.AddDeserializer(p) des.AddDeserializer(p2) signer, err = des.DeserializeSigner(id) @@ -285,13 +284,13 @@ func TestIdentityFromFabricCA(t *testing.T) { kvss, err := kvs.NewWithConfig(registry, "memory", "", &mock.ConfigProvider{}) assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - sigService := sig2.NewSignService(registry, nil, kvss) + sigService := sig2.NewService(sig2.NewMultiplexDeserializer(), kvss) assert.NoError(t, registry.RegisterService(sigService)) config, err := idemix2.GetLocalMspConfigWithType("./testdata/charlie.ExtraId2", "charlie.ExtraId2") assert.NoError(t, err) - p, err := idemix2.NewProviderWithSigTypeAncCurve(config, registry, bccsp.Standard, math.BN254) + p, err := 
idemix2.NewProviderWithSigTypeAncCurve(config, kvss, sigService, bccsp.Standard, math.BN254) assert.NoError(t, err) assert.NotNil(t, p) @@ -309,7 +308,7 @@ func TestIdentityFromFabricCA(t *testing.T) { assert.NoError(t, err) assert.NoError(t, verifier.Verify([]byte("hello world!!!"), sigma)) - p, err = idemix2.NewProviderWithSigTypeAncCurve(config, registry, bccsp.Standard, math.BN254) + p, err = idemix2.NewProviderWithSigTypeAncCurve(config, kvss, sigService, bccsp.Standard, math.BN254) assert.NoError(t, err) assert.NotNil(t, p) @@ -327,7 +326,7 @@ func TestIdentityFromFabricCA(t *testing.T) { assert.NoError(t, err) assert.NoError(t, verifier.Verify([]byte("hello world!!!"), sigma)) - p, err = idemix2.NewProviderWithSigTypeAncCurve(config, registry, idemix2.Any, math.BN254) + p, err = idemix2.NewProviderWithSigTypeAncCurve(config, kvss, sigService, idemix2.Any, math.BN254) assert.NoError(t, err) assert.NotNil(t, p) @@ -352,13 +351,13 @@ func TestIdentityFromFabricCAWithEidRhNymPolicy(t *testing.T) { kvss, err := kvs.NewWithConfig(registry, "memory", "", &mock.ConfigProvider{}) assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - sigService := sig2.NewSignService(registry, nil, kvss) + sigService := sig2.NewService(sig2.NewMultiplexDeserializer(), kvss) assert.NoError(t, registry.RegisterService(sigService)) config, err := idemix2.GetLocalMspConfigWithType("./testdata/charlie.ExtraId2", "charlie.ExtraId2") assert.NoError(t, err) - p, err := idemix2.NewProviderWithSigTypeAncCurve(config, registry, bccsp.EidNymRhNym, math.BN254) + p, err := idemix2.NewProviderWithSigTypeAncCurve(config, kvss, sigService, bccsp.EidNymRhNym, math.BN254) assert.NoError(t, err) assert.NotNil(t, p) @@ -386,7 +385,7 @@ func TestIdentityFromFabricCAWithEidRhNymPolicy(t *testing.T) { assert.NoError(t, err) assert.NoError(t, verifier.Verify([]byte("hello world!!!"), sigma)) - p, err = idemix2.NewProviderWithAnyPolicyAndCurve(config, registry, math.BN254) + p, err = 
idemix2.NewProviderWithAnyPolicyAndCurve(config, kvss, sigService, math.BN254) assert.NoError(t, err) assert.NotNil(t, p) diff --git a/platform/fabric/core/generic/msp/mock/config_provider.go b/platform/fabric/core/generic/msp/mock/config_provider.go deleted file mode 100644 index 51fcace96..000000000 --- a/platform/fabric/core/generic/msp/mock/config_provider.go +++ /dev/null @@ -1,771 +0,0 @@ -// Code generated by counterfeiter. DO NOT EDIT. -package mock - -import ( - "sync" - "time" - - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/driver" -) - -type ConfigProvider struct { - ConfigFileUsedStub func() string - configFileUsedMutex sync.RWMutex - configFileUsedArgsForCall []struct { - } - configFileUsedReturns struct { - result1 string - } - configFileUsedReturnsOnCall map[int]struct { - result1 string - } - GetBoolStub func(string) bool - getBoolMutex sync.RWMutex - getBoolArgsForCall []struct { - arg1 string - } - getBoolReturns struct { - result1 bool - } - getBoolReturnsOnCall map[int]struct { - result1 bool - } - GetDurationStub func(string) time.Duration - getDurationMutex sync.RWMutex - getDurationArgsForCall []struct { - arg1 string - } - getDurationReturns struct { - result1 time.Duration - } - getDurationReturnsOnCall map[int]struct { - result1 time.Duration - } - GetIntStub func(string) int - getIntMutex sync.RWMutex - getIntArgsForCall []struct { - arg1 string - } - getIntReturns struct { - result1 int - } - getIntReturnsOnCall map[int]struct { - result1 int - } - GetPathStub func(string) string - getPathMutex sync.RWMutex - getPathArgsForCall []struct { - arg1 string - } - getPathReturns struct { - result1 string - } - getPathReturnsOnCall map[int]struct { - result1 string - } - GetStringStub func(string) string - getStringMutex sync.RWMutex - getStringArgsForCall []struct { - arg1 string - } - getStringReturns struct { - result1 string - } - getStringReturnsOnCall map[int]struct { - result1 string - 
} - GetStringSliceStub func(string) []string - getStringSliceMutex sync.RWMutex - getStringSliceArgsForCall []struct { - arg1 string - } - getStringSliceReturns struct { - result1 []string - } - getStringSliceReturnsOnCall map[int]struct { - result1 []string - } - IsSetStub func(string) bool - isSetMutex sync.RWMutex - isSetArgsForCall []struct { - arg1 string - } - isSetReturns struct { - result1 bool - } - isSetReturnsOnCall map[int]struct { - result1 bool - } - TranslatePathStub func(string) string - translatePathMutex sync.RWMutex - translatePathArgsForCall []struct { - arg1 string - } - translatePathReturns struct { - result1 string - } - translatePathReturnsOnCall map[int]struct { - result1 string - } - UnmarshalKeyStub func(string, interface{}) error - unmarshalKeyMutex sync.RWMutex - unmarshalKeyArgsForCall []struct { - arg1 string - arg2 interface{} - } - unmarshalKeyReturns struct { - result1 error - } - unmarshalKeyReturnsOnCall map[int]struct { - result1 error - } - invocations map[string][][]interface{} - invocationsMutex sync.RWMutex -} - -func (fake *ConfigProvider) ConfigFileUsed() string { - fake.configFileUsedMutex.Lock() - ret, specificReturn := fake.configFileUsedReturnsOnCall[len(fake.configFileUsedArgsForCall)] - fake.configFileUsedArgsForCall = append(fake.configFileUsedArgsForCall, struct { - }{}) - stub := fake.ConfigFileUsedStub - fakeReturns := fake.configFileUsedReturns - fake.recordInvocation("ConfigFileUsed", []interface{}{}) - fake.configFileUsedMutex.Unlock() - if stub != nil { - return stub() - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) ConfigFileUsedCallCount() int { - fake.configFileUsedMutex.RLock() - defer fake.configFileUsedMutex.RUnlock() - return len(fake.configFileUsedArgsForCall) -} - -func (fake *ConfigProvider) ConfigFileUsedCalls(stub func() string) { - fake.configFileUsedMutex.Lock() - defer fake.configFileUsedMutex.Unlock() - fake.ConfigFileUsedStub = 
stub -} - -func (fake *ConfigProvider) ConfigFileUsedReturns(result1 string) { - fake.configFileUsedMutex.Lock() - defer fake.configFileUsedMutex.Unlock() - fake.ConfigFileUsedStub = nil - fake.configFileUsedReturns = struct { - result1 string - }{result1} -} - -func (fake *ConfigProvider) ConfigFileUsedReturnsOnCall(i int, result1 string) { - fake.configFileUsedMutex.Lock() - defer fake.configFileUsedMutex.Unlock() - fake.ConfigFileUsedStub = nil - if fake.configFileUsedReturnsOnCall == nil { - fake.configFileUsedReturnsOnCall = make(map[int]struct { - result1 string - }) - } - fake.configFileUsedReturnsOnCall[i] = struct { - result1 string - }{result1} -} - -func (fake *ConfigProvider) GetBool(arg1 string) bool { - fake.getBoolMutex.Lock() - ret, specificReturn := fake.getBoolReturnsOnCall[len(fake.getBoolArgsForCall)] - fake.getBoolArgsForCall = append(fake.getBoolArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.GetBoolStub - fakeReturns := fake.getBoolReturns - fake.recordInvocation("GetBool", []interface{}{arg1}) - fake.getBoolMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) GetBoolCallCount() int { - fake.getBoolMutex.RLock() - defer fake.getBoolMutex.RUnlock() - return len(fake.getBoolArgsForCall) -} - -func (fake *ConfigProvider) GetBoolCalls(stub func(string) bool) { - fake.getBoolMutex.Lock() - defer fake.getBoolMutex.Unlock() - fake.GetBoolStub = stub -} - -func (fake *ConfigProvider) GetBoolArgsForCall(i int) string { - fake.getBoolMutex.RLock() - defer fake.getBoolMutex.RUnlock() - argsForCall := fake.getBoolArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *ConfigProvider) GetBoolReturns(result1 bool) { - fake.getBoolMutex.Lock() - defer fake.getBoolMutex.Unlock() - fake.GetBoolStub = nil - fake.getBoolReturns = struct { - result1 bool - }{result1} -} - -func (fake *ConfigProvider) GetBoolReturnsOnCall(i int, 
result1 bool) { - fake.getBoolMutex.Lock() - defer fake.getBoolMutex.Unlock() - fake.GetBoolStub = nil - if fake.getBoolReturnsOnCall == nil { - fake.getBoolReturnsOnCall = make(map[int]struct { - result1 bool - }) - } - fake.getBoolReturnsOnCall[i] = struct { - result1 bool - }{result1} -} - -func (fake *ConfigProvider) GetDuration(arg1 string) time.Duration { - fake.getDurationMutex.Lock() - ret, specificReturn := fake.getDurationReturnsOnCall[len(fake.getDurationArgsForCall)] - fake.getDurationArgsForCall = append(fake.getDurationArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.GetDurationStub - fakeReturns := fake.getDurationReturns - fake.recordInvocation("GetDuration", []interface{}{arg1}) - fake.getDurationMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) GetDurationCallCount() int { - fake.getDurationMutex.RLock() - defer fake.getDurationMutex.RUnlock() - return len(fake.getDurationArgsForCall) -} - -func (fake *ConfigProvider) GetDurationCalls(stub func(string) time.Duration) { - fake.getDurationMutex.Lock() - defer fake.getDurationMutex.Unlock() - fake.GetDurationStub = stub -} - -func (fake *ConfigProvider) GetDurationArgsForCall(i int) string { - fake.getDurationMutex.RLock() - defer fake.getDurationMutex.RUnlock() - argsForCall := fake.getDurationArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *ConfigProvider) GetDurationReturns(result1 time.Duration) { - fake.getDurationMutex.Lock() - defer fake.getDurationMutex.Unlock() - fake.GetDurationStub = nil - fake.getDurationReturns = struct { - result1 time.Duration - }{result1} -} - -func (fake *ConfigProvider) GetDurationReturnsOnCall(i int, result1 time.Duration) { - fake.getDurationMutex.Lock() - defer fake.getDurationMutex.Unlock() - fake.GetDurationStub = nil - if fake.getDurationReturnsOnCall == nil { - fake.getDurationReturnsOnCall = make(map[int]struct { - 
result1 time.Duration - }) - } - fake.getDurationReturnsOnCall[i] = struct { - result1 time.Duration - }{result1} -} - -func (fake *ConfigProvider) GetInt(arg1 string) int { - fake.getIntMutex.Lock() - ret, specificReturn := fake.getIntReturnsOnCall[len(fake.getIntArgsForCall)] - fake.getIntArgsForCall = append(fake.getIntArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.GetIntStub - fakeReturns := fake.getIntReturns - fake.recordInvocation("GetInt", []interface{}{arg1}) - fake.getIntMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) GetIntCallCount() int { - fake.getIntMutex.RLock() - defer fake.getIntMutex.RUnlock() - return len(fake.getIntArgsForCall) -} - -func (fake *ConfigProvider) GetIntCalls(stub func(string) int) { - fake.getIntMutex.Lock() - defer fake.getIntMutex.Unlock() - fake.GetIntStub = stub -} - -func (fake *ConfigProvider) GetIntArgsForCall(i int) string { - fake.getIntMutex.RLock() - defer fake.getIntMutex.RUnlock() - argsForCall := fake.getIntArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *ConfigProvider) GetIntReturns(result1 int) { - fake.getIntMutex.Lock() - defer fake.getIntMutex.Unlock() - fake.GetIntStub = nil - fake.getIntReturns = struct { - result1 int - }{result1} -} - -func (fake *ConfigProvider) GetIntReturnsOnCall(i int, result1 int) { - fake.getIntMutex.Lock() - defer fake.getIntMutex.Unlock() - fake.GetIntStub = nil - if fake.getIntReturnsOnCall == nil { - fake.getIntReturnsOnCall = make(map[int]struct { - result1 int - }) - } - fake.getIntReturnsOnCall[i] = struct { - result1 int - }{result1} -} - -func (fake *ConfigProvider) GetPath(arg1 string) string { - fake.getPathMutex.Lock() - ret, specificReturn := fake.getPathReturnsOnCall[len(fake.getPathArgsForCall)] - fake.getPathArgsForCall = append(fake.getPathArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.GetPathStub - 
fakeReturns := fake.getPathReturns - fake.recordInvocation("GetPath", []interface{}{arg1}) - fake.getPathMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) GetPathCallCount() int { - fake.getPathMutex.RLock() - defer fake.getPathMutex.RUnlock() - return len(fake.getPathArgsForCall) -} - -func (fake *ConfigProvider) GetPathCalls(stub func(string) string) { - fake.getPathMutex.Lock() - defer fake.getPathMutex.Unlock() - fake.GetPathStub = stub -} - -func (fake *ConfigProvider) GetPathArgsForCall(i int) string { - fake.getPathMutex.RLock() - defer fake.getPathMutex.RUnlock() - argsForCall := fake.getPathArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *ConfigProvider) GetPathReturns(result1 string) { - fake.getPathMutex.Lock() - defer fake.getPathMutex.Unlock() - fake.GetPathStub = nil - fake.getPathReturns = struct { - result1 string - }{result1} -} - -func (fake *ConfigProvider) GetPathReturnsOnCall(i int, result1 string) { - fake.getPathMutex.Lock() - defer fake.getPathMutex.Unlock() - fake.GetPathStub = nil - if fake.getPathReturnsOnCall == nil { - fake.getPathReturnsOnCall = make(map[int]struct { - result1 string - }) - } - fake.getPathReturnsOnCall[i] = struct { - result1 string - }{result1} -} - -func (fake *ConfigProvider) GetString(arg1 string) string { - fake.getStringMutex.Lock() - ret, specificReturn := fake.getStringReturnsOnCall[len(fake.getStringArgsForCall)] - fake.getStringArgsForCall = append(fake.getStringArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.GetStringStub - fakeReturns := fake.getStringReturns - fake.recordInvocation("GetString", []interface{}{arg1}) - fake.getStringMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) GetStringCallCount() int { - fake.getStringMutex.RLock() - defer 
fake.getStringMutex.RUnlock() - return len(fake.getStringArgsForCall) -} - -func (fake *ConfigProvider) GetStringCalls(stub func(string) string) { - fake.getStringMutex.Lock() - defer fake.getStringMutex.Unlock() - fake.GetStringStub = stub -} - -func (fake *ConfigProvider) GetStringArgsForCall(i int) string { - fake.getStringMutex.RLock() - defer fake.getStringMutex.RUnlock() - argsForCall := fake.getStringArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *ConfigProvider) GetStringReturns(result1 string) { - fake.getStringMutex.Lock() - defer fake.getStringMutex.Unlock() - fake.GetStringStub = nil - fake.getStringReturns = struct { - result1 string - }{result1} -} - -func (fake *ConfigProvider) GetStringReturnsOnCall(i int, result1 string) { - fake.getStringMutex.Lock() - defer fake.getStringMutex.Unlock() - fake.GetStringStub = nil - if fake.getStringReturnsOnCall == nil { - fake.getStringReturnsOnCall = make(map[int]struct { - result1 string - }) - } - fake.getStringReturnsOnCall[i] = struct { - result1 string - }{result1} -} - -func (fake *ConfigProvider) GetStringSlice(arg1 string) []string { - fake.getStringSliceMutex.Lock() - ret, specificReturn := fake.getStringSliceReturnsOnCall[len(fake.getStringSliceArgsForCall)] - fake.getStringSliceArgsForCall = append(fake.getStringSliceArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.GetStringSliceStub - fakeReturns := fake.getStringSliceReturns - fake.recordInvocation("GetStringSlice", []interface{}{arg1}) - fake.getStringSliceMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) GetStringSliceCallCount() int { - fake.getStringSliceMutex.RLock() - defer fake.getStringSliceMutex.RUnlock() - return len(fake.getStringSliceArgsForCall) -} - -func (fake *ConfigProvider) GetStringSliceCalls(stub func(string) []string) { - fake.getStringSliceMutex.Lock() - defer 
fake.getStringSliceMutex.Unlock() - fake.GetStringSliceStub = stub -} - -func (fake *ConfigProvider) GetStringSliceArgsForCall(i int) string { - fake.getStringSliceMutex.RLock() - defer fake.getStringSliceMutex.RUnlock() - argsForCall := fake.getStringSliceArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *ConfigProvider) GetStringSliceReturns(result1 []string) { - fake.getStringSliceMutex.Lock() - defer fake.getStringSliceMutex.Unlock() - fake.GetStringSliceStub = nil - fake.getStringSliceReturns = struct { - result1 []string - }{result1} -} - -func (fake *ConfigProvider) GetStringSliceReturnsOnCall(i int, result1 []string) { - fake.getStringSliceMutex.Lock() - defer fake.getStringSliceMutex.Unlock() - fake.GetStringSliceStub = nil - if fake.getStringSliceReturnsOnCall == nil { - fake.getStringSliceReturnsOnCall = make(map[int]struct { - result1 []string - }) - } - fake.getStringSliceReturnsOnCall[i] = struct { - result1 []string - }{result1} -} - -func (fake *ConfigProvider) IsSet(arg1 string) bool { - fake.isSetMutex.Lock() - ret, specificReturn := fake.isSetReturnsOnCall[len(fake.isSetArgsForCall)] - fake.isSetArgsForCall = append(fake.isSetArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.IsSetStub - fakeReturns := fake.isSetReturns - fake.recordInvocation("IsSet", []interface{}{arg1}) - fake.isSetMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) IsSetCallCount() int { - fake.isSetMutex.RLock() - defer fake.isSetMutex.RUnlock() - return len(fake.isSetArgsForCall) -} - -func (fake *ConfigProvider) IsSetCalls(stub func(string) bool) { - fake.isSetMutex.Lock() - defer fake.isSetMutex.Unlock() - fake.IsSetStub = stub -} - -func (fake *ConfigProvider) IsSetArgsForCall(i int) string { - fake.isSetMutex.RLock() - defer fake.isSetMutex.RUnlock() - argsForCall := fake.isSetArgsForCall[i] - return argsForCall.arg1 -} - -func (fake 
*ConfigProvider) IsSetReturns(result1 bool) { - fake.isSetMutex.Lock() - defer fake.isSetMutex.Unlock() - fake.IsSetStub = nil - fake.isSetReturns = struct { - result1 bool - }{result1} -} - -func (fake *ConfigProvider) IsSetReturnsOnCall(i int, result1 bool) { - fake.isSetMutex.Lock() - defer fake.isSetMutex.Unlock() - fake.IsSetStub = nil - if fake.isSetReturnsOnCall == nil { - fake.isSetReturnsOnCall = make(map[int]struct { - result1 bool - }) - } - fake.isSetReturnsOnCall[i] = struct { - result1 bool - }{result1} -} - -func (fake *ConfigProvider) TranslatePath(arg1 string) string { - fake.translatePathMutex.Lock() - ret, specificReturn := fake.translatePathReturnsOnCall[len(fake.translatePathArgsForCall)] - fake.translatePathArgsForCall = append(fake.translatePathArgsForCall, struct { - arg1 string - }{arg1}) - stub := fake.TranslatePathStub - fakeReturns := fake.translatePathReturns - fake.recordInvocation("TranslatePath", []interface{}{arg1}) - fake.translatePathMutex.Unlock() - if stub != nil { - return stub(arg1) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) TranslatePathCallCount() int { - fake.translatePathMutex.RLock() - defer fake.translatePathMutex.RUnlock() - return len(fake.translatePathArgsForCall) -} - -func (fake *ConfigProvider) TranslatePathCalls(stub func(string) string) { - fake.translatePathMutex.Lock() - defer fake.translatePathMutex.Unlock() - fake.TranslatePathStub = stub -} - -func (fake *ConfigProvider) TranslatePathArgsForCall(i int) string { - fake.translatePathMutex.RLock() - defer fake.translatePathMutex.RUnlock() - argsForCall := fake.translatePathArgsForCall[i] - return argsForCall.arg1 -} - -func (fake *ConfigProvider) TranslatePathReturns(result1 string) { - fake.translatePathMutex.Lock() - defer fake.translatePathMutex.Unlock() - fake.TranslatePathStub = nil - fake.translatePathReturns = struct { - result1 string - }{result1} -} - -func (fake *ConfigProvider) 
TranslatePathReturnsOnCall(i int, result1 string) { - fake.translatePathMutex.Lock() - defer fake.translatePathMutex.Unlock() - fake.TranslatePathStub = nil - if fake.translatePathReturnsOnCall == nil { - fake.translatePathReturnsOnCall = make(map[int]struct { - result1 string - }) - } - fake.translatePathReturnsOnCall[i] = struct { - result1 string - }{result1} -} - -func (fake *ConfigProvider) UnmarshalKey(arg1 string, arg2 interface{}) error { - fake.unmarshalKeyMutex.Lock() - ret, specificReturn := fake.unmarshalKeyReturnsOnCall[len(fake.unmarshalKeyArgsForCall)] - fake.unmarshalKeyArgsForCall = append(fake.unmarshalKeyArgsForCall, struct { - arg1 string - arg2 interface{} - }{arg1, arg2}) - stub := fake.UnmarshalKeyStub - fakeReturns := fake.unmarshalKeyReturns - fake.recordInvocation("UnmarshalKey", []interface{}{arg1, arg2}) - fake.unmarshalKeyMutex.Unlock() - if stub != nil { - return stub(arg1, arg2) - } - if specificReturn { - return ret.result1 - } - return fakeReturns.result1 -} - -func (fake *ConfigProvider) UnmarshalKeyCallCount() int { - fake.unmarshalKeyMutex.RLock() - defer fake.unmarshalKeyMutex.RUnlock() - return len(fake.unmarshalKeyArgsForCall) -} - -func (fake *ConfigProvider) UnmarshalKeyCalls(stub func(string, interface{}) error) { - fake.unmarshalKeyMutex.Lock() - defer fake.unmarshalKeyMutex.Unlock() - fake.UnmarshalKeyStub = stub -} - -func (fake *ConfigProvider) UnmarshalKeyArgsForCall(i int) (string, interface{}) { - fake.unmarshalKeyMutex.RLock() - defer fake.unmarshalKeyMutex.RUnlock() - argsForCall := fake.unmarshalKeyArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2 -} - -func (fake *ConfigProvider) UnmarshalKeyReturns(result1 error) { - fake.unmarshalKeyMutex.Lock() - defer fake.unmarshalKeyMutex.Unlock() - fake.UnmarshalKeyStub = nil - fake.unmarshalKeyReturns = struct { - result1 error - }{result1} -} - -func (fake *ConfigProvider) UnmarshalKeyReturnsOnCall(i int, result1 error) { - fake.unmarshalKeyMutex.Lock() - defer 
fake.unmarshalKeyMutex.Unlock() - fake.UnmarshalKeyStub = nil - if fake.unmarshalKeyReturnsOnCall == nil { - fake.unmarshalKeyReturnsOnCall = make(map[int]struct { - result1 error - }) - } - fake.unmarshalKeyReturnsOnCall[i] = struct { - result1 error - }{result1} -} - -func (fake *ConfigProvider) Invocations() map[string][][]interface{} { - fake.invocationsMutex.RLock() - defer fake.invocationsMutex.RUnlock() - fake.configFileUsedMutex.RLock() - defer fake.configFileUsedMutex.RUnlock() - fake.getBoolMutex.RLock() - defer fake.getBoolMutex.RUnlock() - fake.getDurationMutex.RLock() - defer fake.getDurationMutex.RUnlock() - fake.getIntMutex.RLock() - defer fake.getIntMutex.RUnlock() - fake.getPathMutex.RLock() - defer fake.getPathMutex.RUnlock() - fake.getStringMutex.RLock() - defer fake.getStringMutex.RUnlock() - fake.getStringSliceMutex.RLock() - defer fake.getStringSliceMutex.RUnlock() - fake.isSetMutex.RLock() - defer fake.isSetMutex.RUnlock() - fake.translatePathMutex.RLock() - defer fake.translatePathMutex.RUnlock() - fake.unmarshalKeyMutex.RLock() - defer fake.unmarshalKeyMutex.RUnlock() - copiedInvocations := map[string][][]interface{}{} - for key, value := range fake.invocations { - copiedInvocations[key] = value - } - return copiedInvocations -} - -func (fake *ConfigProvider) recordInvocation(key string, args []interface{}) { - fake.invocationsMutex.Lock() - defer fake.invocationsMutex.Unlock() - if fake.invocations == nil { - fake.invocations = map[string][][]interface{}{} - } - if fake.invocations[key] == nil { - fake.invocations[key] = [][]interface{}{} - } - fake.invocations[key] = append(fake.invocations[key], args) -} - -var _ driver.ConfigProvider = new(ConfigProvider) diff --git a/platform/fabric/core/generic/msp/service.go b/platform/fabric/core/generic/msp/service.go index 3c6ed97b2..13a420c75 100644 --- a/platform/fabric/core/generic/msp/service.go +++ b/platform/fabric/core/generic/msp/service.go @@ -8,15 +8,13 @@ package msp import ( "fmt" - 
"reflect" "sync" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/driver" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/idemix" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/x509" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/sig" fdriver "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/core/sig" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" "github.com/hyperledger/fabric/msp" @@ -32,13 +30,20 @@ const ( var logger = flogging.MustGetLogger("fabric-sdk.msp") +type KVS interface { + Exists(id string) bool + Put(id string, state interface{}) error + Get(id string, state interface{}) error +} + type service struct { - sp view2.ServiceProvider defaultIdentity view.Identity defaultSigningIdentity driver.SigningIdentity signerService driver.SignerService binderService driver.BinderService + deserializerManager driver.DeserializerManager defaultViewIdentity view.Identity + KVS KVS config driver.Config mspsMutex sync.RWMutex @@ -53,18 +58,20 @@ type service struct { } func NewLocalMSPManager( - sp view2.ServiceProvider, config driver.Config, + KVS KVS, signerService driver.SignerService, binderService driver.BinderService, defaultViewIdentity view.Identity, + deserializerManager driver.DeserializerManager, cacheSize int, ) *service { s := &service{ - sp: sp, config: config, + KVS: KVS, signerService: signerService, binderService: binderService, + deserializerManager: deserializerManager, defaultViewIdentity: defaultViewIdentity, 
mspsByTypeAndName: map[string]*driver.MSP{}, bccspMspsByIdentity: map[string]*driver.MSP{}, @@ -75,13 +82,21 @@ func NewLocalMSPManager( } s.PutIdentityLoader(BccspMSP, &x509.IdentityLoader{}) s.PutIdentityLoader(BccspMSPFolder, &x509.FolderIdentityLoader{}) - s.PutIdentityLoader(IdemixMSP, &idemix.IdentityLoader{}) - s.PutIdentityLoader(IdemixMSPFolder, &idemix.FolderIdentityLoader{}) + s.PutIdentityLoader(IdemixMSP, &idemix.IdentityLoader{ + KVS: KVS, + SignerService: signerService, + }) + s.PutIdentityLoader(IdemixMSPFolder, &idemix.FolderIdentityLoader{ + IdentityLoader: &idemix.IdentityLoader{ + KVS: KVS, + SignerService: signerService, + }, + }) return s } func (s *service) AddDeserializer(deserializer sig.Deserializer) { - s.DeserializerManager().AddDeserializer(deserializer) + s.deserializerManager.AddDeserializer(deserializer) } func (s *service) Config() driver.Config { @@ -97,10 +112,6 @@ func (s *service) SignerService() driver.SignerService { return s.signerService } -func (s *service) ServiceProvider() view2.ServiceProvider { - return s.sp -} - func (s *service) CacheSize() int { return s.cacheSize } @@ -123,12 +134,9 @@ func (s *service) DefaultIdentity() view.Identity { func (s *service) AnonymousIdentity() view.Identity { id := s.Identity("idemix") - - es := view2.GetEndpointService(s.sp) - if err := es.Bind(s.defaultViewIdentity, id); err != nil { + if err := s.binderService.Bind(s.defaultViewIdentity, id); err != nil { panic(err) } - return id } @@ -141,7 +149,7 @@ func (s *service) Identity(label string) view.Identity { } func (s *service) IsMe(id view.Identity) bool { - return view2.GetSigService(s.sp).IsMe(id) + return s.signerService.IsMe(id) } func (s *service) DefaultSigningIdentity() fdriver.SigningIdentity { @@ -225,7 +233,7 @@ func (s *service) GetIdentityByID(id string) (view.Identity, error) { } } - identity, err := view2.GetEndpointService(s.sp).GetIdentity(id, nil) + identity, err := s.binderService.GetIdentity(id, nil) if err != nil 
{ return nil, errors.Errorf("identity [%s] not found", id) } @@ -240,12 +248,12 @@ func (s *service) RegisterIdemixMSP(id string, path string, mspID string) error if err != nil { return errors.Wrapf(err, "failed reading idemix msp configuration from [%s]", path) } - provider, err := idemix.NewProviderWithAnyPolicy(conf, s.sp) + provider, err := idemix.NewProviderWithAnyPolicy(conf, s.KVS, s.signerService) if err != nil { return errors.Wrapf(err, "failed instantiating idemix msp provider from [%s]", path) } - s.DeserializerManager().AddDeserializer(provider) + s.deserializerManager.AddDeserializer(provider) s.AddMSP(id, IdemixMSP, provider.EnrollmentID(), idemix.NewIdentityCache(provider.Identity, s.cacheSize, nil).Identity) logger.Debugf("added IdemixMSP msp for id %s with cache of size %d", id+"@"+provider.EnrollmentID(), s.cacheSize) return nil @@ -260,7 +268,7 @@ func (s *service) RegisterX509MSP(id string, path string, mspID string) error { return errors.Wrapf(err, "failed instantiating idemix msp provider from [%s]", path) } - s.DeserializerManager().AddDeserializer(provider) + s.deserializerManager.AddDeserializer(provider) s.AddMSP(id, BccspMSP, provider.EnrollmentID(), provider.Identity) return nil @@ -348,14 +356,6 @@ func (s *service) Msps() []string { return res } -func (s *service) DeserializerManager() driver.DeserializerManager { - dm, err := s.sp.GetService(reflect.TypeOf((*driver.DeserializerManager)(nil))) - if err != nil { - panic(fmt.Sprintf("failed looking up deserializer manager [%s]", err)) - } - return dm.(driver.DeserializerManager) -} - func (s *service) loadLocalMSPs() error { configs, err := s.config.MSPs() if err != nil { @@ -383,7 +383,7 @@ func (s *service) loadLocalMSPs() error { } if s.defaultIdentity == nil { - return errors.Errorf("no default identity set for network [%s]", s.config.Name()) + return errors.Errorf("no default identity set for network [%s]", s.config.NetworkName()) } return nil diff --git 
a/platform/fabric/core/generic/msp/service_test.go b/platform/fabric/core/generic/msp/service_test.go index 9a802ba65..76315ba09 100644 --- a/platform/fabric/core/generic/msp/service_test.go +++ b/platform/fabric/core/generic/msp/service_test.go @@ -12,9 +12,9 @@ import ( config2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" msp2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp" - mock2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/mock" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/driver/mock" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/sig" "github.com/hyperledger-labs/fabric-smart-client/platform/view/core/config" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/core/sig" _ "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/db/driver/memory" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs" registry2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/registry" @@ -22,25 +22,22 @@ import ( "github.com/stretchr/testify/assert" ) -//go:generate counterfeiter -o mock/config_provider.go -fake-name ConfigProvider . 
ConfigProvider - func TestRegisterIdemixLocalMSP(t *testing.T) { registry := registry2.New() - cp := &mock2.ConfigProvider{} + cp := &mock.ConfigProvider{} cp.IsSetReturns(false) assert.NoError(t, registry.RegisterService(cp)) kvss, err := kvs.New(registry, "memory", "") assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - des, err := sig.NewMultiplexDeserializer(registry) - assert.NoError(t, err) + des := sig.NewMultiplexDeserializer() assert.NoError(t, registry.RegisterService(des)) - config, err := config2.New(cp, "default", true) + config, err := config2.NewService(cp, "default", true) assert.NoError(t, err) - mspService := msp2.NewLocalMSPManager(registry, config, nil, nil, nil, 100) + mspService := msp2.NewLocalMSPManager(config, kvss, nil, nil, nil, des, 100) assert.NoError(t, registry.RegisterService(mspService)) - sigService := sig.NewSignService(registry, nil, kvss) + sigService := sig.NewService(des, kvss) assert.NoError(t, registry.RegisterService(sigService)) assert.NoError(t, mspService.RegisterIdemixMSP("apple", "./idemix/testdata/idemix", "idemix")) @@ -64,14 +61,13 @@ func TestIdemixTypeFolder(t *testing.T) { kvss, err := kvs.New(registry, "memory", "") assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - des, err := sig.NewMultiplexDeserializer(registry) - assert.NoError(t, err) + des := sig.NewMultiplexDeserializer() assert.NoError(t, registry.RegisterService(des)) - config, err := config2.New(cp, "default", true) + config, err := config2.NewService(cp, "default", true) assert.NoError(t, err) - mspService := msp2.NewLocalMSPManager(registry, config, nil, nil, nil, 100) + mspService := msp2.NewLocalMSPManager(config, kvss, nil, nil, nil, des, 100) assert.NoError(t, registry.RegisterService(mspService)) - sigService := sig.NewSignService(registry, nil, kvss) + sigService := sig.NewService(des, kvss) assert.NoError(t, registry.RegisterService(sigService)) assert.NoError(t, mspService.Load()) @@ -85,20 
+81,19 @@ func TestIdemixTypeFolder(t *testing.T) { func TestRegisterX509LocalMSP(t *testing.T) { registry := registry2.New() - cp := &mock2.ConfigProvider{} + cp := &mock.ConfigProvider{} cp.IsSetReturns(false) assert.NoError(t, registry.RegisterService(cp)) kvss, err := kvs.New(registry, "memory", "") assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - des, err := sig.NewMultiplexDeserializer(registry) - assert.NoError(t, err) + des := sig.NewMultiplexDeserializer() assert.NoError(t, registry.RegisterService(des)) - config, err := config2.New(cp, "default", true) + config, err := config2.NewService(cp, "default", true) assert.NoError(t, err) - mspService := msp2.NewLocalMSPManager(registry, config, nil, nil, nil, 100) + mspService := msp2.NewLocalMSPManager(config, kvss, nil, nil, nil, des, 100) assert.NoError(t, registry.RegisterService(mspService)) - sigService := sig.NewSignService(registry, nil, kvss) + sigService := sig.NewService(des, kvss) assert.NoError(t, registry.RegisterService(sigService)) assert.NoError(t, mspService.RegisterX509MSP("apple", "./x509/testdata/msp", "x509")) @@ -121,14 +116,13 @@ func TestX509TypeFolder(t *testing.T) { kvss, err := kvs.New(registry, "memory", "") assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - des, err := sig.NewMultiplexDeserializer(registry) - assert.NoError(t, err) + des := sig.NewMultiplexDeserializer() assert.NoError(t, registry.RegisterService(des)) - config, err := config2.New(cp, "default", true) + config, err := config2.NewService(cp, "default", true) assert.NoError(t, err) - mspService := msp2.NewLocalMSPManager(registry, config, nil, nil, nil, 100) + mspService := msp2.NewLocalMSPManager(config, kvss, nil, nil, nil, des, 100) assert.NoError(t, registry.RegisterService(mspService)) - sigService := sig.NewSignService(registry, nil, kvss) + sigService := sig.NewService(des, kvss) assert.NoError(t, registry.RegisterService(sigService)) assert.NoError(t, 
mspService.Load()) @@ -148,14 +142,13 @@ func TestRefresh(t *testing.T) { kvss, err := kvs.New(registry, "memory", "") assert.NoError(t, err) assert.NoError(t, registry.RegisterService(kvss)) - des, err := sig.NewMultiplexDeserializer(registry) - assert.NoError(t, err) + des := sig.NewMultiplexDeserializer() assert.NoError(t, registry.RegisterService(des)) - config, err := config2.New(cp, "default", true) + config, err := config2.NewService(cp, "default", true) assert.NoError(t, err) - mspService := msp2.NewLocalMSPManager(registry, config, nil, nil, nil, 100) + mspService := msp2.NewLocalMSPManager(config, kvss, nil, nil, nil, des, 100) assert.NoError(t, registry.RegisterService(mspService)) - sigService := sig.NewSignService(registry, nil, kvss) + sigService := sig.NewService(des, kvss) assert.NoError(t, registry.RegisterService(sigService)) assert.NoError(t, mspService.Load()) diff --git a/platform/fabric/core/generic/msp/x509/provider.go b/platform/fabric/core/generic/msp/x509/provider.go index 3045b9ffd..8ceb2a066 100644 --- a/platform/fabric/core/generic/msp/x509/provider.go +++ b/platform/fabric/core/generic/msp/x509/provider.go @@ -13,15 +13,14 @@ import ( "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/proto" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" driver2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/msp/driver" - fdriver "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/driver" + driver "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" "github.com/hyperledger/fabric-protos-go/msp" "github.com/pkg/errors" ) type SignerService interface { - 
RegisterSigner(identity view.Identity, signer fdriver.Signer, verifier fdriver.Verifier) error + RegisterSigner(identity view.Identity, signer driver.Signer, verifier driver.Verifier) error } type Provider struct { @@ -84,7 +83,7 @@ func (p *Provider) IsRemote() bool { return p.sID == nil } -func (p *Provider) Identity(opts *fdriver.IdentityOptions) (view.Identity, []byte, error) { +func (p *Provider) Identity(opts *driver.IdentityOptions) (view.Identity, []byte, error) { revocationHandle, err := GetRevocationHandle(p.id) if err != nil { return nil, nil, errors.Wrapf(err, "failed getting revocation handle") diff --git a/platform/fabric/core/generic/network.go b/platform/fabric/core/generic/network.go index a2b2f8345..fd2d4810a 100644 --- a/platform/fabric/core/generic/network.go +++ b/platform/fabric/core/generic/network.go @@ -7,10 +7,8 @@ SPDX-License-Identifier: Apache-2.0 package generic import ( - "math/rand" "sync" - config2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/metrics" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/ordering" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/rwset" @@ -21,7 +19,6 @@ import ( "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" "github.com/hyperledger/fabric/common/channelconfig" "github.com/pkg/errors" - "golang.org/x/net/context" ) var logger = flogging.MustGetLogger("fabric-sdk.core") @@ -32,28 +29,18 @@ type Network struct { SP view2.ServiceProvider name string - config *config2.Config - + configService driver.ConfigService localMembership driver.LocalMembership idProvider driver.IdentityProvider processorManager driver.ProcessorManager transactionManager driver.TransactionManager sigService driver.SignerService 
- // Ordering related fields - orderers []*grpc.ConnectionConfig - ConsensusType string - configuredOrderers int - - // Peers related fields - peers map[driver.PeerFunctionType][]*grpc.ConnectionConfig + ConsensusType string + Ordering driver.Ordering - // Channel related fields - defaultChannel string - channelConfigs []*config2.Channel + Metrics *metrics.Metrics - Metrics *metrics.Metrics - Ordering driver.Ordering NewChannel NewChannelFunc ChannelMap map[string]driver.Channel ChannelMutex sync.RWMutex @@ -62,7 +49,7 @@ type Network struct { func NewNetwork( sp view2.ServiceProvider, name string, - config *config2.Config, + config driver.ConfigService, idProvider driver.IdentityProvider, localMembership driver.LocalMembership, sigService driver.SignerService, @@ -72,7 +59,7 @@ func NewNetwork( return &Network{ SP: sp, name: name, - config: config, + configService: config, ChannelMap: map[string]driver.Channel{}, localMembership: localMembership, idProvider: idProvider, @@ -86,60 +73,15 @@ func (f *Network) Name() string { return f.name } -func (f *Network) DefaultChannel() string { - return f.defaultChannel -} - -func (f *Network) Channels() []string { - var chs []string - for _, c := range f.channelConfigs { - chs = append(chs, c.Name) - } - return chs -} - -func (f *Network) Orderers() []*grpc.ConnectionConfig { - return f.orderers -} - -func (f *Network) PickOrderer() *grpc.ConnectionConfig { - if len(f.orderers) == 0 { - return nil - } - return f.orderers[rand.Intn(len(f.orderers))] -} - -func (f *Network) Peers() []*grpc.ConnectionConfig { - var peers []*grpc.ConnectionConfig - for _, configs := range f.peers { - peers = append(peers, configs...) 
- } - return peers -} - -func (f *Network) PickPeer(ft driver.PeerFunctionType) *grpc.ConnectionConfig { - source, ok := f.peers[ft] - if !ok { - source = f.peers[driver.PeerForAnything] - } - return source[rand.Intn(len(source))] -} - func (f *Network) Channel(name string) (driver.Channel, error) { logger.Debugf("Getting Channel [%s]", name) if len(name) == 0 { - name = f.DefaultChannel() + name = f.ConfigService().DefaultChannel() logger.Debugf("Resorting to default Channel [%s]", name) } - chanQuiet := false - for _, chanDef := range f.channelConfigs { - if chanDef.Name == name { - chanQuiet = chanDef.Quiet - break - } - } + chanQuiet := f.ConfigService().IsChannelQuiet(name) // first check the cache f.ChannelMutex.RLock() @@ -171,11 +113,19 @@ func (f *Network) Channel(name string) (driver.Channel, error) { } func (f *Network) Ledger(name string) (driver.Ledger, error) { - return f.Channel(name) + ch, err := f.Channel(name) + if err != nil { + return nil, err + } + return ch.Ledger(), nil } func (f *Network) Committer(name string) (driver.Committer, error) { - return f.Channel(name) + ch, err := f.Channel(name) + if err != nil { + return nil, err + } + return ch.Committer(), nil } func (f *Network) IdentityProvider() driver.IdentityProvider { @@ -194,13 +144,8 @@ func (f *Network) TransactionManager() driver.TransactionManager { return f.transactionManager } -func (f *Network) Broadcast(context context.Context, blob interface{}) error { - return f.Ordering.Broadcast(context, blob) -} - -// SetConsensusType sets the consensus type the ordering service should use -func (f *Network) SetConsensusType(consensusType string) error { - return f.Ordering.SetConsensusType(consensusType) +func (f *Network) OrderingService() driver.Ordering { + return f.Ordering } func (f *Network) SignerService() driver.SignerService { @@ -208,44 +153,28 @@ func (f *Network) SignerService() driver.SignerService { } func (f *Network) ConfigService() driver.ConfigService { - return f.config 
-} - -func (f *Network) Config() *config2.Config { - return f.config + return f.configService } func (f *Network) Init() error { - f.processorManager = rwset.NewProcessorManager(f.SP, f, nil) - f.transactionManager = transaction.NewManager(f.SP, f) - - var err error - f.orderers, err = f.config.Orderers() - if err != nil { - return errors.Wrap(err, "failed loading orderers") - } - f.configuredOrderers = len(f.orderers) - logger.Debugf("Orderers [%v]", f.orderers) - - f.peers, err = f.config.Peers() - if err != nil { - return errors.Wrap(err, "failed loading peers") - } - logger.Debugf("Peers [%v]", f.peers) - - f.channelConfigs, err = f.config.Channels() - if err != nil { - return errors.Wrap(err, "failed loading channels") - } - logger.Debugf("Channels [%v]", f.channelConfigs) - for _, channel := range f.channelConfigs { - if channel.Default { - f.defaultChannel = channel.Name - break - } - } - - f.Ordering = ordering.NewService(f.SP, f, f.config.OrdererConnectionPoolSize(), f.Metrics) + f.processorManager = rwset.NewProcessorManager(f, nil) + f.transactionManager = transaction.NewManager() + f.transactionManager.AddTransactionFactory( + driver.EndorserTransaction, + transaction.NewEndorserTransactionFactory(f.Name(), f, f.sigService), + ) + f.Ordering = ordering.NewService( + func(channelID string) (driver.EndorserTransactionService, error) { + ch, err := f.Channel(channelID) + if err != nil { + return nil, err + } + return ch.TransactionService(), nil + }, + f.sigService, + f.configService, + f.Metrics, + ) return nil } @@ -253,11 +182,9 @@ func (f *Network) SetConfigOrderers(o channelconfig.Orderer, orderers []*grpc.Co if err := f.Ordering.SetConsensusType(o.ConsensusType()); err != nil { return errors.WithMessagef(err, "failed to set consensus type from channel config") } - // the first configuredOrderers are from the configuration, keep them - // and append the new ones - f.orderers = append(f.orderers[:f.configuredOrderers], orderers...) 
- logger.Debugf("New Orderers [%d]", len(f.orderers)) - + if err := f.ConfigService().SetConfigOrderers(orderers); err != nil { + return errors.WithMessagef(err, "failed to set orderers") + } return nil } diff --git a/platform/fabric/core/generic/ordering/bft.go b/platform/fabric/core/generic/ordering/bft.go index 4e4adb716..5d848cb11 100644 --- a/platform/fabric/core/generic/ordering/bft.go +++ b/platform/fabric/core/generic/ordering/bft.go @@ -13,6 +13,7 @@ import ( "time" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/metrics" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" common2 "github.com/hyperledger/fabric-protos-go/common" "github.com/pkg/errors" @@ -20,7 +21,8 @@ import ( ) type BFTBroadcaster struct { - Network Network + ConfigService driver.ConfigService + connSem *semaphore.Weighted metrics *metrics.Metrics poolSize int @@ -29,21 +31,21 @@ type BFTBroadcaster struct { connections map[string]chan *Connection } -func NewBFTBroadcaster(network Network, poolSize int, metrics *metrics.Metrics) *BFTBroadcaster { +func NewBFTBroadcaster(configService driver.ConfigService, metrics *metrics.Metrics) *BFTBroadcaster { return &BFTBroadcaster{ - Network: network, - connections: map[string]chan *Connection{}, - connSem: semaphore.NewWeighted(int64(poolSize)), - metrics: metrics, - poolSize: poolSize, + ConfigService: configService, + connections: map[string]chan *Connection{}, + connSem: semaphore.NewWeighted(int64(configService.OrdererConnectionPoolSize())), + metrics: metrics, + poolSize: configService.OrdererConnectionPoolSize(), } } func (o *BFTBroadcaster) Broadcast(context context.Context, env *common2.Envelope) error { // send the envelope for ordering - retries := o.Network.Config().BroadcastNumRetries() - retryInterval := 
o.Network.Config().BroadcastRetryInterval() - orderers := o.Network.Orderers() + retries := o.ConfigService.BroadcastNumRetries() + retryInterval := o.ConfigService.BroadcastRetryInterval() + orderers := o.ConfigService.Orderers() if len(orderers) < 4 { return errors.Errorf("not enough orderers, 4 minimum got [%d]", len(orderers)) } diff --git a/platform/fabric/core/generic/ordering/cft.go b/platform/fabric/core/generic/ordering/cft.go index 7cf5a5b41..1d8b92134 100644 --- a/platform/fabric/core/generic/ordering/cft.go +++ b/platform/fabric/core/generic/ordering/cft.go @@ -11,6 +11,7 @@ import ( "time" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/metrics" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" common2 "github.com/hyperledger/fabric-protos-go/common" ab "github.com/hyperledger/fabric-protos-go/orderer" "github.com/pkg/errors" @@ -18,18 +19,22 @@ import ( ) type CFTBroadcaster struct { - Network Network + NetworkID string + ConfigService driver.ConfigService + connSem *semaphore.Weighted connections chan *Connection metrics *metrics.Metrics } -func NewCFTBroadcaster(network Network, poolSize int, metrics *metrics.Metrics) *CFTBroadcaster { +func NewCFTBroadcaster(configService driver.ConfigService, metrics *metrics.Metrics) *CFTBroadcaster { + poolSize := configService.OrdererConnectionPoolSize() return &CFTBroadcaster{ - Network: network, - connections: make(chan *Connection, poolSize), - connSem: semaphore.NewWeighted(int64(poolSize)), - metrics: metrics, + NetworkID: configService.NetworkName(), + ConfigService: configService, + connections: make(chan *Connection, poolSize), + connSem: semaphore.NewWeighted(int64(poolSize)), + metrics: metrics, } } @@ -37,8 +42,8 @@ func (o *CFTBroadcaster) Broadcast(context context.Context, env *common2.Envelop // send the envelope for ordering var status *ab.BroadcastResponse var 
connection *Connection - retries := o.Network.Config().BroadcastNumRetries() - retryInterval := o.Network.Config().BroadcastRetryInterval() + retries := o.ConfigService.BroadcastNumRetries() + retryInterval := o.ConfigService.BroadcastRetryInterval() forceConnect := true var err error for i := 0; i < retries; i++ { @@ -74,7 +79,7 @@ func (o *CFTBroadcaster) Broadcast(context context.Context, env *common2.Envelop } labels := []string{ - "network", o.Network.Name(), + "network", o.NetworkID, } o.metrics.OrderedTransactions.With(labels...).Add(1) o.releaseConnection(connection) @@ -102,7 +107,7 @@ func (o *CFTBroadcaster) getConnection(ctx context.Context) (*Connection, error) cancel() // create connection - ordererConfig := o.Network.PickOrderer() + ordererConfig := o.ConfigService.PickOrderer() if ordererConfig == nil { return nil, errors.New("no orderer configured") } diff --git a/platform/fabric/core/generic/ordering/ordering.go b/platform/fabric/core/generic/ordering/ordering.go index 27cf02f01..3ca844dc6 100644 --- a/platform/fabric/core/generic/ordering/ordering.go +++ b/platform/fabric/core/generic/ordering/ordering.go @@ -10,13 +10,10 @@ import ( "context" "sync" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/fabricutils" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/metrics" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" common2 
"github.com/hyperledger/fabric-protos-go/common" "github.com/pkg/errors" @@ -24,28 +21,15 @@ import ( context2 "golang.org/x/net/context" ) -var logger = flogging.MustGetLogger("fabric-sdk.ordering") - -type Signer interface { - // Sign the message - Sign(msg []byte) ([]byte, error) -} +type ConsensusType = string -type ViewManager interface { - InitiateView(view view.View) (interface{}, error) -} +const ( + BFT ConsensusType = "BFT" + Raft ConsensusType = "etcdraft" + Solo ConsensusType = "solo" +) -type Network interface { - Name() string - PickOrderer() *grpc.ConnectionConfig - Orderers() []*grpc.ConnectionConfig - LocalMembership() driver.LocalMembership - // Broadcast sends the passed blob to the ordering Service to be ordered - Broadcast(context context2.Context, blob interface{}) error - Channel(name string) (driver.Channel, error) - SignerService() driver.SignerService - Config() *config.Config -} +var logger = flogging.MustGetLogger("fabric-sdk.ordering") type Transaction interface { Channel() string @@ -62,27 +46,31 @@ type TransactionWithEnvelope interface { type BroadcastFnc = func(context context.Context, env *common2.Envelope) error +type GetEndorserTransactionServiceFunc = func(channelID string) (driver.EndorserTransactionService, error) + type Service struct { - SP view2.ServiceProvider - Network Network - Metrics *metrics.Metrics + GetEndorserTransactionService GetEndorserTransactionServiceFunc + SigService driver.SignerService + Metrics *metrics.Metrics - Broadcasters map[string]BroadcastFnc + Broadcasters map[ConsensusType]BroadcastFnc BroadcastMutex sync.RWMutex Broadcaster BroadcastFnc } -func NewService(sp view2.ServiceProvider, network Network, poolSize int, metrics *metrics.Metrics) *Service { +func NewService(getEndorserTransactionService GetEndorserTransactionServiceFunc, sigService driver.SignerService, configService driver.ConfigService, metrics *metrics.Metrics) *Service { s := &Service{ - SP: sp, - 
Network: network, - Metrics: metrics, - Broadcasters: map[string]BroadcastFnc{}, + GetEndorserTransactionService: getEndorserTransactionService, + SigService: sigService, + Metrics: metrics, + Broadcasters: map[ConsensusType]BroadcastFnc{}, + BroadcastMutex: sync.RWMutex{}, + Broadcaster: nil, } - s.Broadcasters["BFT"] = NewBFTBroadcaster(network, poolSize, metrics).Broadcast - cft := NewCFTBroadcaster(network, poolSize, metrics) - s.Broadcasters["etcdraft"] = cft.Broadcast - s.Broadcasters["solo"] = cft.Broadcast + s.Broadcasters[BFT] = NewBFTBroadcaster(configService, metrics).Broadcast + cft := NewCFTBroadcaster(configService, metrics) + s.Broadcasters[Raft] = cft.Broadcast + s.Broadcasters[Solo] = cft.Broadcast return s } @@ -126,7 +114,7 @@ func (o *Service) Broadcast(ctx context2.Context, blob interface{}) error { return broadcaster(ctx, env) } -func (o *Service) SetConsensusType(consensusType string) error { +func (o *Service) SetConsensusType(consensusType ConsensusType) error { logger.Debugf("ordering, setting consensus type to [%s]", consensusType) broadcaster, ok := o.Broadcasters[consensusType] if !ok { @@ -139,7 +127,7 @@ func (o *Service) SetConsensusType(consensusType string) error { } func (o *Service) createFabricEndorseTransactionEnvelope(tx Transaction) (*common2.Envelope, error) { - ch, err := o.Network.Channel(tx.Channel()) + ets, err := o.GetEndorserTransactionService(tx.Channel()) if err != nil { return nil, errors.Wrapf(err, "failed getting channel [%s]", tx.Channel()) } @@ -147,14 +135,14 @@ func (o *Service) createFabricEndorseTransactionEnvelope(tx Transaction) (*commo if err != nil { return nil, errors.Wrapf(err, "failed marshalling tx [%s]", tx.ID()) } - err = ch.TransactionService().StoreTransaction(tx.ID(), txRaw) + err = ets.StoreTransaction(tx.ID(), txRaw) if err != nil { return nil, errors.Wrap(err, "failed storing tx") } // tx contains the proposal and the endorsements, assemble them in a fabric transaction signerID := 
tx.Creator() - signer, err := o.Network.SignerService().GetSigner(signerID) + signer, err := o.SigService.GetSigner(signerID) if err != nil { logger.Errorf("signer not found for %s while creating tx envelope for ordering [%s]", signerID.UniqueID(), err) return nil, errors.Wrapf(err, "signer not found for %s while creating tx envelope for ordering", signerID.UniqueID()) @@ -169,7 +157,7 @@ func (o *Service) createFabricEndorseTransactionEnvelope(tx Transaction) (*commo type signerWrapper struct { creator view.Identity - signer Signer + signer driver.Signer } func (s *signerWrapper) Sign(message []byte) ([]byte, error) { diff --git a/platform/fabric/core/generic/peer.go b/platform/fabric/core/generic/peer.go new file mode 100644 index 000000000..953e4395f --- /dev/null +++ b/platform/fabric/core/generic/peer.go @@ -0,0 +1,95 @@ +/* +Copyright IBM Corp. All Rights Reserved. + +SPDX-License-Identifier: Apache-2.0 +*/ + +package generic + +import ( + peer2 "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" + "github.com/pkg/errors" +) + +type PeerManager struct { + ConnCache peer2.CachingEndorserPool +} + +func NewPeerManager(configService driver.ConfigService, signer driver.Signer) *PeerManager { + return &PeerManager{ + ConnCache: peer2.CachingEndorserPool{ + Cache: map[string]peer2.Client{}, + ConnCreator: &connCreator{ + ConfigService: configService, + Signer: signer, + }, + Signer: signer, + }, + } +} + +func (c *PeerManager) NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer2.Client, error) { + logger.Debugf("NewPeerClientForAddress [%v]", cc) + return c.ConnCache.NewPeerClientForAddress(cc) +} + +type connCreator struct { + ConfigService driver.ConfigService + Signer driver.Signer +} + +func (c *connCreator) 
NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer2.Client, error) { + logger.Debugf("Creating new peer client for address [%s]", cc.Address) + + secOpts, err := grpc.CreateSecOpts(cc, grpc.TLSClientConfig{ + TLSClientAuthRequired: c.ConfigService.TLSClientAuthRequired(), + TLSClientKeyFile: c.ConfigService.TLSClientKeyFile(), + TLSClientCertFile: c.ConfigService.TLSClientCertFile(), + }) + if err != nil { + return nil, err + } + + timeout := c.ConfigService.ClientConnTimeout() + if timeout <= 0 { + timeout = grpc.DefaultConnectionTimeout + } + clientConfig := &grpc.ClientConfig{ + SecOpts: *secOpts, + KaOpts: grpc.KeepaliveOptions{ + ClientInterval: c.ConfigService.KeepAliveClientInterval(), + ClientTimeout: c.ConfigService.KeepAliveClientTimeout(), + }, + Timeout: timeout, + } + + override := cc.ServerNameOverride + if len(override) == 0 { + override = c.ConfigService.TLSServerHostOverride() + } + + return newPeerClientForClientConfig( + c.Signer, + cc.Address, + override, + *clientConfig, + ) +} + +func newPeerClientForClientConfig(signer driver.Signer, address, override string, clientConfig grpc.ClientConfig) (*peer2.PeerClient, error) { + gClient, err := grpc.NewGRPCClient(clientConfig) + if err != nil { + return nil, errors.WithMessage(err, "failed to create Client from config") + } + pClient := &peer2.PeerClient{ + Signer: signer.Sign, + GRPCClient: peer2.GRPCClient{ + Client: gClient, + Address: address, + Sn: override, + }, + } + return pClient, nil +} diff --git a/platform/fabric/core/generic/peer/common/conn.go b/platform/fabric/core/generic/peer/conn.go similarity index 89% rename from platform/fabric/core/generic/peer/common/conn.go rename to platform/fabric/core/generic/peer/conn.go index a89e00d98..04d1f4157 100644 --- a/platform/fabric/core/generic/peer/common/conn.go +++ b/platform/fabric/core/generic/peer/conn.go @@ -4,13 +4,14 @@ Copyright IBM Corp. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0 */ -package common +package peer import ( "context" "sync" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" "github.com/hyperledger/fabric-protos-go/discovery" @@ -23,13 +24,13 @@ import ( var logger = flogging.MustGetLogger("fabric-sdk.core.generic.peer.conn") type ConnCreator interface { - NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer.Client, error) + NewPeerClientForAddress(cc grpc.ConnectionConfig) (Client, error) } type statefulClient struct { pb.EndorserClient discovery.DiscoveryClient - DC peer.DiscoveryClient + DC DiscoveryClient onErr func() DeliverClient pb.DeliverClient } @@ -72,7 +73,7 @@ func (sc *statefulClient) Send(ctx context.Context, req *discovery2.Request, aut type peerClient struct { lock sync.RWMutex - peer.Client + Client connect func() (*grpc2.ClientConn, error) conn *grpc2.ClientConn signer discovery2.Signer @@ -150,7 +151,7 @@ func (pc *peerClient) DeliverClient() (pb.DeliverClient, error) { }, nil } -func (pc *peerClient) DiscoveryClient() (peer.DiscoveryClient, error) { +func (pc *peerClient) DiscoveryClient() (DiscoveryClient, error) { dc := discovery2.NewClient( func() (*grpc2.ClientConn, error) { conn, err := pc.getOrConn() @@ -187,17 +188,17 @@ func (pc *peerClient) Close() { type CachingEndorserPool struct { ConnCreator lock sync.RWMutex - Cache map[string]peer.Client - Signer discovery2.Signer + Cache map[string]Client + Signer driver.Signer } -func (cep *CachingEndorserPool) NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer.Client, error) { - return cep.getOrCreateClient(cc.Address, func() (peer.Client, error) { +func (cep 
*CachingEndorserPool) NewPeerClientForAddress(cc grpc.ConnectionConfig) (Client, error) { + return cep.getOrCreateClient(cc.Address, func() (Client, error) { return cep.ConnCreator.NewPeerClientForAddress(cc) }) } -func (cep *CachingEndorserPool) getOrCreateClient(key string, newClient func() (peer.Client, error)) (peer.Client, error) { +func (cep *CachingEndorserPool) getOrCreateClient(key string, newClient func() (Client, error)) (Client, error) { if cl, found := cep.lookup(key); found { return cl, nil } @@ -222,7 +223,7 @@ func (cep *CachingEndorserPool) getOrCreateClient(key string, newClient func() ( }, address: pc.Address(), Client: cl, - signer: cep.Signer, + signer: cep.Signer.Sign, } logger.Debugf("Created new client for [%s]", key) @@ -231,12 +232,12 @@ func (cep *CachingEndorserPool) getOrCreateClient(key string, newClient func() ( return cl, nil } -func (cep *CachingEndorserPool) lookupNoLock(key string) (peer.Client, bool) { +func (cep *CachingEndorserPool) lookupNoLock(key string) (Client, bool) { cl, ok := cep.Cache[key] return cl, ok } -func (cep *CachingEndorserPool) lookup(key string) (peer.Client, bool) { +func (cep *CachingEndorserPool) lookup(key string) (Client, bool) { cep.lock.RLock() defer cep.lock.RUnlock() diff --git a/platform/fabric/core/generic/peer/common/common.go b/platform/fabric/core/generic/peer/grpc.go similarity index 84% rename from platform/fabric/core/generic/peer/common/common.go rename to platform/fabric/core/generic/peer/grpc.go index e081a8e66..bb4e9c27f 100644 --- a/platform/fabric/core/generic/peer/common/common.go +++ b/platform/fabric/core/generic/peer/grpc.go @@ -4,13 +4,13 @@ Copyright IBM Corp. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0 */ -package common +package peer import ( "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" ) -type CommonClient struct { +type GRPCClient struct { *grpc.Client Address string Sn string diff --git a/platform/fabric/core/generic/peer/common/peerclient.go b/platform/fabric/core/generic/peer/peerclient.go similarity index 73% rename from platform/fabric/core/generic/peer/common/peerclient.go rename to platform/fabric/core/generic/peer/peerclient.go index d40c3c18f..7fe9fed6b 100644 --- a/platform/fabric/core/generic/peer/common/peerclient.go +++ b/platform/fabric/core/generic/peer/peerclient.go @@ -4,13 +4,12 @@ Copyright IBM Corp. All Rights Reserved. SPDX-License-Identifier: Apache-2.0 */ -package common +package peer import ( "context" "crypto/tls" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" "github.com/hyperledger/fabric-protos-go/discovery" pb "github.com/hyperledger/fabric-protos-go/peer" @@ -21,16 +20,16 @@ import ( // PeerClient represents a client for communicating with a peer type PeerClient struct { - CommonClient + GRPCClient Signer discovery2.Signer } func (pc *PeerClient) Close() { - pc.CommonClient.Client.Close() + pc.GRPCClient.Client.Close() } func (pc *PeerClient) Connection() (*grpc2.ClientConn, error) { - conn, err := pc.CommonClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) + conn, err := pc.GRPCClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) if err != nil { return nil, errors.WithMessagef(err, "endorser client failed to connect to %s", pc.Address()) } @@ -39,7 +38,7 @@ func (pc *PeerClient) Connection() (*grpc2.ClientConn, error) { // Endorser returns a client for the Endorser service func (pc *PeerClient) Endorser() (pb.EndorserClient, error) { - conn, 
err := pc.CommonClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) + conn, err := pc.GRPCClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) if err != nil { return nil, errors.WithMessagef(err, "endorser client failed to connect to %s", pc.Address()) } @@ -47,17 +46,17 @@ func (pc *PeerClient) Endorser() (pb.EndorserClient, error) { } func (pc *PeerClient) Discovery() (discovery.DiscoveryClient, error) { - conn, err := pc.CommonClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) + conn, err := pc.GRPCClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) if err != nil { return nil, errors.WithMessagef(err, "discovery client failed to connect to %s", pc.Address()) } return discovery.NewDiscoveryClient(conn), nil } -func (pc *PeerClient) DiscoveryClient() (peer.DiscoveryClient, error) { +func (pc *PeerClient) DiscoveryClient() (DiscoveryClient, error) { return discovery2.NewClient( func() (*grpc2.ClientConn, error) { - conn, err := pc.CommonClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) + conn, err := pc.GRPCClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) if err != nil { return nil, errors.WithMessagef(err, "discovery client failed to connect to %s", pc.Address()) } @@ -68,7 +67,7 @@ func (pc *PeerClient) DiscoveryClient() (peer.DiscoveryClient, error) { } func (pc *PeerClient) DeliverClient() (pb.DeliverClient, error) { - conn, err := pc.CommonClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) + conn, err := pc.GRPCClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) if err != nil { return nil, errors.WithMessagef(err, "endorser client failed to connect to %s", pc.Address()) } @@ -77,7 +76,7 @@ func (pc *PeerClient) DeliverClient() (pb.DeliverClient, error) { // Deliver returns a client for the Deliver service func (pc *PeerClient) Deliver() (pb.Deliver_DeliverClient, error) { - conn, err := pc.CommonClient.NewConnection(pc.Address(), 
grpc.ServerNameOverride(pc.Sn)) + conn, err := pc.GRPCClient.NewConnection(pc.Address(), grpc.ServerNameOverride(pc.Sn)) if err != nil { return nil, errors.WithMessagef(err, "deliver client failed to connect to %s", pc.Address()) } @@ -86,9 +85,9 @@ func (pc *PeerClient) Deliver() (pb.Deliver_DeliverClient, error) { // Certificate returns the TLS client certificate (if available) func (pc *PeerClient) Certificate() tls.Certificate { - return pc.CommonClient.Certificate() + return pc.GRPCClient.Certificate() } func (pc *PeerClient) Address() string { - return pc.CommonClient.Address + return pc.GRPCClient.Address } diff --git a/platform/fabric/core/generic/processors.go b/platform/fabric/core/generic/processors.go deleted file mode 100644 index 85a447b96..000000000 --- a/platform/fabric/core/generic/processors.go +++ /dev/null @@ -1,7 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package generic diff --git a/platform/fabric/core/generic/rwset/processor.go b/platform/fabric/core/generic/rwset/processor.go index 1bbc6e2f0..3bacfca2f 100644 --- a/platform/fabric/core/generic/rwset/processor.go +++ b/platform/fabric/core/generic/rwset/processor.go @@ -8,17 +8,14 @@ package rwset import ( "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" "github.com/pkg/errors" ) var logger = flogging.MustGetLogger("fabric-sdk.rwset") -type Network interface { +type ChannelProvider interface { Channel(name string) (driver.Channel, error) - TransactionManager() driver.TransactionManager - Name() string } type RWSExtractor interface { @@ -34,17 +31,18 @@ func (r *request) ID() string { } type processorManager struct { - sp view.ServiceProvider - network Network + channelProvider ChannelProvider 
defaultProcessor driver.Processor processors map[string]driver.Processor channelProcessors map[string]map[string]driver.Processor } -func NewProcessorManager(sp view.ServiceProvider, network Network, defaultProcessor driver.Processor) *processorManager { +func NewProcessorManager( + channelProvider ChannelProvider, + defaultProcessor driver.Processor, +) *processorManager { return &processorManager{ - sp: sp, - network: network, + channelProvider: channelProvider, defaultProcessor: defaultProcessor, processors: map[string]driver.Processor{}, channelProcessors: map[string]map[string]driver.Processor{}, @@ -54,7 +52,7 @@ func NewProcessorManager(sp view.ServiceProvider, network Network, defaultProces func (r *processorManager) ProcessByID(channel, txID string) error { logger.Debugf("process transaction [%s,%s]", channel, txID) - ch, err := r.network.Channel(channel) + ch, err := r.channelProvider.Channel(channel) if err != nil { return errors.Wrapf(err, "failed getting channel [%s]", ch) } @@ -66,9 +64,9 @@ func (r *processorManager) ProcessByID(channel, txID string) error { var tx driver.ProcessTransaction switch { case ch.EnvelopeService().Exists(txID): - rws, tx, err = ch.GetRWSetFromEvn(txID) + rws, tx, err = ch.RWSetLoader().GetRWSetFromEvn(txID) case ch.TransactionService().Exists(txID): - rws, tx, err = ch.GetRWSetFromETx(txID) + rws, tx, err = ch.RWSetLoader().GetRWSetFromETx(txID) default: logger.Debugf("no entry found for [%s,%s]", channel, txID) return nil diff --git a/platform/fabric/core/generic/sig/deserializer.go b/platform/fabric/core/generic/sig/deserializer.go new file mode 100644 index 000000000..a2d396f35 --- /dev/null +++ b/platform/fabric/core/generic/sig/deserializer.go @@ -0,0 +1,121 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package sig + +import ( + "sync" + + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/pkg/errors" + "go.uber.org/zap/zapcore" +) + +type Deserializer interface { + DeserializeVerifier(raw []byte) (driver.Verifier, error) + DeserializeSigner(raw []byte) (driver.Signer, error) + Info(raw []byte, auditInfo []byte) (string, error) +} + +type deserializer struct { + deserializersMutex sync.RWMutex + deserializers []Deserializer +} + +func NewMultiplexDeserializer() *deserializer { + return &deserializer{ + deserializers: []Deserializer{}, + } +} + +func (d *deserializer) AddDeserializer(newD Deserializer) { + d.deserializersMutex.Lock() + d.deserializers = append(d.deserializers, newD) + d.deserializersMutex.Unlock() +} + +func (d *deserializer) DeserializeVerifier(raw []byte) (driver.Verifier, error) { + var errs []error + + copyDeserial := d.threadSafeCopyDeserializers() + for _, des := range copyDeserial { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("trying deserialization with [%v]", des) + } + v, err := des.DeserializeVerifier(raw) + if err == nil { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("trying deserialization with [%v] succeeded", des) + } + return v, nil + } + + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("trying deserialization with [%v] failed", des) + } + errs = append(errs, err) + } + + return nil, errors.Errorf("failed deserialization [%v]", errs) +} + +func (d *deserializer) DeserializeSigner(raw []byte) (driver.Signer, error) { + var errs []error + + copyDeserial := d.threadSafeCopyDeserializers() + for _, des := range copyDeserial { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("trying signer deserialization with [%s]", des) + } + v, err := des.DeserializeSigner(raw) + if err == nil { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("trying signer 
deserialization with [%s] succeeded", des) + } + return v, nil + } + + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("trying signer deserialization with [%s] failed [%s]", des, err) + } + errs = append(errs, err) + } + + return nil, errors.Errorf("failed signer deserialization [%v]", errs) +} + +func (d *deserializer) Info(raw []byte, auditInfo []byte) (string, error) { + var errs []error + + copyDeserial := d.threadSafeCopyDeserializers() + for _, des := range copyDeserial { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("trying info deserialization with [%v]", des) + } + v, err := des.Info(raw, auditInfo) + if err == nil { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("trying info deserialization with [%v] succeeded", des) + } + return v, nil + } + + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("trying info deserialization with [%v] failed", des) + } + errs = append(errs, err) + } + + return "", errors.Errorf("failed info deserialization [%v]", errs) +} + +func (d *deserializer) threadSafeCopyDeserializers() []Deserializer { + d.deserializersMutex.RLock() + res := make([]Deserializer, len(d.deserializers)) + copy(res, d.deserializers) + d.deserializersMutex.RUnlock() + return res +} diff --git a/platform/fabric/core/generic/sig/service.go b/platform/fabric/core/generic/sig/service.go new file mode 100644 index 000000000..f97f8528a --- /dev/null +++ b/platform/fabric/core/generic/sig/service.go @@ -0,0 +1,310 @@ +/* +Copyright IBM Corp. All Rights Reserved. 
+ +SPDX-License-Identifier: Apache-2.0 +*/ + +package sig + +import ( + "fmt" + "reflect" + "runtime/debug" + "sync" + + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs" + "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" + "github.com/pkg/errors" + "go.uber.org/zap/zapcore" +) + +var logger = flogging.MustGetLogger("fabric-sdk.core.generic.sig") + +type KVS interface { + Exists(id string) bool + Put(id string, state interface{}) error + Get(id string, state interface{}) error +} + +type VerifierEntry struct { + Verifier driver.Verifier + DebugStack []byte +} + +type SignerEntry struct { + Signer driver.Signer + DebugStack []byte +} + +type Service struct { + signers map[string]SignerEntry + verifiers map[string]VerifierEntry + deserializer Deserializer + viewsSync sync.RWMutex + kvs KVS +} + +func NewService(deserializer Deserializer, kvs KVS) *Service { + return &Service{ + signers: map[string]SignerEntry{}, + verifiers: map[string]VerifierEntry{}, + deserializer: deserializer, + kvs: kvs, + } +} + +func (o *Service) RegisterSigner(identity view.Identity, signer driver.Signer, verifier driver.Verifier) error { + if signer == nil { + return errors.New("invalid signer, expected a valid instance") + } + + idHash := identity.UniqueID() + o.viewsSync.Lock() + s, ok := o.signers[idHash] + o.viewsSync.Unlock() + if ok { + logger.Warnf("another signer bound to [%s]:[%s][%s] from [%s]", identity, GetIdentifier(s), GetIdentifier(signer), string(s.DebugStack)) + return nil + } + o.viewsSync.Lock() + + entry := SignerEntry{Signer: signer} + if logger.IsEnabledFor(zapcore.DebugLevel) { + entry.DebugStack = debug.Stack() + } + + o.signers[idHash] = entry + if o.kvs != nil { + k, err := 
kvs.CreateCompositeKey("sigService", []string{"signer", idHash}) + if err != nil { + return errors.Wrap(err, "failed to create composite key to store entry in kvs") + } + err = o.kvs.Put(k, signer) + if err != nil { + return errors.Wrap(err, "failed to store entry in kvs for the passed signer") + } + } + o.viewsSync.Unlock() + + if verifier != nil { + return o.RegisterVerifier(identity, verifier) + } + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("signer for [%s][%s] registered, no verifier passed", idHash, GetIdentifier(signer)) + } + return nil +} + +func (o *Service) RegisterVerifier(identity view.Identity, verifier driver.Verifier) error { + if verifier == nil { + return errors.New("invalid verifier, expected a valid instance") + } + + idHash := identity.UniqueID() + o.viewsSync.Lock() + v, ok := o.verifiers[idHash] + o.viewsSync.Unlock() + if ok { + logger.Warnf("another verifier bound to [%s]:[%s][%s] from [%s]", idHash, GetIdentifier(v), GetIdentifier(verifier), string(v.DebugStack)) + return nil + } + entry := VerifierEntry{Verifier: verifier} + if logger.IsEnabledFor(zapcore.DebugLevel) { + entry.DebugStack = debug.Stack() + } + o.viewsSync.Lock() + o.verifiers[idHash] = entry + o.viewsSync.Unlock() + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("register verifier to [%s]:[%s]", idHash, GetIdentifier(verifier)) + } + + return nil +} + +func (o *Service) RegisterAuditInfo(identity view.Identity, info []byte) error { + k := kvs.CreateCompositeKeyOrPanic( + "fsc.platform.view.sig", + []string{ + identity.String(), + }, + ) + if err := o.kvs.Put(k, info); err != nil { + return err + } + return nil +} + +func (o *Service) GetAuditInfo(identity view.Identity) ([]byte, error) { + k := kvs.CreateCompositeKeyOrPanic( + "fsc.platform.view.sig", + []string{ + identity.String(), + }, + ) + if !o.kvs.Exists(k) { + return nil, nil + } + var res []byte + if err := o.kvs.Get(k, &res); err != nil { + return nil, err + } + return res, nil +} 
+ +func (o *Service) IsMe(identity view.Identity) bool { + idHash := identity.UniqueID() + // check local cache + o.viewsSync.Lock() + _, ok := o.signers[idHash] + o.viewsSync.Unlock() + if ok { + return true + } + // check kvs + if o.kvs != nil { + k, err := kvs.CreateCompositeKey("sigService", []string{"signer", idHash}) + if err != nil { + return false + } + if o.kvs.Exists(k) { + return true + } + } + // last chance, deserialize + //signer, err := o.GetSigner(identity) + //if err != nil { + // return false + //} + //return signer != nil + return false +} + +func (o *Service) Info(id view.Identity) string { + auditInfo, err := o.GetAuditInfo(id) + if err != nil { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("failed getting audit info for [%s]", id) + } + return fmt.Sprintf("unable to identify identity : [%s][%s]", id.UniqueID(), string(id)) + } + info, err := o.deserializer.Info(id, auditInfo) + if err != nil { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("failed getting info for [%s]", id) + } + return fmt.Sprintf("unable to identify identity : [%s][%s]", id.UniqueID(), string(id)) + } + return info +} + +func (o *Service) GetSigner(identity view.Identity) (driver.Signer, error) { + idHash := identity.UniqueID() + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("get signer for [%s]", idHash) + } + o.viewsSync.Lock() + entry, ok := o.signers[idHash] + o.viewsSync.Unlock() + if !ok { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("signer for [%s] not found, try to deserialize", idHash) + } + // ask the deserializer + if o.deserializer == nil { + return nil, errors.Errorf("cannot find signer for [%s], no deserializer set", identity) + } + var err error + signer, err := o.deserializer.DeserializeSigner(identity) + if err != nil { + return nil, errors.Wrapf(err, "failed deserializing identity for signer [%s]", identity) + } + entry = SignerEntry{Signer: signer} + if 
logger.IsEnabledFor(zapcore.DebugLevel) { + entry.DebugStack = debug.Stack() + } + + o.viewsSync.Lock() + o.signers[idHash] = entry + o.viewsSync.Unlock() + } else { + if logger.IsEnabledFor(zapcore.DebugLevel) { + logger.Debugf("signer for [%s] found", idHash) + } + } + return entry.Signer, nil +} + +func (o *Service) GetVerifier(identity view.Identity) (driver.Verifier, error) { + idHash := identity.UniqueID() + o.viewsSync.Lock() + entry, ok := o.verifiers[idHash] + o.viewsSync.Unlock() + if !ok { + // ask the deserializer + if o.deserializer == nil { + return nil, errors.Errorf("cannot find verifier for [%s], no deserializer set", identity) + } + var err error + verifier, err := o.deserializer.DeserializeVerifier(identity) + if err != nil { + return nil, errors.Wrapf(err, "failed deserializing identity for verifier %v", identity) + } + + entry = VerifierEntry{Verifier: verifier} + if logger.IsEnabledFor(zapcore.DebugLevel) { + entry.DebugStack = debug.Stack() + logger.Debugf("add deserialized verifier for [%s]:[%s]", idHash, GetIdentifier(verifier)) + } + o.viewsSync.Lock() + o.verifiers[idHash] = entry + o.viewsSync.Unlock() + } + return entry.Verifier, nil +} + +func (o *Service) GetSigningIdentity(identity view.Identity) (driver.SigningIdentity, error) { + signer, err := o.GetSigner(identity) + if err != nil { + return nil, err + } + + if _, ok := signer.(driver.SigningIdentity); ok { + return signer.(driver.SigningIdentity), nil + } + + return &si{ + id: identity, + signer: signer, + }, nil +} + +type si struct { + id view.Identity + signer driver.Signer +} + +func (s *si) Verify(message []byte, signature []byte) error { + panic("implement me") +} + +func (s *si) Sign(bytes []byte) ([]byte, error) { + return s.signer.Sign(bytes) +} + +func (s *si) Serialize() ([]byte, error) { + return s.id, nil +} + +func GetIdentifier(f any) string { + if f == nil { + return "" + } + t := reflect.TypeOf(f) + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return 
t.PkgPath() + "/" + t.Name() +} diff --git a/platform/fabric/core/generic/signer.go b/platform/fabric/core/generic/signer.go deleted file mode 100644 index 8430a40f8..000000000 --- a/platform/fabric/core/generic/signer.go +++ /dev/null @@ -1,13 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package generic - -type SerializableSigner interface { - Sign(message []byte) ([]byte, error) - - Serialize() ([]byte, error) -} diff --git a/platform/fabric/core/generic/sigservice.go b/platform/fabric/core/generic/sigservice.go deleted file mode 100644 index 2697d9af0..000000000 --- a/platform/fabric/core/generic/sigservice.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package generic - -import ( - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" -) - -type SigService struct { - sp view2.ServiceProvider -} - -func NewSigService(sp view2.ServiceProvider) *SigService { - return &SigService{sp: sp} -} - -func (s *SigService) GetVerifier(id view.Identity) (driver.Verifier, error) { - return view2.GetSigService(s.sp).GetVerifier(id) -} - -func (s *SigService) GetSigner(id view.Identity) (driver.Signer, error) { - return view2.GetSigService(s.sp).GetSigner(id) -} - -func (s *SigService) GetSigningIdentity(id view.Identity) (driver.SigningIdentity, error) { - return view2.GetSigService(s.sp).GetSigningIdentity(id) -} - -func (s *SigService) RegisterSigner(identity view.Identity, signer driver.Signer, verifier driver.Verifier) error { - return view2.GetSigService(s.sp).RegisterSigner(identity, signer, verifier) -} diff --git a/platform/fabric/core/generic/transaction/manager.go b/platform/fabric/core/generic/transaction/manager.go index 
b9992748e..352a7d727 100644 --- a/platform/fabric/core/generic/transaction/manager.go +++ b/platform/fabric/core/generic/transaction/manager.go @@ -10,22 +10,21 @@ import ( "encoding/json" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view" view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" "github.com/hyperledger/fabric/protoutil" "github.com/pkg/errors" ) +type ChannelProvider interface { + Channel(name string) (driver.Channel, error) +} + type Manager struct { - sp view.ServiceProvider - fns driver.FabricNetworkService factories map[driver.TransactionType]driver.TransactionFactory } -func NewManager(sp view.ServiceProvider, fns driver.FabricNetworkService) *Manager { - factories := map[driver.TransactionType]driver.TransactionFactory{} - factories[driver.EndorserTransaction] = NewEndorserTransactionFactory(sp, fns) - return &Manager{sp: sp, fns: fns, factories: factories} +func NewManager() *Manager { + return &Manager{factories: map[driver.TransactionType]driver.TransactionFactory{}} } func (m *Manager) ComputeTxID(id *driver.TxID) string { @@ -98,16 +97,17 @@ func (m *Manager) AddTransactionFactory(tt driver.TransactionType, factory drive } type EndorserTransactionFactory struct { - sp view.ServiceProvider - fns driver.FabricNetworkService + networkName string + channelProvider ChannelProvider + sigService driver.SignerService } -func NewEndorserTransactionFactory(sp view.ServiceProvider, fns driver.FabricNetworkService) *EndorserTransactionFactory { - return &EndorserTransactionFactory{sp: sp, fns: fns} +func NewEndorserTransactionFactory(networkName string, channelProvider ChannelProvider, sigService driver.SignerService) *EndorserTransactionFactory { + return &EndorserTransactionFactory{networkName: networkName, channelProvider: channelProvider, sigService: sigService} } 
func (e *EndorserTransactionFactory) NewTransaction(channel string, nonce []byte, creator []byte, txid string, rawRequest []byte) (driver.Transaction, error) { - ch, err := e.fns.Channel(channel) + ch, err := e.channelProvider.Channel(channel) if err != nil { return nil, err } @@ -123,15 +123,15 @@ func (e *EndorserTransactionFactory) NewTransaction(channel string, nonce []byte } return &Transaction{ - sp: e.sp, - fns: e.fns, - channel: ch, - TCreator: creator, - TNonce: nonce, - TTxID: txid, - TNetwork: e.fns.Name(), - TChannel: channel, - TTransient: map[string][]byte{}, + channelProvider: e.channelProvider, + sigService: e.sigService, + channel: ch, + TCreator: creator, + TNonce: nonce, + TTxID: txid, + TNetwork: e.networkName, + TChannel: channel, + TTransient: map[string][]byte{}, }, nil } diff --git a/platform/fabric/core/generic/transaction/services.go b/platform/fabric/core/generic/transaction/services.go index cea09eecd..adff85f33 100644 --- a/platform/fabric/core/generic/transaction/services.go +++ b/platform/fabric/core/generic/transaction/services.go @@ -9,7 +9,6 @@ package transaction import ( "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/proto" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/kvs" "github.com/hyperledger/fabric-protos-go/common" @@ -19,15 +18,21 @@ import ( var logger = flogging.MustGetLogger("fabric-sdk.core") +type KVS interface { + Exists(id string) bool + Put(id string, state interface{}) error + Get(id string, state interface{}) error +} + type mds struct { - sp view2.ServiceProvider + KVS KVS network string channel string } -func NewMetadataService(sp view2.ServiceProvider, network 
string, channel string) *mds { +func NewMetadataService(KVS KVS, network string, channel string) *mds { return &mds{ - sp: sp, + KVS: KVS, network: network, channel: channel, } @@ -38,7 +43,7 @@ func (s *mds) Exists(txid string) bool { if err != nil { return false } - return kvs.GetService(s.sp).Exists(key) + return s.KVS.Exists(key) } func (s *mds) StoreTransient(txid string, transientMap driver.TransientMap) error { @@ -49,7 +54,7 @@ func (s *mds) StoreTransient(txid string, transientMap driver.TransientMap) erro if logger.IsEnabledFor(zapcore.DebugLevel) { logger.Debugf("store transient for [%s][%v]", txid, transientMap) } - return kvs.GetService(s.sp).Put(key, transientMap) + return s.KVS.Put(key, transientMap) } func (s *mds) LoadTransient(txid string) (driver.TransientMap, error) { @@ -60,7 +65,7 @@ func (s *mds) LoadTransient(txid string) (driver.TransientMap, error) { return nil, err } transientMap := driver.TransientMap{} - err = kvs.GetService(s.sp).Get(key, &transientMap) + err = s.KVS.Get(key, &transientMap) if err != nil { return nil, err } @@ -68,14 +73,14 @@ func (s *mds) LoadTransient(txid string) (driver.TransientMap, error) { } type envs struct { - sp view2.ServiceProvider + KVS KVS network string channel string } -func NewEnvelopeService(sp view2.ServiceProvider, network string, channel string) *envs { +func NewEnvelopeService(KVS KVS, network string, channel string) *envs { return &envs{ - sp: sp, + KVS: KVS, network: network, channel: channel, } @@ -87,7 +92,7 @@ func (s *envs) Exists(txid string) bool { return false } - return kvs.GetService(s.sp).Exists(key) + return s.KVS.Exists(key) } func (s *envs) StoreEnvelope(txID string, env interface{}) error { @@ -99,13 +104,13 @@ func (s *envs) StoreEnvelope(txID string, env interface{}) error { switch e := env.(type) { case []byte: - return kvs.GetService(s.sp).Put(key, e) + return s.KVS.Put(key, e) case *common.Envelope: envBytes, err := proto.Marshal(e) if err != nil { return 
errors.WithMessagef(err, "failed marshalling envelop for tx [%s]", txID) } - return kvs.GetService(s.sp).Put(key, envBytes) + return s.KVS.Put(key, envBytes) default: return errors.Errorf("invalid env, expected []byte or *common.Envelope, got [%T]", env) } @@ -119,7 +124,7 @@ func (s *envs) LoadEnvelope(txid string) ([]byte, error) { return nil, err } env := []byte{} - err = kvs.GetService(s.sp).Get(key, &env) + err = s.KVS.Get(key, &env) if err != nil { return nil, err } @@ -127,14 +132,14 @@ func (s *envs) LoadEnvelope(txid string) ([]byte, error) { } type ets struct { - sp view2.ServiceProvider + KVS KVS network string channel string } -func NewEndorseTransactionService(sp view2.ServiceProvider, network string, channel string) *ets { +func NewEndorseTransactionService(KVS KVS, network string, channel string) *ets { return &ets{ - sp: sp, + KVS: KVS, network: network, channel: channel, } @@ -145,7 +150,7 @@ func (s *ets) Exists(txid string) bool { if err != nil { return false } - return kvs.GetService(s.sp).Exists(key) + return s.KVS.Exists(key) } func (s *ets) StoreTransaction(txid string, env []byte) error { @@ -155,7 +160,7 @@ func (s *ets) StoreTransaction(txid string, env []byte) error { } logger.Debugf("store etx for [%s]", txid) - return kvs.GetService(s.sp).Put(key, env) + return s.KVS.Put(key, env) } func (s *ets) LoadTransaction(txid string) ([]byte, error) { @@ -166,7 +171,7 @@ func (s *ets) LoadTransaction(txid string) ([]byte, error) { return nil, err } env := []byte{} - err = kvs.GetService(s.sp).Get(key, &env) + err = s.KVS.Get(key, &env) if err != nil { return nil, err } diff --git a/platform/fabric/core/generic/transaction/transasction.go b/platform/fabric/core/generic/transaction/transasction.go index b934143ca..b0146311a 100644 --- a/platform/fabric/core/generic/transaction/transasction.go +++ b/platform/fabric/core/generic/transaction/transasction.go @@ -11,11 +11,9 @@ import ( "encoding/base64" "encoding/json" - 
"github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/fabricutils" - "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/proto" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/fabricutils" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" pcommon "github.com/hyperledger/fabric-protos-go/common" pb "github.com/hyperledger/fabric-protos-go/peer" @@ -75,8 +73,8 @@ func (p *SignedProposal) ChaincodeVersion() string { } type Transaction struct { - sp view2.ServiceProvider - fns driver.FabricNetworkService + channelProvider ChannelProvider + sigService driver.SignerService rwset driver.RWSet channel driver.Channel signedProposal *SignedProposal @@ -215,7 +213,7 @@ func (t *Transaction) SetFromBytes(raw []byte) error { } // Set the channel - ch, err := t.fns.Channel(t.Channel()) + ch, err := t.channelProvider.Channel(t.Channel()) if err != nil { return err } @@ -248,7 +246,7 @@ func (t *Transaction) SetFromEnvelopeBytes(raw []byte) error { t.TProposalResponses = upe.ProposalResponses // Set the channel - ch, err := t.fns.Channel(t.Channel()) + ch, err := t.channelProvider.Channel(t.Channel()) if err != nil { return err } @@ -309,21 +307,21 @@ func (t *Transaction) SetRWSet() error { if err != nil { return errors.WithMessagef(err, "failed to get rws from proposal response") } - t.rwset, err = t.channel.GetRWSet(t.ID(), results) + t.rwset, err = t.channel.Vault().GetRWSet(t.ID(), results) if err != nil { return errors.WithMessagef(err, "failed to populate rws from proposal response") } case len(t.RWSet) != 0: logger.Debugf("populate rws from rwset") var err error - t.rwset, err = t.channel.GetRWSet(t.ID(), t.RWSet) + t.rwset, err = 
t.channel.Vault().GetRWSet(t.ID(), t.RWSet) if err != nil { return errors.WithMessagef(err, "failed to populate rws from existing rws") } default: logger.Debugf("populate rws from scratch") var err error - t.rwset, err = t.channel.NewRWSet(t.ID()) + t.rwset, err = t.channel.Vault().NewRWSet(t.ID()) if err != nil { return errors.WithMessagef(err, "failed to create fresh rws") } @@ -414,7 +412,7 @@ func (t *Transaction) EndorseWithIdentity(identity view.Identity) error { logger.Debugf("endorse transaction [%s] with identity [%s]", t.ID(), identity.String()) } // prepare signer - s, err := t.fns.SignerService().GetSigner(identity) + s, err := t.sigService.GetSigner(identity) if err != nil { return err } @@ -493,7 +491,7 @@ func (t *Transaction) EndorseProposal() error { func (t *Transaction) EndorseProposalWithIdentity(identity view.Identity) error { // prepare signer - s, err := t.fns.SignerService().GetSigner(identity) + s, err := t.sigService.GetSigner(identity) if err != nil { return err } @@ -513,7 +511,7 @@ func (t *Transaction) EndorseProposalResponse() error { func (t *Transaction) EndorseProposalResponseWithIdentity(identity view.Identity) error { // prepare signer - s, err := t.fns.SignerService().GetSigner(identity) + s, err := t.sigService.GetSigner(identity) if err != nil { return err } @@ -532,7 +530,7 @@ func (t *Transaction) AppendProposalResponse(response driver.ProposalResponse) e } func (t *Transaction) ProposalHasBeenEndorsedBy(party view.Identity) error { - verifier, err := t.channel.GetVerifier(party) + verifier, err := t.channel.ChannelMembership().GetVerifier(party) if err != nil { return err } @@ -566,7 +564,7 @@ func (t *Transaction) ProposalResponse() ([]byte, error) { func (t *Transaction) Envelope() (driver.Envelope, error) { signerID := t.Creator() - signer, err := t.fns.SignerService().GetSigner(signerID) + signer, err := t.sigService.GetSigner(signerID) if err != nil { logger.Errorf("signer not found for %s while creating tx envelope 
for ordering [%s]", signerID.UniqueID(), err) return nil, errors.Wrapf(err, "signer not found for %s while creating tx envelope for ordering", signerID.UniqueID()) @@ -653,7 +651,7 @@ func (t *Transaction) getProposalResponse(signer SerializableSigner) (*pb.Propos version := signedProposal.ChaincodeName() if len(signedProposal.ChaincodeVersion()) == 0 { // fetch current chaincode version - chaincode := t.channel.Chaincode(signedProposal.ChaincodeName()) + chaincode := t.channel.ChaincodeManager().Chaincode(signedProposal.ChaincodeName()) var err error version, err = chaincode.Version() if err != nil { @@ -724,3 +722,57 @@ func (s *signerWrapper) Sign(message []byte) ([]byte, error) { func (s *signerWrapper) Serialize() ([]byte, error) { return s.creator, nil } + +type processedTransaction struct { + vc int32 + ue *UnpackedEnvelope + env []byte +} + +func NewProcessedTransactionFromEnvelope(env *pcommon.Envelope) (*processedTransaction, int32, error) { + ue, headerType, err := UnpackEnvelope(env) + if err != nil { + return nil, headerType, err + } + return &processedTransaction{ue: ue}, headerType, nil +} + +func NewProcessedTransactionFromEnvelopeRaw(env []byte) (*processedTransaction, error) { + ue, _, err := UnpackEnvelopeFromBytes(env) + if err != nil { + return nil, err + } + return &processedTransaction{ue: ue, env: env}, nil +} + +func NewProcessedTransaction(pt *pb.ProcessedTransaction) (*processedTransaction, error) { + ue, _, err := UnpackEnvelope(pt.TransactionEnvelope) + if err != nil { + return nil, err + } + env, err := protoutil.Marshal(pt.TransactionEnvelope) + if err != nil { + return nil, err + } + return &processedTransaction{vc: pt.ValidationCode, ue: ue, env: env}, nil +} + +func (p *processedTransaction) TxID() string { + return p.ue.TxID +} + +func (p *processedTransaction) Results() []byte { + return p.ue.Results +} + +func (p *processedTransaction) IsValid() bool { + return p.vc == int32(pb.TxValidationCode_VALID) +} + +func (p 
*processedTransaction) Envelope() []byte { + return p.env +} + +func (p *processedTransaction) ValidationCode() int32 { + return p.vc +} diff --git a/platform/fabric/core/generic/txconfig.go b/platform/fabric/core/generic/txconfig.go deleted file mode 100644 index 5fa0c7265..000000000 --- a/platform/fabric/core/generic/txconfig.go +++ /dev/null @@ -1,299 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package generic - -import ( - "strconv" - "sync" - "time" - - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/committer" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/rwset" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" - "github.com/hyperledger/fabric-protos-go/common" - "github.com/hyperledger/fabric/bccsp/factory" - "github.com/hyperledger/fabric/common/channelconfig" - "github.com/hyperledger/fabric/common/configtx" - "github.com/hyperledger/fabric/protoutil" - "github.com/pkg/errors" -) - -const ( - channelConfigKey = "CHANNEL_CONFIG_ENV_BYTES" - peerNamespace = "_configtx" -) - -// TODO: introduced due to a race condition in idemix. 
-var commitConfigMutex = &sync.Mutex{} - -func (c *Channel) ReloadConfigTransactions() error { - c.ResourcesApplyLock.Lock() - defer c.ResourcesApplyLock.Unlock() - - qe, err := c.Vault.NewQueryExecutor() - if err != nil { - return errors.WithMessagef(err, "failed getting query executor") - } - defer qe.Done() - - logger.Infof("looking up the latest config block available") - var sequence uint64 = 0 - for { - txID := committer.ConfigTXPrefix + strconv.FormatUint(sequence, 10) - vc, _, err := c.Vault.Status(txID) - if err != nil { - return errors.WithMessagef(err, "failed getting tx's status [%s]", txID) - } - logger.Infof("check config block at txID [%s], status [%v]...", txID, vc) - done := false - switch vc { - case driver.Valid: - logger.Infof("config block available, txID [%s], loading...", txID) - - key, err := rwset.CreateCompositeKey(channelConfigKey, []string{strconv.FormatUint(sequence, 10)}) - if err != nil { - return errors.Wrapf(err, "cannot create configtx rws key") - } - envelope, err := qe.GetState(peerNamespace, key) - if err != nil { - return errors.Wrapf(err, "failed setting configtx state in rws") - } - env, err := protoutil.UnmarshalEnvelope(envelope) - if err != nil { - return errors.Wrapf(err, "cannot get payload from config transaction [%s]", txID) - } - payload, err := protoutil.UnmarshalPayload(env.Payload) - if err != nil { - return errors.Wrapf(err, "cannot get payload from config transaction [%s]", txID) - } - ctx, err := configtx.UnmarshalConfigEnvelope(payload.Data) - if err != nil { - return errors.Wrapf(err, "error unmarshalling config which passed initial validity checks [%s]", txID) - } - - var bundle *channelconfig.Bundle - if c.Resources() == nil { - // set up the genesis block - bundle, err = channelconfig.NewBundle(c.ChannelName, ctx.Config, factory.GetDefault()) - if err != nil { - return errors.Wrapf(err, "failed to build a new bundle") - } - } else { - configTxValidator := c.Resources().ConfigtxValidator() - err := 
configTxValidator.Validate(ctx) - if err != nil { - return errors.Wrapf(err, "failed to validate config transaction [%s]", txID) - } - - bundle, err = channelconfig.NewBundle(configTxValidator.ChannelID(), ctx.Config, factory.GetDefault()) - if err != nil { - return errors.Wrapf(err, "failed to create next bundle") - } - - channelconfig.LogSanityChecks(bundle) - if err := capabilitiesSupported(bundle); err != nil { - return err - } - } - - if err := c.ApplyBundle(bundle); err != nil { - return err - } - - sequence = sequence + 1 - continue - case driver.Unknown: - if sequence == 0 { - // Give a chance to 1, in certain setting the first block starts with 1 - sequence++ - continue - } - - logger.Infof("config block at txID [%s] unavailable, stop loading", txID) - done = true - default: - return errors.Errorf("invalid configtx's [%s] status [%d]", txID, vc) - } - if done { - logger.Infof("loading config block done") - break - } - } - if sequence == 1 { - logger.Infof("no config block available, must start from genesis") - // no configuration block found - return nil - } - logger.Infof("latest config block available at sequence [%d]", sequence-1) - - return nil -} - -// CommitConfig is used to validate and apply configuration transactions for a Channel. 
-func (c *Channel) CommitConfig(blockNumber uint64, raw []byte, env *common.Envelope) error { - commitConfigMutex.Lock() - defer commitConfigMutex.Unlock() - - c.ResourcesApplyLock.Lock() - defer c.ResourcesApplyLock.Unlock() - - if env == nil { - return errors.Errorf("Channel config found nil") - } - - payload, err := protoutil.UnmarshalPayload(env.Payload) - if err != nil { - return errors.Wrapf(err, "cannot get payload from config transaction, block number [%d]", blockNumber) - } - - ctx, err := configtx.UnmarshalConfigEnvelope(payload.Data) - if err != nil { - return errors.Wrapf(err, "error unmarshalling config which passed initial validity checks") - } - - txid := committer.ConfigTXPrefix + strconv.FormatUint(ctx.Config.Sequence, 10) - vc, _, err := c.Vault.Status(txid) - if err != nil { - return errors.Wrapf(err, "failed getting tx's status [%s]", txid) - } - switch vc { - case driver.Valid: - logger.Infof("config block [%s] already committed, skip it.", txid) - return nil - case driver.Unknown: - logger.Infof("config block [%s] not committed, commit it.", txid) - // this is okay - default: - return errors.Errorf("invalid configtx's [%s] status [%d]", txid, vc) - } - - var bundle *channelconfig.Bundle - if c.Resources() == nil { - // set up the genesis block - bundle, err = channelconfig.NewBundle(c.ChannelName, ctx.Config, factory.GetDefault()) - if err != nil { - return errors.Wrapf(err, "failed to build a new bundle") - } - } else { - configTxValidator := c.Resources().ConfigtxValidator() - err := configTxValidator.Validate(ctx) - if err != nil { - return errors.Wrapf(err, "failed to validate config transaction, block number [%d]", blockNumber) - } - - bundle, err = channelconfig.NewBundle(configTxValidator.ChannelID(), ctx.Config, factory.GetDefault()) - if err != nil { - return errors.Wrapf(err, "failed to create next bundle") - } - - channelconfig.LogSanityChecks(bundle) - if err := capabilitiesSupported(bundle); err != nil { - return err - } - } - - 
if err := c.commitConfig(txid, blockNumber, ctx.Config.Sequence, raw); err != nil { - return errors.Wrapf(err, "failed committing configtx to the vault") - } - - return c.ApplyBundle(bundle) -} - -// Resources returns the active Channel configuration bundle. -func (c *Channel) Resources() channelconfig.Resources { - c.ResourcesLock.RLock() - res := c.ChannelResources - c.ResourcesLock.RUnlock() - return res -} - -func (c *Channel) commitConfig(txID string, blockNumber uint64, seq uint64, envelope []byte) error { - logger.Infof("[Channel: %s] commit config transaction number [bn:%d][seq:%d]", c.ChannelName, blockNumber, seq) - - rws, err := c.Vault.NewRWSet(txID) - if err != nil { - return errors.Wrapf(err, "cannot create rws for configtx") - } - defer rws.Done() - - key, err := rwset.CreateCompositeKey(channelConfigKey, []string{strconv.FormatUint(seq, 10)}) - if err != nil { - return errors.Wrapf(err, "cannot create configtx rws key") - } - if err := rws.SetState(peerNamespace, key, envelope); err != nil { - return errors.Wrapf(err, "failed setting configtx state in rws") - } - rws.Done() - if err := c.CommitTX(txID, blockNumber, 0, nil); err != nil { - if err2 := c.DiscardTx(txID, ""); err2 != nil { - logger.Errorf("failed committing configtx rws [%s]", err2) - } - return errors.Wrapf(err, "failed committing configtx rws") - } - return nil -} - -func (c *Channel) ApplyBundle(bundle *channelconfig.Bundle) error { - c.ResourcesLock.Lock() - defer c.ResourcesLock.Unlock() - c.ChannelResources = bundle - - // update the list of orderers - ordererConfig, exists := c.ChannelResources.OrdererConfig() - if exists { - logger.Debugf("[Channel: %s] Orderer config has changed, updating the list of orderers", c.ChannelName) - - var newOrderers []*grpc.ConnectionConfig - orgs := ordererConfig.Organizations() - for _, org := range orgs { - msp := org.MSP() - var tlsRootCerts [][]byte - tlsRootCerts = append(tlsRootCerts, msp.GetTLSRootCerts()...) 
- tlsRootCerts = append(tlsRootCerts, msp.GetTLSIntermediateCerts()...) - for _, endpoint := range org.Endpoints() { - logger.Debugf("[Channel: %s] Adding orderer endpoint: [%s:%s:%s]", c.ChannelName, org.Name(), org.MSPID(), endpoint) - // TODO: load from configuration - newOrderers = append(newOrderers, &grpc.ConnectionConfig{ - Address: endpoint, - ConnectionTimeout: 10 * time.Second, - TLSEnabled: true, - TLSRootCertBytes: tlsRootCerts, - }) - } - } - if len(newOrderers) != 0 { - logger.Debugf("[Channel: %s] Updating the list of orderers: (%d) found", c.ChannelName, len(newOrderers)) - if err := c.Network.SetConfigOrderers(ordererConfig, newOrderers); err != nil { - return err - } - } else { - logger.Debugf("[Channel: %s] No orderers found in Channel config", c.ChannelName) - } - } else { - logger.Debugf("no orderer configuration found in Channel config") - } - - return nil -} - -func capabilitiesSupported(res channelconfig.Resources) error { - ac, ok := res.ApplicationConfig() - if !ok { - return errors.Errorf("[Channel %s] does not have application config so is incompatible", res.ConfigtxValidator().ChannelID()) - } - - if err := ac.Capabilities().Supported(); err != nil { - return errors.Wrapf(err, "[Channel %s] incompatible", res.ConfigtxValidator().ChannelID()) - } - - if err := res.ChannelConfig().Capabilities().Supported(); err != nil { - return errors.Wrapf(err, "[Channel %s] incompatible", res.ConfigtxValidator().ChannelID()) - } - - return nil -} diff --git a/platform/fabric/core/generic/vault.go b/platform/fabric/core/generic/vault.go index 643f33d50..02bf195c7 100644 --- a/platform/fabric/core/generic/vault.go +++ b/platform/fabric/core/generic/vault.go @@ -7,30 +7,59 @@ SPDX-License-Identifier: Apache-2.0 package generic import ( - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/config" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/vault" 
"github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/vault/txidstore" - fdriver "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" + "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/cache/secondcache" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/db" "github.com/pkg/errors" ) +type VaultService struct { + *vault.Vault +} + +func NewVaultService(vault *vault.Vault) *VaultService { + return &VaultService{Vault: vault} +} + +// NewRWSet returns a RWSet for this ledger. +// A client may obtain more than one such simulator; they are made unique +// by way of the supplied txid +func (c *VaultService) NewRWSet(txid string) (driver.RWSet, error) { + return c.Vault.NewRWSet(txid) +} + +// GetRWSet returns a RWSet for this ledger whose content is unmarshalled +// from the passed bytes. +// A client may obtain more than one such simulator; they are made unique +// by way of the supplied txid +func (c *VaultService) GetRWSet(txid string, rwset []byte) (driver.RWSet, error) { + return c.Vault.GetRWSet(txid, rwset) +} + +// GetEphemeralRWSet returns an ephemeral RWSet for this ledger whose content is unmarshalled +// from the passed bytes. +// If namespaces is not empty, the returned RWSet will be filtered by the passed namespaces +func (c *VaultService) GetEphemeralRWSet(rwset []byte, namespaces ...string) (driver.RWSet, error) { + return c.Vault.InspectRWSet(rwset, namespaces...) 
+} + type TXIDStore interface { - fdriver.TXIDStore - Get(txid string) (fdriver.ValidationCode, string, error) - Set(txID string, code fdriver.ValidationCode, message string) error + driver.TXIDStore + Get(txid string) (driver.ValidationCode, string, error) + Set(txID string, code driver.ValidationCode, message string) error } -func NewVault(sp view2.ServiceProvider, config *config.Config, channel string) (*vault.Vault, TXIDStore, error) { - logger.Debugf("new fabric vault for channel [%s] with config [%v]", channel, config) - pType := config.VaultPersistenceType() +func NewVault(sp view2.ServiceProvider, configService driver.ConfigService, channel string) (*vault.Vault, TXIDStore, error) { + logger.Debugf("new fabric vault for channel [%s] with config [%v]", channel, configService) + pType := configService.VaultPersistenceType() if pType == "file" { // for retro compatibility pType = "badger" } - persistence, err := db.OpenVersioned(sp, pType, channel, db.NewPrefixConfig(config, config.VaultPersistencePrefix())) + persistence, err := db.OpenVersioned(sp, pType, channel, db.NewPrefixConfig(configService, configService.VaultPersistencePrefix())) if err != nil { return nil, nil, errors.Wrapf(err, "failed creating vault") } @@ -41,7 +70,7 @@ func NewVault(sp view2.ServiceProvider, config *config.Config, channel string) ( return nil, nil, errors.Wrapf(err, "failed creating txid store") } - txIDStoreCacheSize := config.VaultTXStoreCacheSize() + txIDStoreCacheSize := configService.VaultTXStoreCacheSize() if err != nil { return nil, nil, errors.Wrapf(err, "failed loading txID store cache size from configuration") } diff --git a/platform/fabric/core/provider.go b/platform/fabric/core/provider.go index 414260dd7..ae12ff5ef 100644 --- a/platform/fabric/core/provider.go +++ b/platform/fabric/core/provider.go @@ -12,8 +12,6 @@ import ( "reflect" "sync" - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/finality" - 
"github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/views" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" "github.com/hyperledger-labs/fabric-smart-client/platform/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" @@ -40,9 +38,6 @@ func NewFabricNetworkServiceProvider(sp view.ServiceProvider, config *Config) (* config: config, networks: map[string]driver.FabricNetworkService{}, } - if err := provider.InstallViews(); err != nil { - return nil, errors.WithMessage(err, "failed to install fns provider") - } provider.InitFabricLogging() return provider, nil } @@ -55,13 +50,13 @@ func (p *FSNProvider) Start(ctx context.Context) error { if err != nil { return errors.Wrapf(err, "failed to start fabric network service [%s]", name) } - for _, channelName := range fns.Channels() { + for _, channelName := range fns.ConfigService().ChannelIDs() { ch, err := fns.Channel(channelName) if err != nil { return errors.Wrapf(err, "failed to get channel [%s] for fabric network service [%s]", channelName, name) } logger.Infof("start fabric [%s:%s]'s delivery service...", name, channelName) - if err := ch.StartDelivery(ctx); err != nil { + if err := ch.Delivery().Start(ctx); err != nil { return errors.WithMessagef(err, "failed to start delivery on channel [%s] for fabric network service [%s]", channelName, name) } } @@ -76,7 +71,7 @@ func (p *FSNProvider) Stop() error { if err != nil { return err } - for _, channelName := range fns.Channels() { + for _, channelName := range fns.ConfigService().ChannelIDs() { ch, err := fns.Channel(channelName) if err != nil { return err @@ -117,13 +112,6 @@ func (p *FSNProvider) FabricNetworkService(network string) (driver.FabricNetwork return net, nil } -func (p *FSNProvider) InstallViews() error { - if err := view.GetRegistry(p.sp).RegisterResponder(views.NewIsFinalResponderView(p), 
&finality.IsFinalInitiatorView{}); err != nil { - return errors.WithMessagef(err, "failed to register finality responder") - } - return nil -} - // InitFabricLogging initializes the fabric logging system // using the FSC configuration. func (p *FSNProvider) InitFabricLogging() { diff --git a/platform/fabric/core/views/finality.go b/platform/fabric/core/views/finality.go deleted file mode 100644 index e80c4b122..000000000 --- a/platform/fabric/core/views/finality.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package views - -import ( - "context" - "time" - - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/session" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" - "github.com/pkg/errors" -) - -type FNSProvider interface { - FabricNetworkService(network string) (driver.FabricNetworkService, error) -} - -type IsFinalRequest struct { - Network string - Channel string - TxID string - Timeout time.Duration -} - -type IsFinalResponse struct { - Err error -} - -type IsFinalResponderView struct { - FNSProvider FNSProvider -} - -func NewIsFinalResponderView(FNSProvider FNSProvider) *IsFinalResponderView { - return &IsFinalResponderView{FNSProvider: FNSProvider} -} - -func (i *IsFinalResponderView) Call(ctx view.Context) (interface{}, error) { - // receive IsFinalRequest struct - isFinalRequest := &IsFinalRequest{} - session := session.JSON(ctx) - if err := session.Receive(isFinalRequest); err != nil { - return nil, errors.Wrapf(err, "failed to receive request") - } - - // check finality - var err error - network, err := i.FNSProvider.FabricNetworkService(isFinalRequest.Network) - if err != nil { - return nil, errors.Wrapf(err, "failed to get network service for %s", isFinalRequest.Network) - } - var ch driver.Channel - 
ch, err = network.Channel(isFinalRequest.Channel) - if err == nil { - c := ctx.Context() - if isFinalRequest.Timeout != 0 { - var cancel context.CancelFunc - c, cancel = context.WithTimeout(c, isFinalRequest.Timeout) - defer cancel() - } - err = ch.IsFinal(c, isFinalRequest.TxID) - } else { - err = errors.Wrapf(err, "channel %s not found", isFinalRequest.Channel) - } - - // send back answer - if err := session.Send(&IsFinalResponse{Err: err}); err != nil { - return nil, errors.Wrapf(err, "failed to send response") - } - return nil, nil -} diff --git a/platform/fabric/delivery.go b/platform/fabric/delivery.go index e17442cef..5c5b8e982 100644 --- a/platform/fabric/delivery.go +++ b/platform/fabric/delivery.go @@ -16,14 +16,14 @@ type DeliveryCallback func(tx *ProcessedTransaction) (bool, error) // Delivery models the Fabric's delivery service type Delivery struct { - ch *Channel + delivery driver.Delivery } // Scan iterates over all transactions in block starting from the block containing the passed transaction id. // If txID is empty, the iterations starts from the first block. // On each transaction, the callback function is invoked. 
func (d *Delivery) Scan(ctx context.Context, txID string, callback DeliveryCallback) error { - return d.ch.ch.Scan(ctx, txID, func(tx driver.ProcessedTransaction) (bool, error) { + return d.delivery.Scan(ctx, txID, func(tx driver.ProcessedTransaction) (bool, error) { return callback(&ProcessedTransaction{ pt: tx, }) diff --git a/platform/fabric/driver/channel.go b/platform/fabric/driver/channel.go index 7bfedb9c4..9d12d1060 100644 --- a/platform/fabric/driver/channel.go +++ b/platform/fabric/driver/channel.go @@ -6,36 +6,34 @@ SPDX-License-Identifier: Apache-2.0 package driver -import ( - "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/core/generic/peer" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" -) - // Channel gives access to Fabric channel related information type Channel interface { - Committer - Vault - Delivery - - Ledger - Finality - ChannelMembership - TXIDStore - ChaincodeManager - RWSetLoader - // Name returns the name of the channel this instance is bound to Name() string + Committer() Committer + + Vault() Vault + + Delivery() Delivery + + Ledger() Ledger + + Finality() Finality + + ChannelMembership() ChannelMembership + + TXIDStore() TXIDStore + + ChaincodeManager() ChaincodeManager + + RWSetLoader() RWSetLoader + EnvelopeService() EnvelopeService TransactionService() EndorserTransactionService MetadataService() MetadataService - // NewPeerClientForAddress creates an instance of a Client using the - // provided peer connection config - NewPeerClientForAddress(cc grpc.ConnectionConfig) (peer.Client, error) - Close() error } diff --git a/platform/fabric/driver/committer.go b/platform/fabric/driver/committer.go index 594a6429d..72bfcb992 100644 --- a/platform/fabric/driver/committer.go +++ b/platform/fabric/driver/committer.go @@ -12,12 +12,11 @@ import "github.com/hyperledger/fabric-protos-go/common" type ValidationCode int const ( - _ ValidationCode = 
iota - Valid // Transaction is valid and committed - Invalid // Transaction is invalid and has been discarded - Busy // Transaction does not yet have a validity state - Unknown // Transaction is unknown - HasDependencies // Transaction is unknown but has known dependencies + _ ValidationCode = iota + Valid // Transaction is valid and committed + Invalid // Transaction is invalid and has been discarded + Busy // Transaction does not yet have a validity state + Unknown // Transaction is unknown ) // TransactionStatusChanged is sent when the status of a transaction changes @@ -59,7 +58,7 @@ type Committer interface { // Status returns a validation code this committer bind to the passed transaction id, plus // a list of dependant transaction ids if they exist. - Status(txID string) (ValidationCode, string, []string, error) + Status(txID string) (ValidationCode, string, error) // DiscardTx discards the transaction with the passed id and all its dependencies, if they exists. DiscardTx(txID string, message string) error @@ -67,7 +66,6 @@ type Committer interface { // CommitTX commits the transaction with the passed id and all its dependencies, if they exists. // Depending on tx's status, CommitTX does the following: // Tx is Unknown, CommitTx does nothing and returns no error. - // Tx is HasDependencies, CommitTx proceeds with the multi-shard private transaction commit protocol. // Tx is Valid, CommitTx does nothing and returns an error. // Tx is Invalid, CommitTx does nothing and returns an error. 
// Tx is Busy, if Tx is a multi-shard private transaction then CommitTx proceeds with the multi-shard private transaction commit protocol, diff --git a/platform/fabric/driver/config.go b/platform/fabric/driver/config.go index c157b0de9..91968ec68 100644 --- a/platform/fabric/driver/config.go +++ b/platform/fabric/driver/config.go @@ -12,23 +12,6 @@ import ( "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" ) -type ConfigService interface { - // GetString returns the value associated with the key as a string - GetString(key string) string - // GetDuration returns the value associated with the key as a duration - GetDuration(key string) time.Duration - // GetBool returns the value associated with the key asa boolean - GetBool(key string) bool - // IsSet checks to see if the key has been set in any of the data locations - IsSet(key string) bool - // UnmarshalKey takes a single key and unmarshals it into a Struct - UnmarshalKey(key string, rawVal interface{}) error - // GetPath allows configuration strings that specify a (config-file) relative path - GetPath(key string) string - // TranslatePath translates the passed path relative to the config path - TranslatePath(path string) string -} - // PeerFunctionType defines classes of peers providing a specific functionality type PeerFunctionType int @@ -45,20 +28,74 @@ const ( PeerForQuery ) -// Config defines basic information the configuration should provide -type Config interface { - // DefaultChannel returns the name of the default channel - DefaultChannel() string - - // Channels return the list of registered channel names - Channels() []string +type ChaincodeConfig interface { + ID() string + IsPrivate() bool +} - // Orderers returns the list of all registered ordereres - Orderers() []*grpc.ConnectionConfig +type ChannelConfig interface { + ID() string + FinalityWaitTimeout() time.Duration + FinalityForPartiesWaitTimeout() time.Duration + CommitterPollingTimeout() 
time.Duration + CommitterFinalityNumRetries() int + CommitterFinalityUnknownTXTimeout() time.Duration + CommitterWaitForEventTimeout() time.Duration + DeliverySleepAfterFailure() time.Duration + ChaincodeConfigs() []ChaincodeConfig + GetNumRetries() uint + GetRetrySleep() time.Duration + DiscoveryDefaultTTLS() time.Duration + DiscoveryTimeout() time.Duration +} - - // Peers returns the list of all registered peers - Peers() []*grpc.ConnectionConfig +type Configuration interface { + // GetString returns the value associated with the key as a string + GetString(key string) string + // GetInt returns the value associated with the key as an integer + GetInt(key string) int + // GetDuration returns the value associated with the key as a duration + GetDuration(key string) time.Duration + // GetBool returns the value associated with the key as a boolean + GetBool(key string) bool + // GetStringSlice returns the value associated with the key as a slice of strings + GetStringSlice(key string) []string + // IsSet checks to see if the key has been set in any of the data locations + IsSet(key string) bool + // UnmarshalKey takes a single key and unmarshals it into a Struct + UnmarshalKey(key string, rawVal interface{}) error + // ConfigFileUsed returns the file used to populate the config registry + ConfigFileUsed() string + // GetPath allows configuration strings that specify a (config-file) relative path + GetPath(key string) string + // TranslatePath translates the passed path relative to the config path + TranslatePath(path string) string +} - - // PickPeer picks a peer at random among the peers that provide the passed functionality +type ConfigService interface { + Configuration + NetworkName() string + DefaultChannel() string + Channel(name string) ChannelConfig + ChannelIDs() []string + Orderers() []*grpc.ConnectionConfig + SetConfigOrderers([]*grpc.ConnectionConfig) error + PickOrderer() *grpc.ConnectionConfig + BroadcastNumRetries() int + BroadcastRetryInterval()
time.Duration + OrdererConnectionPoolSize() int PickPeer(funcType PeerFunctionType) *grpc.ConnectionConfig + IsChannelQuiet(name string) bool + VaultPersistenceType() string + VaultPersistencePrefix() string + VaultTXStoreCacheSize() int + TLSServerHostOverride() string + ClientConnTimeout() time.Duration + TLSClientAuthRequired() bool + TLSClientKeyFile() string + TLSClientCertFile() string + KeepAliveClientInterval() time.Duration + KeepAliveClientTimeout() time.Duration + NewDefaultChannelConfig(name string) ChannelConfig + TLSEnabled() bool } diff --git a/platform/fabric/driver/delivery.go b/platform/fabric/driver/delivery.go index 6ddcc0ff0..bcf4d21ac 100644 --- a/platform/fabric/driver/delivery.go +++ b/platform/fabric/driver/delivery.go @@ -15,7 +15,7 @@ type DeliveryCallback func(tx ProcessedTransaction) (bool, error) // Delivery gives access to Fabric channel delivery type Delivery interface { // StartDelivery starts the delivery process - StartDelivery(ctx context.Context) error + Start(ctx context.Context) error // Scan iterates over all transactions in block starting from the block containing the passed transaction id. // If txID is empty, the iterations starts from the first block. diff --git a/platform/fabric/driver/finality.go b/platform/fabric/driver/finality.go index 35e72f856..51f3bf507 100644 --- a/platform/fabric/driver/finality.go +++ b/platform/fabric/driver/finality.go @@ -8,8 +8,6 @@ package driver import ( "context" - - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) type Finality interface { @@ -17,9 +15,4 @@ type Finality interface { // with the respect to the passed context that can be used to set a deadline // for the waiting time. IsFinal(ctx context.Context, txID string) error - - // IsFinalForParties takes in input a transaction id and an array of identities. 
- // The identities are contacted to gather information about the finality of the - // passed transaction - IsFinalForParties(txID string, parties ...view.Identity) error } diff --git a/platform/fabric/driver/fns.go b/platform/fabric/driver/fns.go index b886c57c1..971d55f78 100644 --- a/platform/fabric/driver/fns.go +++ b/platform/fabric/driver/fns.go @@ -14,11 +14,10 @@ import ( // FabricNetworkService gives access to a Fabric network components type FabricNetworkService interface { - Config - Ordering - Name() string + OrderingService() Ordering + TransactionManager() TransactionManager ProcessorManager() ProcessorManager diff --git a/platform/fabric/driver/membership.go b/platform/fabric/driver/membership.go index 8ff98f436..1d978b48a 100644 --- a/platform/fabric/driver/membership.go +++ b/platform/fabric/driver/membership.go @@ -24,11 +24,6 @@ type IdentityInfo struct { GetIdentity GetIdentityFunc } -type SigningIdentity interface { - Serialize() ([]byte, error) - Sign(msg []byte) ([]byte, error) -} - type LocalMembership interface { DefaultIdentity() view.Identity AnonymousIdentity() view.Identity diff --git a/platform/fabric/driver/sig.go b/platform/fabric/driver/sig.go index 13ec8e536..c28255b51 100644 --- a/platform/fabric/driver/sig.go +++ b/platform/fabric/driver/sig.go @@ -10,6 +10,21 @@ import ( "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) +type SigningIdentity interface { + Serialize() ([]byte, error) + Sign(msg []byte) ([]byte, error) +} + +// Verifier is an interface which wraps the Verify method. +type Verifier interface { + // Verify verifies the signature over the passed message. 
+ Verify(message, sigma []byte) error +} + +type Signer interface { + Sign(message []byte) ([]byte, error) +} + // SignerService models a signer service type SignerService interface { // GetSigner returns the signer for the passed identity diff --git a/platform/fabric/driver/transaction.go b/platform/fabric/driver/transaction.go index f4eaf92f6..44f92b035 100644 --- a/platform/fabric/driver/transaction.go +++ b/platform/fabric/driver/transaction.go @@ -76,16 +76,6 @@ type TransactionManager interface { AddTransactionFactory(tt TransactionType, factory TransactionFactory) } -// Verifier is an interface which wraps the Verify method. -type Verifier interface { - // Verify verifies the signature over the passed message. - Verify(message, sigma []byte) error -} - -type Signer interface { - Sign(message []byte) ([]byte, error) -} - type Transaction interface { Creator() view.Identity Nonce() []byte diff --git a/platform/fabric/driver/vault.go b/platform/fabric/driver/vault.go index a412434d6..3f75cab8a 100644 --- a/platform/fabric/driver/vault.go +++ b/platform/fabric/driver/vault.go @@ -28,4 +28,15 @@ type Vault interface { // from the passed bytes. 
// If namespaces is not empty, the returned RWSet will be filtered by the passed namespaces GetEphemeralRWSet(rwset []byte, namespaces ...string) (RWSet, error) + + CommitTX(id string, block uint64, index int) error + + Status(id string) (ValidationCode, string, error) + + DiscardTx(id string, message string) error + + RWSExists(id string) bool + + Match(id string, results []byte) error + Close() error } diff --git a/platform/fabric/finality.go b/platform/fabric/finality.go index 405af04d3..242495512 100644 --- a/platform/fabric/finality.go +++ b/platform/fabric/finality.go @@ -10,20 +10,12 @@ import ( "context" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) type Finality struct { - ch driver.Channel + finality driver.Finality } func (c *Finality) IsFinal(ctx context.Context, txID string) error { - if ctx == nil { - ctx = context.Background() - } - return c.ch.IsFinal(ctx, txID) -} - -func (c *Finality) IsFinalForParties(txID string, parties ...view.Identity) error { - return c.ch.IsFinalForParties(txID, parties...) 
+ return c.finality.IsFinal(ctx, txID) } diff --git a/platform/fabric/fns.go b/platform/fabric/fns.go index 2ae54c88d..58c7201d9 100644 --- a/platform/fabric/fns.go +++ b/platform/fabric/fns.go @@ -15,7 +15,6 @@ import ( "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" view2 "github.com/hyperledger-labs/fabric-smart-client/platform/view" "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/flogging" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" "github.com/pkg/errors" ) @@ -38,21 +37,6 @@ func NewNetworkService(SP view2.ServiceProvider, fns driver.FabricNetworkService return &NetworkService{SP: SP, fns: fns, name: name, channels: map[string]*Channel{}} } -// DefaultChannel returns the name of the default channel -func (n *NetworkService) DefaultChannel() string { - return n.fns.DefaultChannel() -} - -// Channels returns the channel names -func (n *NetworkService) Channels() []string { - return n.fns.Channels() -} - -// Peers returns the list of known Peer nodes -func (n *NetworkService) Peers() []*grpc.ConnectionConfig { - return n.fns.Peers() -} - // Channel returns the channel service for the passed id func (n *NetworkService) Channel(id string) (*Channel, error) { n.channelMutex.RLock() diff --git a/platform/fabric/ledger.go b/platform/fabric/ledger.go index bffad56e6..077e3222b 100644 --- a/platform/fabric/ledger.go +++ b/platform/fabric/ledger.go @@ -42,17 +42,17 @@ func (pt *ProcessedTransaction) ValidationCode() int32 { // Ledger models the ledger stored at a remote Fabric peer type Ledger struct { - ch *Channel + l driver.Ledger } // GetBlockNumberByTxID returns the number of the block where the passed transaction appears func (l *Ledger) GetBlockNumberByTxID(txID string) (uint64, error) { - return l.ch.ch.GetBlockNumberByTxID(txID) + return l.l.GetBlockNumberByTxID(txID) } // GetTransactionByID retrieves a 
transaction by id func (l *Ledger) GetTransactionByID(txID string) (*ProcessedTransaction, error) { - pt, err := l.ch.ch.GetTransactionByID(txID) + pt, err := l.l.GetTransactionByID(txID) if err != nil { return nil, err } @@ -61,7 +61,7 @@ func (l *Ledger) GetTransactionByID(txID string) (*ProcessedTransaction, error) // GetBlockByNumber fetches a block by number func (l *Ledger) GetBlockByNumber(number uint64) (*Block, error) { - b, err := l.ch.ch.GetBlockByNumber(number) + b, err := l.l.GetBlockByNumber(number) if err != nil { return nil, err } diff --git a/platform/fabric/membership.go b/platform/fabric/membership.go index 464e4809d..9a12ce64f 100644 --- a/platform/fabric/membership.go +++ b/platform/fabric/membership.go @@ -141,7 +141,7 @@ type Verifier interface { } type MSPManager struct { - ch driver.Channel + ch driver.ChannelMembership } func (c *MSPManager) GetMSPIDs() []string { diff --git a/platform/fabric/ordering.go b/platform/fabric/ordering.go index 1739dc05a..4e732b7aa 100644 --- a/platform/fabric/ordering.go +++ b/platform/fabric/ordering.go @@ -10,25 +10,19 @@ import ( "context" "github.com/hyperledger-labs/fabric-smart-client/platform/fabric/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/services/grpc" ) type Ordering struct { network driver.FabricNetworkService } -// Orderers returns the list of known Orderer nodes -func (n *Ordering) Orderers() []*grpc.ConnectionConfig { - return n.network.Orderers() -} - func (n *Ordering) Broadcast(context context.Context, blob interface{}) error { switch b := blob.(type) { case *Envelope: - return n.network.Broadcast(context, b.e) + return n.network.OrderingService().Broadcast(context, b.e) case *Transaction: - return n.network.Broadcast(context, b.tx) + return n.network.OrderingService().Broadcast(context, b.tx) default: - return n.network.Broadcast(context, blob) + return n.network.OrderingService().Broadcast(context, blob) } } diff --git 
a/platform/fabric/services/endorser/finality.go b/platform/fabric/services/endorser/finality.go index 7ff3e5926..90e6a0bbf 100644 --- a/platform/fabric/services/endorser/finality.go +++ b/platform/fabric/services/endorser/finality.go @@ -16,9 +16,8 @@ import ( ) type finalityView struct { - tx *Transaction - endpoints []view.Identity - timeout time.Duration + tx *Transaction + timeout time.Duration } func (f *finalityView) Call(ctx view.Context) (interface{}, error) { @@ -30,9 +29,6 @@ func (f *finalityView) Call(ctx view.Context) (interface{}, error) { if err != nil { return nil, errors.WithMessagef(err, "failed getting channel [%s:%s]", f.tx.Network(), f.tx.Channel()) } - if len(f.endpoints) != 0 { - return nil, ch.Finality().IsFinalForParties(f.tx.ID(), f.endpoints...) - } c := ctx.Context() if f.timeout != 0 { var cancel context.CancelFunc @@ -50,7 +46,3 @@ func NewFinalityView(tx *Transaction) *finalityView { func NewFinalityWithTimeoutView(tx *Transaction, timeout time.Duration) *finalityView { return &finalityView{tx: tx, timeout: timeout} } - -func NewFinalityFromView(tx *Transaction, endpoints ...view.Identity) *finalityView { - return &finalityView{tx: tx, endpoints: endpoints} -} diff --git a/platform/fabric/services/state/transaction.go b/platform/fabric/services/state/transaction.go index 3a3010154..9ed963d07 100755 --- a/platform/fabric/services/state/transaction.go +++ b/platform/fabric/services/state/transaction.go @@ -59,7 +59,7 @@ func NewAnonymousTransaction(context view.Context) (*Transaction, error) { _, tx, err := endorser.NewTransactionWithSigner( context, fns.Name(), - fns.DefaultChannel(), + fns.ConfigService().DefaultChannel(), fns.LocalMembership().AnonymousIdentity(), ) if err != nil { diff --git a/platform/fabric/services/weaver/relay.go b/platform/fabric/services/weaver/relay.go index c6f99d6b1..96dab0533 100644 --- a/platform/fabric/services/weaver/relay.go +++ b/platform/fabric/services/weaver/relay.go @@ -21,9 +21,10 @@ type Relay 
struct { // ToFabric gives access to the Relay services towards a Fabric network func (r *Relay) ToFabric() *fabric.Fabric { - ch, err := r.fns.Channel(r.fns.DefaultChannel()) + defaultChannel := r.fns.ConfigService().DefaultChannel() + ch, err := r.fns.Channel(defaultChannel) if err != nil { - logger.Errorf("cannot get channel [%s:%s]: [%s]", r.fns.Name(), r.fns.DefaultChannel()) + logger.Errorf("cannot get channel [%s:%s]: [%s]", r.fns.Name(), defaultChannel, err) return nil } diff --git a/platform/fabric/vault.go b/platform/fabric/vault.go index 120bfbc45..4911babac 100644 --- a/platform/fabric/vault.go +++ b/platform/fabric/vault.go @@ -239,12 +239,11 @@ func (qe *QueryExecutor) Done() { type ValidationCode int const ( - _ ValidationCode = iota - Valid // Transaction is valid and committed - Invalid // Transaction is invalid and has been discarded - Busy // Transaction does not yet have a validity state - Unknown // Transaction is unknown - HasDependencies // Transaction is unknown but has known dependencies + _ ValidationCode = iota + Valid // Transaction is valid and committed + Invalid // Transaction is invalid and has been discarded + Busy // Transaction does not yet have a validity state + Unknown // Transaction is unknown ) type SeekStart struct{} @@ -286,12 +285,14 @@ func (t *TxIDIterator) Close() { // Vault models a key-value store that can be updated by committing rwsets type Vault struct { - ch fdriver.Channel + vault fdriver.Vault + txidStore fdriver.TXIDStore + ch fdriver.Channel } // GetLastTxID returns the last transaction id committed func (c *Vault) GetLastTxID() (string, error) { - return c.ch.GetLastTxID() + return c.txidStore.GetLastTxID() } func (c *Vault) TxIDIterator(pos interface{}) (*TxIDIterator, error) { @@ -306,34 +307,34 @@ func (c *Vault) TxIDIterator(pos interface{}) (*TxIDIterator, error) { default: return nil, errors.Errorf("invalid position %T", pos) } - it, err := c.ch.Iterator(iPos) + it, err := c.txidStore.Iterator(iPos) if err 
!= nil { return nil, err } return &TxIDIterator{TxidIterator: it}, nil } -func (c *Vault) Status(txID string) (ValidationCode, string, []string, error) { - code, message, deps, err := c.ch.Status(txID) +func (c *Vault) Status(txID string) (ValidationCode, string, error) { + code, message, err := c.vault.Status(txID) if err != nil { - return Unknown, "", deps, err + return Unknown, "", err } - return ValidationCode(code), message, deps, nil + return ValidationCode(code), message, nil } func (c *Vault) DiscardTx(txID string, message string) error { - return c.ch.DiscardTx(txID, message) + return c.vault.DiscardTx(txID, message) } func (c *Vault) CommitTX(txid string, block uint64, indexInBloc int) error { - return c.ch.CommitTX(txid, block, indexInBloc, nil) + return c.vault.CommitTX(txid, block, indexInBloc) } // NewQueryExecutor gives handle to a query executor. // A client can obtain more than one 'QueryExecutor's for parallel execution. // Any synchronization should be performed at the implementation level if required func (c *Vault) NewQueryExecutor() (*QueryExecutor, error) { - qe, err := c.ch.NewQueryExecutor() + qe, err := c.vault.NewQueryExecutor() if err != nil { return nil, err } @@ -344,7 +345,7 @@ func (c *Vault) NewQueryExecutor() (*QueryExecutor, error) { // A client may obtain more than one such simulator; they are made unique // by way of the supplied txid func (c *Vault) NewRWSet(txid string) (*RWSet, error) { - rws, err := c.ch.NewRWSet(txid) + rws, err := c.vault.NewRWSet(txid) if err != nil { return nil, err } @@ -356,7 +357,7 @@ func (c *Vault) NewRWSet(txid string) (*RWSet, error) { // A client may obtain more than one such simulator; they are made unique // by way of the supplied txid func (c *Vault) GetRWSet(txid string, rwset []byte) (*RWSet, error) { - rws, err := c.ch.GetRWSet(txid, rwset) + rws, err := c.vault.GetRWSet(txid, rwset) if err != nil { return nil, err } @@ -367,7 +368,7 @@ func (c *Vault) GetRWSet(txid string, rwset []byte) 
(*RWSet, error) { // from the passed bytes. // If namespaces is not empty, the returned RWSet will be filtered by the passed namespaces func (c *Vault) GetEphemeralRWSet(rwset []byte, namespaces ...string) (*RWSet, error) { - rws, err := c.ch.GetEphemeralRWSet(rwset, namespaces...) + rws, err := c.vault.GetEphemeralRWSet(rwset, namespaces...) if err != nil { return nil, err } diff --git a/platform/orion/core/generic/committer/committer.go b/platform/orion/core/generic/committer/committer.go index 95597b618..9d42d1e6a 100644 --- a/platform/orion/core/generic/committer/committer.go +++ b/platform/orion/core/generic/committer/committer.go @@ -48,7 +48,6 @@ type ProcessorManager interface { // TxEvent contains information for token transaction commit type TxEvent struct { TxID string - DependantTxIDs []string ValidationCode types.Flag ValidationMessage string Block uint64 @@ -376,16 +375,6 @@ func (c *committer) notifyFinality(event TxEvent) { for _, listener := range listeners { listener <- event } - - for _, txid := range event.DependantTxIDs { - listeners := c.listeners[txid] - if logger.IsEnabledFor(zapcore.DebugLevel) { - logger.Debugf("Notify the finality of [%s] (dependant) to [%d] listeners, event: [%v]", txid, len(listeners), event) - } - for _, listener := range listeners { - listener <- event - } - } } func (c *committer) listenToFinality(ctx context.Context, txID string, timeout time.Duration) error { diff --git a/platform/orion/core/generic/finality/finality.go b/platform/orion/core/generic/finality/finality.go index d0d7b646e..253e8ecb9 100644 --- a/platform/orion/core/generic/finality/finality.go +++ b/platform/orion/core/generic/finality/finality.go @@ -12,7 +12,6 @@ import ( "github.com/hyperledger-labs/fabric-smart-client/pkg/utils/errors" "github.com/hyperledger-labs/fabric-smart-client/platform/orion/core/generic/committer" "github.com/hyperledger-labs/fabric-smart-client/platform/orion/driver" - 
"github.com/hyperledger-labs/fabric-smart-client/platform/view/view" errors2 "github.com/pkg/errors" ) @@ -69,7 +68,3 @@ func (f *finality) IsFinal(ctx context.Context, txID string) error { } return errors2.Errorf("failed retrieveing transaction finality for [%s]: [%s][%s]", txID, err, err2) } - -func (f *finality) IsFinalForParties(txID string, parties ...view.Identity) error { - panic("implement me") -} diff --git a/platform/orion/driver/finality.go b/platform/orion/driver/finality.go index eebd5b86f..2d8ef259e 100644 --- a/platform/orion/driver/finality.go +++ b/platform/orion/driver/finality.go @@ -8,8 +8,6 @@ package driver import ( "context" - - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) type Finality interface { @@ -17,9 +15,4 @@ type Finality interface { // with the respect to the passed context that can be used to set a deadline // for the waiting time. IsFinal(ctx context.Context, txID string) error - - // IsFinalForParties takes in input a transaction id and an array of identities. 
- // The identities are contacted to gather information about the finality of the - // passed transaction - IsFinalForParties(txID string, parties ...view.Identity) error } diff --git a/platform/orion/driver/transaction.go b/platform/orion/driver/transaction.go index ab70aa4d1..759505316 100644 --- a/platform/orion/driver/transaction.go +++ b/platform/orion/driver/transaction.go @@ -9,12 +9,11 @@ package driver type ValidationCode int const ( - _ ValidationCode = iota - Valid // Transaction is valid and committed - Invalid // Transaction is invalid and has been discarded - Busy // Transaction does not yet have a validity state - Unknown // Transaction is unknown - HasDependencies // Transaction is unknown but has known dependencies + _ ValidationCode = iota + Valid // Transaction is valid and committed + Invalid // Transaction is invalid and has been discarded + Busy // Transaction does not yet have a validity state + Unknown // Transaction is unknown ) type Envelope interface { diff --git a/platform/orion/finality.go b/platform/orion/finality.go index cce97eccb..d2be47e73 100644 --- a/platform/orion/finality.go +++ b/platform/orion/finality.go @@ -10,7 +10,6 @@ import ( "context" "github.com/hyperledger-labs/fabric-smart-client/platform/orion/driver" - "github.com/hyperledger-labs/fabric-smart-client/platform/view/view" ) type Finality struct { @@ -23,7 +22,3 @@ func (c *Finality) IsFinal(ctx context.Context, txID string) error { } return c.finality.IsFinal(ctx, txID) } - -func (c *Finality) IsFinalForParties(txID string, parties ...view.Identity) error { - return c.finality.IsFinalForParties(txID, parties...) 
-} diff --git a/platform/orion/vault.go b/platform/orion/vault.go index 667efacd2..17cd4ed0e 100644 --- a/platform/orion/vault.go +++ b/platform/orion/vault.go @@ -24,12 +24,11 @@ const ( type ValidationCode int const ( - _ ValidationCode = iota - Valid // Transaction is valid and committed - Invalid // Transaction is invalid and has been discarded - Busy // Transaction does not yet have a validity state - Unknown // Transaction is unknown - HasDependencies // Transaction is unknown but has known dependencies + _ ValidationCode = iota + Valid // Transaction is valid and committed + Invalid // Transaction is invalid and has been discarded + Busy // Transaction does not yet have a validity state + Unknown // Transaction is unknown ) type RWSet struct { diff --git a/platform/view/services/grpc/client.go b/platform/view/services/grpc/client.go index 3196b8648..62fce1a80 100644 --- a/platform/view/services/grpc/client.go +++ b/platform/view/services/grpc/client.go @@ -110,13 +110,76 @@ func NewGRPCClient(config ClientConfig) (*Client, error) { return client, nil } +type TLSClientConfig struct { + TLSClientAuthRequired bool + TLSClientKeyFile string + TLSClientCertFile string +} + +func CreateSecOpts(connConfig ConnectionConfig, cliConfig TLSClientConfig) (*SecureOptions, error) { + return createSecOpts(connConfig, false, &cliConfig) +} + +func createTLSSecOpts(connConfig ConnectionConfig) (*SecureOptions, error) { + return createSecOpts(connConfig, true, nil) +} + +func createSecOpts(connConfig ConnectionConfig, forceTLS bool, cliConfig *TLSClientConfig) (*SecureOptions, error) { + var certs [][]byte + if connConfig.TLSEnabled { + switch { + case len(connConfig.TLSRootCertFile) != 0: + caPEM, err := os.ReadFile(connConfig.TLSRootCertFile) + if err != nil { + return nil, errors.WithMessagef(err, "unable to load TLS cert from %s", connConfig.TLSRootCertFile) + } + certs = append(certs, caPEM) + case len(connConfig.TLSRootCertBytes) != 0: + certs = 
connConfig.TLSRootCertBytes + default: + return nil, errors.New("missing TLSRootCertFile in client config") + } + } + + tlsEnabled := connConfig.TLSEnabled || forceTLS + secOpts := &SecureOptions{ + UseTLS: tlsEnabled, + RequireClientCert: tlsEnabled && cliConfig != nil && cliConfig.TLSClientAuthRequired, + } + + if secOpts.RequireClientCert { + keyPEM, err := os.ReadFile(cliConfig.TLSClientKeyFile) + if err != nil { + return nil, errors.WithMessage(err, "unable to load fabric.tls.clientKey.file") + } + secOpts.Key = keyPEM + certPEM, err := os.ReadFile(cliConfig.TLSClientCertFile) + if err != nil { + return nil, errors.WithMessage(err, "unable to load fabric.tls.clientCert.file") + } + secOpts.Certificate = certPEM + } + + if tlsEnabled { + if len(certs) == 0 { + return nil, errors.New("tls root cert file must be set") + } + secOpts.ServerRootCAs = certs + } + return secOpts, nil +} + // CreateGRPCClient returns a comm.Client based on toke client config func CreateGRPCClient(config *ConnectionConfig) (*Client, error) { + secOpts, err := createTLSSecOpts(*config) + if err != nil { + return nil, err + } timeout := config.ConnectionTimeout if timeout <= 0 { timeout = DefaultConnectionTimeout } - clientConfig := ClientConfig{ + return NewGRPCClient(ClientConfig{ KaOpts: KeepaliveOptions{ ClientInterval: 60 * time.Second, ClientTimeout: 60 * time.Second, @@ -125,32 +188,8 @@ func CreateGRPCClient(config *ConnectionConfig) (*Client, error) { ServerMinInterval: 60 * time.Second, }, Timeout: timeout, - } - - if config.TLSEnabled { - var certs [][]byte - switch { - case len(config.TLSRootCertFile) != 0: - caPEM, err := os.ReadFile(config.TLSRootCertFile) - if err != nil { - return nil, errors.WithMessagef(err, "unable to load TLS cert from %s", config.TLSRootCertFile) - } - certs = append(certs, caPEM) - case len(config.TLSRootCertBytes) != 0: - certs = config.TLSRootCertBytes - default: - return nil, errors.New("missing TLSRootCertFile in client config") - } - - secOpts := SecureOptions{ - 
UseTLS: true, - ServerRootCAs: certs, - RequireClientCert: false, - } - clientConfig.SecOpts = secOpts - } - - return NewGRPCClient(clientConfig) + SecOpts: *secOpts, + }) } func (client *Client) parseSecureOptions(opts SecureOptions) error {