diff --git a/.dockerignore b/.dockerignore index 65e3ba2ed..5a71055bc 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1 +1,3 @@ test/ +bin/cql* +*.cover.out diff --git a/.gitignore b/.gitignore index 0ce9eb7e2..4ae821fc2 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ *.trace +conf* +..bfg-report pprof.txt .dsn *.svg @@ -13,9 +15,11 @@ vendor/**/.gitignore node_*/ kayak_test *.conf +*.ldb *.db *.db-shm *.db-wal +*.ldb .DS_Store msgpack-20180824 diff --git a/.travis.yml b/.travis.yml index f4644b655..93a905397 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ env: - REVIEWDOG_VERSION=0.9.11 language: go go: - - '1.11.x' + - '1.10.x' os: - linux @@ -32,7 +32,9 @@ script: - bash build.sh - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverprofile cover.out - cd rpc && go test -test.bench ^BenchmarkPersistentCaller_Call$ -test.run ^$ && cd - - - gocovmerge cover.out $(find cmd -name "*.cover.out") > coverage.txt && rm -f cover.out + - bash cleanupDB.sh || true + - cd cmd/cql-minerd && go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ && cd - + - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) - >- golint ./... | grep -v 'vendor/' | grep -v 'server/' | grep -v 'utils/' | reviewdog -f=golint -reporter=github-pr-review || true diff --git a/CHANGELOG.md b/CHANGELOG.md index bcca8da03..37674d155 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## [v0.0.4](https://github.com/CovenantSQL/CovenantSQL/tree/v0.0.4) (2018-11-08) + +[Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.0.3...v0.0.4) + +**Fixed bugs:** + +- Potential deadlock in testing [\#93](https://github.com/CovenantSQL/CovenantSQL/issues/93) + +**Closed issues:** + +- Where can I find covenantsql.io/covenantsql\_adapter [\#53](https://github.com/CovenantSQL/CovenantSQL/issues/53) + +**Merged pull requests:** + +- Fix loadChain failure, remove the lock in sync\(\) [\#114](https://github.com/CovenantSQL/CovenantSQL/pull/114) ([zeqing-guo](https://github.com/zeqing-guo)) +- Kayak performance improvement refactor [\#112](https://github.com/CovenantSQL/CovenantSQL/pull/112) ([xq262144](https://github.com/xq262144)) +- Fix index out of bound, refactor part of sqlchain code [\#110](https://github.com/CovenantSQL/CovenantSQL/pull/110) ([leventeliu](https://github.com/leventeliu)) +- Support lastInsertID/affectedRows in kayak [\#109](https://github.com/CovenantSQL/CovenantSQL/pull/109) ([xq262144](https://github.com/xq262144)) + ## [v0.0.3](https://github.com/CovenantSQL/CovenantSQL/tree/v0.0.3) (2018-11-04) [Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.0.2...v0.0.3) @@ -7,7 +26,6 @@ **Fixed bugs:** - Cannot receive tokens from testnet [\#84](https://github.com/CovenantSQL/CovenantSQL/issues/84) -- Potential deadlock in testing [\#93](https://github.com/CovenantSQL/CovenantSQL/issues/93) **Closed issues:** @@ -49,27 +67,6 @@ [Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/82811a8fcac65d74aefbb506450e4477ecdad048...v0.0.1) -**TestNet** - -1. Ready for CLI or SDK usage. For now, Linux & OSX supported only. -1. SQL Chain Explorer is ready. - -**TestNet Known Issues** - -1. Main Chain - 1. Allocation algorithm for BlockProducer and Miner is incomplete. - 1. Joining as BP or Miner is unsupported for now. _Fix@2018-10-12_ - 1. Forking Recovery algorithm is incomplete. -1. Connector - 1. 
[Java](https://github.com/CovenantSQL/covenant-connector) and [Golang Connector](https://github.com/CovenantSQL/CovenantSQL/tree/develop/client) is ready. - 1. ĐApp support for ETH or EOS is incomplete. - 1. Java connector protocol is based on RESTful HTTPS, change to Golang DH-RPC latter. -1. Database - 1. Cartesian product or big join caused OOM. _Fix@2018-10-12_ - 1. SQL Query filter is incomplete. _Fix@2018-10-12_ - 1. Forking Recovery algorithm is incomplete. - 1. Database for TestNet is World Open on [Explorer](https://explorer.dbhub.org). - **Closed issues:** - ThunderDB has been renamed to CovenantSQL [\#58](https://github.com/CovenantSQL/CovenantSQL/issues/58) diff --git a/Gopkg.lock b/Gopkg.lock index 1cee45a5b..dca93369b 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,6 +1,19 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. +[[projects]] + branch = "master" + digest = "1:2fab77e19256b26d733b70e1f68cd965cb87e38e3a619fa960cf319aef743d53" + name = "bazil.org/fuse" + packages = [ + ".", + "fs", + "fs/fstestutil", + "fuseutil", + ] + pruneopts = "UT" + revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e" + [[projects]] branch = "master" digest = "1:341f9de25b320f45124840fa084aa804a6659cfeae2ea93a45424fd73f9d7da5" @@ -575,10 +588,11 @@ [[projects]] branch = "master" - digest = "1:eceec1bdeb912f2aed8e5e9c8c81927649925095e1e4d5b85b331898fee06397" + digest = "1:67ee3460c086005f76f49c4d5f38a80ce11d68986de22979c75bf04e8372db9c" name = "golang.org/x/net" packages = [ "bpf", + "context", "html", "html/atom", "internal/iana", @@ -612,6 +626,9 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ + "bazil.org/fuse", + "bazil.org/fuse/fs", + "bazil.org/fuse/fs/fstestutil", "github.com/CovenantSQL/HashStablePack/marshalhash", "github.com/CovenantSQL/go-sqlite3-encrypt", "github.com/CovenantSQL/sqlparser", diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index 23104a50e..45b813e19 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -4,11 +4,11 @@ echo nameserver 1.1.1.1 > /etc/resolv.conf case "${COVENANT_ROLE}" in miner) - exec /app/cql-minerd -config "${COVENANT_CONF}" + exec /app/cql-minerd -config "${COVENANT_CONF}" "${@}" ;; blockproducer) rm -f /app/node_*/chain.db - exec /app/cqld -config "${COVENANT_CONF}" + exec /app/cqld -config "${COVENANT_CONF}" "${@}" ;; observer) rm -f /app/node_observer/observer.db diff --git a/blockproducer/chain.go b/blockproducer/chain.go index f91da6f92..bcb00161b 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -35,6 +35,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/coreos/bbolt" + "github.com/pkg/errors" ) var ( @@ -56,10 +57,9 @@ type Chain struct { rt *rt cl *rpc.Caller - blocksFromSelf chan *pt.Block - blocksFromRPC chan *pt.Block - pendingTxs chan pi.Transaction - stopCh chan struct{} + blocksFromRPC chan *pt.Block + pendingTxs chan pi.Transaction + stopCh chan struct{} } // NewChain creates a new blockchain. 
@@ -121,15 +121,14 @@ func NewChain(cfg *Config) (*Chain, error) { // create chain chain := &Chain{ - db: db, - ms: newMetaState(), - bi: newBlockIndex(), - rt: newRuntime(cfg, accountAddress), - cl: rpc.NewCaller(), - blocksFromSelf: make(chan *pt.Block), - blocksFromRPC: make(chan *pt.Block), - pendingTxs: make(chan pi.Transaction), - stopCh: make(chan struct{}), + db: db, + ms: newMetaState(), + bi: newBlockIndex(), + rt: newRuntime(cfg, accountAddress), + cl: rpc.NewCaller(), + blocksFromRPC: make(chan *pt.Block), + pendingTxs: make(chan pi.Transaction), + stopCh: make(chan struct{}), } log.WithField("genesis", cfg.Genesis).Debug("pushing genesis block") @@ -168,15 +167,14 @@ func LoadChain(cfg *Config) (chain *Chain, err error) { } chain = &Chain{ - db: db, - ms: newMetaState(), - bi: newBlockIndex(), - rt: newRuntime(cfg, accountAddress), - cl: rpc.NewCaller(), - blocksFromSelf: make(chan *pt.Block), - blocksFromRPC: make(chan *pt.Block), - pendingTxs: make(chan pi.Transaction), - stopCh: make(chan struct{}), + db: db, + ms: newMetaState(), + bi: newBlockIndex(), + rt: newRuntime(cfg, accountAddress), + cl: rpc.NewCaller(), + blocksFromRPC: make(chan *pt.Block), + pendingTxs: make(chan pi.Transaction), + stopCh: make(chan struct{}), } err = chain.db.View(func(tx *bolt.Tx) (err error) { @@ -332,6 +330,7 @@ func (c *Chain) pushGenesisBlock(b *pt.Block) (err error) { func (c *Chain) pushBlock(b *pt.Block) error { err := c.checkBlock(b) if err != nil { + err = errors.Wrap(err, "check block failed") return err } @@ -608,14 +607,6 @@ func (c *Chain) processBlocks() { var stash []*pt.Block for { select { - case block := <-c.blocksFromSelf: - h := c.rt.getHeightFromTime(block.Timestamp()) - if h == c.rt.getNextTurn()-1 { - err := c.pushBlockWithoutCheck(block) - if err != nil { - log.Error(err) - } - } case block := <-c.blocksFromRPC: if h := c.rt.getHeightFromTime(block.Timestamp()); h > c.rt.getNextTurn()-1 { // Stash newer blocks for later check @@ -630,7 +621,11 @@ func (c *Chain) processBlocks() { } else { err := c.pushBlock(block) if err != nil { - log.Error(err) + log.WithFields(log.Fields{ + "block_hash": block.BlockHash(), + "block_parent_hash": block.ParentHash(), + "block_timestamp": block.Timestamp(), + }).Debug(err) } } @@ -662,7 +657,7 @@ func (c *Chain) processTxs() { "next_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), - "transaction": tx.GetHash().String(), + "transaction": tx.Hash().String(), }).Debugf("Failed to push tx with error: %v", err) } case <-c.stopCh: diff --git a/blockproducer/chain_test.go b/blockproducer/chain_test.go index 2955ad2df..1da9c742f 100644 --- a/blockproducer/chain_test.go +++ b/blockproducer/chain_test.go @@ -29,7 +29,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" . "github.com/smartystreets/goconvey/convey" ) @@ -279,13 +279,13 @@ func TestMultiNode(t *testing.T) { br, err := generateRandomBillingRequest() c.So(err, ShouldBeNil) - bReq := &ct.AdviseBillingReq{ + bReq := &types.AdviseBillingReq{ Envelope: proto.Envelope{ // TODO(lambda): Add fields. 
}, Req: br, } - bResp := &ct.AdviseBillingResp{} + bResp := &types.AdviseBillingResp{} log.WithFields(log.Fields{ "node": val, "requestHash": br.RequestHash, diff --git a/blockproducer/db_service.go b/blockproducer/db_service.go index 8deea1e03..463d925c8 100644 --- a/blockproducer/db_service.go +++ b/blockproducer/db_service.go @@ -30,9 +30,8 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" dto "github.com/prometheus/client_model/go" ) @@ -66,7 +65,7 @@ type DBService struct { } // CreateDatabase defines block producer create database logic. -func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatabaseResponse) (err error) { +func (s *DBService) CreateDatabase(req *types.CreateDatabaseRequest, resp *types.CreateDatabaseResponse) (err error) { // verify signature if err = req.Verify(); err != nil { return @@ -99,7 +98,7 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab log.WithField("peers", peers).Debug("generated peers info") // TODO(lambda): call accounting features, top up deposit - var genesisBlock *ct.Block + var genesisBlock *types.Block if genesisBlock, err = s.generateGenesisBlock(dbID, req.Header.ResourceMeta); err != nil { return } @@ -118,9 +117,9 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab return } - initSvcReq := new(wt.UpdateService) - initSvcReq.Header.Op = wt.CreateDB - initSvcReq.Header.Instance = wt.ServiceInstance{ + initSvcReq := new(types.UpdateService) + initSvcReq.Header.Op = types.CreateDB + initSvcReq.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, Peers: peers, GenesisBlock: genesisBlock, @@ -129,9 +128,9 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab return } - rollbackReq := new(wt.UpdateService) - rollbackReq.Header.Op = wt.DropDB - rollbackReq.Header.Instance = wt.ServiceInstance{ + rollbackReq := new(types.UpdateService) + rollbackReq.Header.Op = types.DropDB + rollbackReq.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, } if err = rollbackReq.Sign(privateKey); err != nil { @@ -143,7 +142,7 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab } // save to meta - instanceMeta := wt.ServiceInstance{ + instanceMeta := types.ServiceInstance{ DatabaseID: dbID, Peers: peers, ResourceMeta: req.Header.ResourceMeta, @@ -168,7 +167,7 @@ func (s *DBService) CreateDatabase(req *CreateDatabaseRequest, resp *CreateDatab } // DropDatabase defines block producer drop database logic. 
-func (s *DBService) DropDatabase(req *DropDatabaseRequest, resp *DropDatabaseResponse) (err error) { +func (s *DBService) DropDatabase(req *types.DropDatabaseRequest, resp *types.DropDatabaseResponse) (err error) { // verify signature if err = req.Verify(); err != nil { return @@ -185,15 +184,15 @@ func (s *DBService) DropDatabase(req *DropDatabaseRequest, resp *DropDatabaseRes }() // get database peers - var instanceMeta wt.ServiceInstance + var instanceMeta types.ServiceInstance if instanceMeta, err = s.ServiceMap.Get(req.Header.DatabaseID); err != nil { return } // call miner nodes to drop database - dropDBSvcReq := new(wt.UpdateService) - dropDBSvcReq.Header.Op = wt.DropDB - dropDBSvcReq.Header.Instance = wt.ServiceInstance{ + dropDBSvcReq := new(types.UpdateService) + dropDBSvcReq.Header.Op = types.DropDB + dropDBSvcReq.Header.Instance = types.ServiceInstance{ DatabaseID: req.Header.DatabaseID, } if dropDBSvcReq.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { @@ -228,7 +227,7 @@ func (s *DBService) DropDatabase(req *DropDatabaseRequest, resp *DropDatabaseRes } // GetDatabase defines block producer get database logic. -func (s *DBService) GetDatabase(req *GetDatabaseRequest, resp *GetDatabaseResponse) (err error) { +func (s *DBService) GetDatabase(req *types.GetDatabaseRequest, resp *types.GetDatabaseResponse) (err error) { // verify signature if err = req.Verify(); err != nil { return @@ -245,7 +244,7 @@ func (s *DBService) GetDatabase(req *GetDatabaseRequest, resp *GetDatabaseRespon }() // fetch from meta - var instanceMeta wt.ServiceInstance + var instanceMeta types.ServiceInstance if instanceMeta, err = s.ServiceMap.Get(req.Header.DatabaseID); err != nil { return } @@ -268,9 +267,9 @@ func (s *DBService) GetDatabase(req *GetDatabaseRequest, resp *GetDatabaseRespon } // GetNodeDatabases defines block producer get node databases logic. -func (s *DBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitServiceResponse) (err error) { +func (s *DBService) GetNodeDatabases(req *types.InitService, resp *types.InitServiceResponse) (err error) { // fetch from meta - var instances []wt.ServiceInstance + var instances []types.ServiceInstance if instances, err = s.ServiceMap.GetDatabases(req.GetNodeID().ToNodeID()); err != nil { return } @@ -323,7 +322,7 @@ func (s *DBService) generateDatabaseID(reqNodeID *proto.RawNodeID) (dbID proto.D } } -func (s *DBService) allocateNodes(lastTerm uint64, dbID proto.DatabaseID, resourceMeta wt.ResourceMeta) (peers *proto.Peers, err error) { +func (s *DBService) allocateNodes(lastTerm uint64, dbID proto.DatabaseID, resourceMeta types.ResourceMeta) (peers *proto.Peers, err error) { curRange := int(resourceMeta.Node) excludeNodes := make(map[proto.NodeID]bool) var allocated []allocatedNode @@ -506,7 +505,7 @@ func (s *DBService) buildPeers(term uint64, allocated []proto.NodeID) (peers *pr return } -func (s *DBService) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta wt.ResourceMeta) (genesisBlock *ct.Block, err error) { +func (s *DBService) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta types.ResourceMeta) (genesisBlock *types.Block, err error) { // TODO(xq262144): following is stub code, real logic should be implemented in the future emptyHash := hash.Hash{} @@ -519,9 +518,9 @@ func (s *DBService) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta wt. 
return } - genesisBlock = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + genesisBlock = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: nodeID, GenesisHash: emptyHash, @@ -535,7 +534,7 @@ func (s *DBService) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta wt. return } -func (s *DBService) batchSendSvcReq(req *wt.UpdateService, rollbackReq *wt.UpdateService, nodes []proto.NodeID) (err error) { +func (s *DBService) batchSendSvcReq(req *types.UpdateService, rollbackReq *types.UpdateService, nodes []proto.NodeID) (err error) { if err = s.batchSendSingleSvcReq(req, nodes); err != nil { s.batchSendSingleSvcReq(rollbackReq, nodes) } @@ -543,7 +542,7 @@ func (s *DBService) batchSendSvcReq(req *wt.UpdateService, rollbackReq *wt.Updat return } -func (s *DBService) batchSendSingleSvcReq(req *wt.UpdateService, nodes []proto.NodeID) (err error) { +func (s *DBService) batchSendSingleSvcReq(req *types.UpdateService, nodes []proto.NodeID) (err error) { var wg sync.WaitGroup errCh := make(chan error, len(nodes)) @@ -551,7 +550,7 @@ func (s *DBService) batchSendSingleSvcReq(req *wt.UpdateService, nodes []proto.N wg.Add(1) go func(s proto.NodeID, ec chan error) { defer wg.Done() - var resp wt.UpdateServiceResponse + var resp types.UpdateServiceResponse ec <- rpc.NewCaller().CallNode(s, route.DBSDeploy.String(), req, &resp) }(node, errCh) } diff --git a/blockproducer/db_service_map.go b/blockproducer/db_service_map.go index 6c260677a..1a56042a7 100644 --- a/blockproducer/db_service_map.go +++ b/blockproducer/db_service_map.go @@ -20,21 +20,21 @@ import ( "sync" "github.com/CovenantSQL/CovenantSQL/proto" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/pkg/errors" ) // DBMetaPersistence defines database meta persistence api. type DBMetaPersistence interface { - GetDatabase(dbID proto.DatabaseID) (wt.ServiceInstance, error) - SetDatabase(meta wt.ServiceInstance) error + GetDatabase(dbID proto.DatabaseID) (types.ServiceInstance, error) + SetDatabase(meta types.ServiceInstance) error DeleteDatabase(dbID proto.DatabaseID) error - GetAllDatabases() ([]wt.ServiceInstance, error) + GetAllDatabases() ([]types.ServiceInstance, error) } // DBServiceMap defines database instance meta. type DBServiceMap struct { - dbMap map[proto.DatabaseID]wt.ServiceInstance + dbMap map[proto.DatabaseID]types.ServiceInstance nodeMap map[proto.NodeID]map[proto.DatabaseID]bool persist DBMetaPersistence sync.RWMutex @@ -44,7 +44,7 @@ type DBServiceMap struct { func InitServiceMap(persistImpl DBMetaPersistence) (s *DBServiceMap, err error) { s = &DBServiceMap{ persist: persistImpl, - dbMap: make(map[proto.DatabaseID]wt.ServiceInstance), + dbMap: make(map[proto.DatabaseID]types.ServiceInstance), nodeMap: make(map[proto.NodeID]map[proto.DatabaseID]bool), } @@ -52,7 +52,7 @@ func InitServiceMap(persistImpl DBMetaPersistence) (s *DBServiceMap, err error) s.Lock() defer s.Unlock() - var allDatabases []wt.ServiceInstance + var allDatabases []types.ServiceInstance if allDatabases, err = s.persist.GetAllDatabases(); err != nil { return @@ -73,7 +73,7 @@ func InitServiceMap(persistImpl DBMetaPersistence) (s *DBServiceMap, err error) } // Set add database to meta. 
-func (c *DBServiceMap) Set(meta wt.ServiceInstance) (err error) { +func (c *DBServiceMap) Set(meta types.ServiceInstance) (err error) { c.Lock() defer c.Unlock() @@ -82,7 +82,7 @@ func (c *DBServiceMap) Set(meta wt.ServiceInstance) (err error) { } // remove previous records - var oldMeta wt.ServiceInstance + var oldMeta types.ServiceInstance var ok bool if oldMeta, ok = c.dbMap[meta.DatabaseID]; ok { @@ -110,7 +110,7 @@ func (c *DBServiceMap) Set(meta wt.ServiceInstance) (err error) { } // Get find database from meta. -func (c *DBServiceMap) Get(dbID proto.DatabaseID) (meta wt.ServiceInstance, err error) { +func (c *DBServiceMap) Get(dbID proto.DatabaseID) (meta types.ServiceInstance, err error) { c.RLock() defer c.RUnlock() @@ -135,7 +135,7 @@ func (c *DBServiceMap) Delete(dbID proto.DatabaseID) (err error) { c.Lock() defer c.Unlock() - var meta wt.ServiceInstance + var meta types.ServiceInstance var ok bool // delete from cache @@ -156,15 +156,15 @@ func (c *DBServiceMap) Delete(dbID proto.DatabaseID) (err error) { } // GetDatabases return database config. -func (c *DBServiceMap) GetDatabases(nodeID proto.NodeID) (dbs []wt.ServiceInstance, err error) { +func (c *DBServiceMap) GetDatabases(nodeID proto.NodeID) (dbs []types.ServiceInstance, err error) { c.RLock() defer c.RUnlock() - dbs = make([]wt.ServiceInstance, 0) + dbs = make([]types.ServiceInstance, 0) for dbID, ok := range c.nodeMap[nodeID] { if ok { - var db wt.ServiceInstance + var db types.ServiceInstance if db, ok = c.dbMap[dbID]; ok { dbs = append(dbs, db) } diff --git a/blockproducer/db_service_map_test.go b/blockproducer/db_service_map_test.go index ed2afb375..b5fe441e9 100644 --- a/blockproducer/db_service_map_test.go +++ b/blockproducer/db_service_map_test.go @@ -26,7 +26,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" . "github.com/smartystreets/goconvey/convey" ) @@ -66,7 +66,7 @@ func TestServiceMap(t *testing.T) { So(err, ShouldNotBeNil) // test get exists - var instance wt.ServiceInstance + var instance types.ServiceInstance instance, err = svcMap.Get(proto.DatabaseID("db")) So(instance.DatabaseID, ShouldResemble, proto.DatabaseID("db")) @@ -116,7 +116,7 @@ func TestServiceMap(t *testing.T) { So(svcMap.dbMap, ShouldNotContainKey, proto.DatabaseID("db2")) // test get databases - var instances []wt.ServiceInstance + var instances []types.ServiceInstance instances, err = svcMap.GetDatabases(nodeID) So(instances, ShouldHaveLength, 1) So(instances[0].DatabaseID, ShouldResemble, proto.DatabaseID("db")) diff --git a/blockproducer/db_service_test.go b/blockproducer/db_service_test.go index 6c464256e..14d5b4d95 100644 --- a/blockproducer/db_service_test.go +++ b/blockproducer/db_service_test.go @@ -26,7 +26,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" . 
"github.com/smartystreets/goconvey/convey" ) @@ -71,20 +71,20 @@ func TestService(t *testing.T) { So(err, ShouldBeNil) // test get database - getReq := new(GetDatabaseRequest) + getReq := new(types.GetDatabaseRequest) getReq.Header.DatabaseID = proto.DatabaseID("db") err = getReq.Sign(privateKey) So(err, ShouldBeNil) - getRes := new(GetDatabaseResponse) + getRes := new(types.GetDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBGetDatabase.String(), getReq, getRes) So(err, ShouldBeNil) So(getReq.Verify(), ShouldBeNil) So(getRes.Header.InstanceMeta.DatabaseID, ShouldResemble, proto.DatabaseID("db")) // get node databases - getAllReq := new(wt.InitService) - getAllRes := new(wt.InitServiceResponse) + getAllReq := new(types.InitService) + getAllRes := new(types.InitServiceResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBGetNodeDatabases.String(), getAllReq, getAllRes) So(err, ShouldBeNil) So(getAllRes.Verify(), ShouldBeNil) @@ -92,25 +92,25 @@ func TestService(t *testing.T) { So(getAllRes.Header.Instances[0].DatabaseID, ShouldResemble, proto.DatabaseID("db")) // create database, no metric received, should failed - createDBReq := new(CreateDatabaseRequest) - createDBReq.Header.ResourceMeta = wt.ResourceMeta{ + createDBReq := new(types.CreateDatabaseRequest) + createDBReq.Header.ResourceMeta = types.ResourceMeta{ Node: 1, } err = createDBReq.Sign(privateKey) So(err, ShouldBeNil) - createDBRes := new(CreateDatabaseResponse) + createDBRes := new(types.CreateDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBCreateDatabase.String(), createDBReq, createDBRes) So(err, ShouldNotBeNil) // trigger metrics, but does not allow block producer to service as miner metric.NewCollectClient().UploadMetrics(nodeID) - createDBRes = new(CreateDatabaseResponse) + createDBRes = new(types.CreateDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBCreateDatabase.String(), createDBReq, createDBRes) So(err, ShouldNotBeNil) // allow block producer to service as miner, only use this in test case dbService.includeBPNodesForAllocation = true - createDBRes = new(CreateDatabaseResponse) + createDBRes = new(types.CreateDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBCreateDatabase.String(), createDBReq, createDBRes) So(err, ShouldBeNil) So(createDBRes.Verify(), ShouldBeNil) @@ -133,16 +133,16 @@ func TestService(t *testing.T) { // use the database serverID := createDBRes.Header.InstanceMeta.Peers.Leader dbID := createDBRes.Header.InstanceMeta.DatabaseID - var queryReq *wt.Request - queryReq, err = buildQuery(wt.WriteQuery, 1, 1, dbID, []string{ + var queryReq *types.Request + queryReq, err = buildQuery(types.WriteQuery, 1, 1, dbID, []string{ "create table test (test int)", "insert into test values(1)", }) So(err, ShouldBeNil) - queryRes := new(wt.Response) + queryRes := new(types.Response) err = rpc.NewCaller().CallNode(serverID, route.DBSQuery.String(), queryReq, queryRes) So(err, ShouldBeNil) - queryReq, err = buildQuery(wt.ReadQuery, 1, 2, dbID, []string{ + queryReq, err = buildQuery(types.ReadQuery, 1, 2, dbID, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -158,16 +158,16 @@ func TestService(t *testing.T) { So(queryRes.Payload.Rows[0].Values[0], ShouldEqual, 1) // drop database - dropDBReq := new(DropDatabaseRequest) + dropDBReq := new(types.DropDatabaseRequest) dropDBReq.Header.DatabaseID = createDBRes.Header.InstanceMeta.DatabaseID err = dropDBReq.Sign(privateKey) So(err, ShouldBeNil) - dropDBRes := new(DropDatabaseResponse) + 
dropDBRes := new(types.DropDatabaseResponse) err = rpc.NewCaller().CallNode(nodeID, route.BPDBDropDatabase.String(), dropDBReq, dropDBRes) So(err, ShouldBeNil) // get this database again to test if it is dropped - getReq = new(GetDatabaseRequest) + getReq = new(types.GetDatabaseRequest) getReq.Header.DatabaseID = createDBRes.Header.InstanceMeta.DatabaseID err = getReq.Sign(privateKey) So(err, ShouldBeNil) @@ -176,7 +176,7 @@ func TestService(t *testing.T) { }) } -func buildQuery(queryType wt.QueryType, connID uint64, seqNo uint64, databaseID proto.DatabaseID, queries []string) (query *wt.Request, err error) { +func buildQuery(queryType types.QueryType, connID uint64, seqNo uint64, databaseID proto.DatabaseID, queries []string) (query *types.Request, err error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -192,15 +192,15 @@ func buildQuery(queryType wt.QueryType, connID uint64, seqNo uint64, databaseID tm := time.Now().UTC() // build queries - realQueries := make([]wt.Query, len(queries)) + realQueries := make([]types.Query, len(queries)) for i, v := range queries { realQueries[i].Pattern = v } - query = &wt.Request{ - Header: wt.SignedRequestHeader{ - RequestHeader: wt.RequestHeader{ + query = &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ DatabaseID: databaseID, QueryType: queryType, NodeID: nodeID, @@ -209,7 +209,7 @@ func buildQuery(queryType wt.QueryType, connID uint64, seqNo uint64, databaseID Timestamp: tm, }, }, - Payload: wt.RequestPayload{ + Payload: types.RequestPayload{ Queries: realQueries, }, } diff --git a/blockproducer/errors.go b/blockproducer/errors.go index b6ac4e352..a36b9d8a9 100644 --- a/blockproducer/errors.go +++ b/blockproducer/errors.go @@ -48,8 +48,6 @@ var ( ErrSmallerSequenceID = errors.New("SequanceID should be bigger than the old one") // ErrInvalidBillingRequest defines BillingRequest is invalid ErrInvalidBillingRequest = errors.New("The BillingRequest is invalid") - // ErrSignVerification indicates a failed signature verification. - ErrSignVerification = errors.New("signature verification failed") // ErrBalanceOverflow indicates that there will be an overflow after balance manipulation. ErrBalanceOverflow = errors.New("balance overflow") diff --git a/blockproducer/helper_test.go b/blockproducer/helper_test.go index a464632be..ce68aca12 100644 --- a/blockproducer/helper_test.go +++ b/blockproducer/helper_test.go @@ -38,11 +38,10 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/worker" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) const ( @@ -54,7 +53,7 @@ var ( ) // copied from sqlchain.xxx_test. 
-func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) { +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) { // Generate key pair priv, pub, err := asymmetric.GenSecp256k1KeyPair() @@ -65,9 +64,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: rootHash, @@ -75,12 +74,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error Timestamp: time.Now().UTC(), }, }, - Queries: make([]*hash.Hash, rand.Intn(10)+10), - } - - for i := range b.Queries { - b.Queries[i] = new(hash.Hash) - rand.Read(b.Queries[i][:]) } if isGenesis { @@ -113,7 +106,7 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error // fake a persistence driver. type stubDBMetaPersistence struct{} -func (p *stubDBMetaPersistence) GetDatabase(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { +func (p *stubDBMetaPersistence) GetDatabase(dbID proto.DatabaseID) (instance types.ServiceInstance, err error) { // for test purpose, name with db prefix consider it exists if !strings.HasPrefix(string(dbID), "db") { err = ErrNoSuchDatabase @@ -123,7 +116,7 @@ func (p *stubDBMetaPersistence) GetDatabase(dbID proto.DatabaseID) (instance wt. return p.getInstanceMeta(dbID) } -func (p *stubDBMetaPersistence) SetDatabase(meta wt.ServiceInstance) (err error) { +func (p *stubDBMetaPersistence) SetDatabase(meta types.ServiceInstance) (err error) { return } @@ -131,13 +124,13 @@ func (p *stubDBMetaPersistence) DeleteDatabase(dbID proto.DatabaseID) (err error return } -func (p *stubDBMetaPersistence) GetAllDatabases() (instances []wt.ServiceInstance, err error) { - instances = make([]wt.ServiceInstance, 1) +func (p *stubDBMetaPersistence) GetAllDatabases() (instances []types.ServiceInstance, err error) { + instances = make([]types.ServiceInstance, 1) instances[0], err = p.getInstanceMeta("db") return } -func (p *stubDBMetaPersistence) getInstanceMeta(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { +func (p *stubDBMetaPersistence) getInstanceMeta(dbID proto.DatabaseID) (instance types.ServiceInstance, err error) { var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return diff --git a/chain/errors.go b/blockproducer/interfaces/mixins_test.go similarity index 58% rename from chain/errors.go rename to blockproducer/interfaces/mixins_test.go index f936a24db..48fca8eb0 100644 --- a/chain/errors.go +++ b/blockproducer/interfaces/mixins_test.go @@ -14,15 +14,19 @@ * limitations under the License. */ -package chain +package interfaces -import "errors" +import ( + "testing" -var ( - // ErrUnknownTx indicates that the transaction is unknown. - ErrUnknownTx = errors.New("unknown transaction") - // ErrDuplicateTx indicates that the transaction will be duplicate in the new block. - ErrDuplicateTx = errors.New("duplicate transaction") - // ErrCorruptedIndex indicates that a corrupted index item is detected. - ErrCorruptedIndex = errors.New("corrupted index") + . 
"github.com/smartystreets/goconvey/convey" ) + +func TestTransactionTypeMixin(t *testing.T) { + Convey("test transaction type mixin", t, func() { + m := NewTransactionTypeMixin(TransactionTypeBaseAccount) + So(m.GetTransactionType(), ShouldEqual, TransactionTypeBaseAccount) + m.SetTransactionType(TransactionTypeTransfer) + So(m.GetTransactionType(), ShouldEqual, TransactionTypeTransfer) + }) +} diff --git a/blockproducer/interfaces/transaction.go b/blockproducer/interfaces/transaction.go index f4916ac80..ba1d93a86 100644 --- a/blockproducer/interfaces/transaction.go +++ b/blockproducer/interfaces/transaction.go @@ -97,7 +97,7 @@ func (t TransactionType) String() string { type Transaction interface { GetAccountAddress() proto.AccountAddress GetAccountNonce() AccountNonce - GetHash() hash.Hash + Hash() hash.Hash GetTransactionType() TransactionType Sign(signer *asymmetric.PrivateKey) error Verify() error diff --git a/blockproducer/interfaces/transaction_test.go b/blockproducer/interfaces/transaction_test.go index 07ecba4e8..7a80ac53b 100644 --- a/blockproducer/interfaces/transaction_test.go +++ b/blockproducer/interfaces/transaction_test.go @@ -54,4 +54,9 @@ func TestTypes(t *testing.T) { So(h1, ShouldResemble, h2) } }) + Convey("test string", t, func() { + for i := TransactionTypeBilling; i != TransactionTypeNumber+1; i++ { + So(i.String(), ShouldNotBeEmpty) + } + }) } diff --git a/blockproducer/interfaces/transaction_wrapper.go b/blockproducer/interfaces/transaction_wrapper.go index 37948aa38..2c46b6cf7 100644 --- a/blockproducer/interfaces/transaction_wrapper.go +++ b/blockproducer/interfaces/transaction_wrapper.go @@ -77,14 +77,6 @@ func (w *TransactionWrapper) CodecEncodeSelf(e *codec.Encoder) { return } - // if the transaction is supports type transaction mixin - var rawTx interface{} = w.Transaction - if _, ok := rawTx.(ContainsTransactionTypeMixin); ok { - // encode directly - helperEncoder.EncFallback(w.Transaction) - return - } - // translate wrapper to two fields array wrapped by map encDriver.WriteArrayStart(2) encDriver.WriteArrayElem() @@ -146,7 +138,6 @@ func (w *TransactionWrapper) decodeFromWrapper(d *codec.Decoder) { helperDecoder.DecFallback(&w.Transaction, true) } } else { - helperDecoder.DecSwallow() helperDecoder.DecStructFieldNotFound(i, "") } } diff --git a/blockproducer/interfaces/transaction_wrapper_test.go b/blockproducer/interfaces/transaction_wrapper_test.go new file mode 100644 index 000000000..a8d97608c --- /dev/null +++ b/blockproducer/interfaces/transaction_wrapper_test.go @@ -0,0 +1,194 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interfaces_test + +import ( + "testing" + + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" + . 
"github.com/smartystreets/goconvey/convey" +) + +type TestTransactionEncode struct { + TestField int64 + pi.TransactionTypeMixin +} + +func (e *TestTransactionEncode) GetAccountAddress() proto.AccountAddress { + return proto.AccountAddress{} +} + +func (e *TestTransactionEncode) GetAccountNonce() pi.AccountNonce { + return pi.AccountNonce(0) +} + +func (e *TestTransactionEncode) Hash() hash.Hash { + return hash.Hash{} +} + +func (e *TestTransactionEncode) Sign(signer *asymmetric.PrivateKey) error { + return nil +} + +func (e *TestTransactionEncode) Verify() error { + return nil +} +func (e *TestTransactionEncode) MarshalHash() ([]byte, error) { + return nil, nil +} + +func (e *TestTransactionEncode) Msgsize() int { + return 0 +} + +func init() { + pi.RegisterTransaction(pi.TransactionTypeBilling, (*TestTransactionEncode)(nil)) +} + +func TestTransactionWrapper(t *testing.T) { + Convey("tx wrapper test", t, func() { + w := &pi.TransactionWrapper{} + So(w.Unwrap(), ShouldBeNil) + + // nil encode + buf, err := utils.EncodeMsgPack(w) + So(err, ShouldBeNil) + var v interface{} + err = utils.DecodeMsgPack(buf.Bytes(), &v) + So(err, ShouldBeNil) + So(v, ShouldBeNil) + + // encode test + e := &TestTransactionEncode{} + e.SetTransactionType(pi.TransactionTypeBilling) + buf, err = utils.EncodeMsgPack(e) + So(err, ShouldBeNil) + var v2 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v2) + So(err, ShouldBeNil) + So(v2.GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + + // encode with wrapper test + e2 := pi.WrapTransaction(e) + buf, err = utils.EncodeMsgPack(e2) + So(err, ShouldBeNil) + var v3 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v3) + So(err, ShouldBeNil) + So(v3.GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + tw, ok := v3.(*pi.TransactionWrapper) + So(ok, ShouldBeTrue) + So(tw.Unwrap().GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + + // test encode non-existence type + e3 := &TestTransactionEncode{} + e3.SetTransactionType(pi.TransactionTypeTransfer) + buf, err = utils.EncodeMsgPack(e3) + So(err, ShouldBeNil) + var v4 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v4) + So(err, ShouldNotBeNil) + + // test invalid decode, not enough length + buf, err = utils.EncodeMsgPack([]uint64{}) + So(err, ShouldBeNil) + var v5 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v5) + So(err, ShouldNotBeNil) + + // test invalid decode, invalid tx type + buf, err = utils.EncodeMsgPack([]uint64{1}) + So(err, ShouldBeNil) + var v6 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v6) + So(err, ShouldNotBeNil) + + // test invalid decode, nil type + buf, err = utils.EncodeMsgPack([]interface{}{nil}) + So(err, ShouldBeNil) + var v7 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v7) + So(err, ShouldNotBeNil) + + // test invalid decode, nil payload + buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeBilling, nil}) + So(err, ShouldBeNil) + var v8 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v8) + So(err, ShouldNotBeNil) + + // test invalid decode, invalid payload container type + buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeBilling, []uint64{}}) + So(err, ShouldBeNil) + var v9 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v9) + So(err, ShouldNotBeNil) + + // extra payload + buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeBilling, e, 1, 2}) + So(err, ShouldBeNil) + var v10 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v10) + So(err, 
ShouldBeNil) + + // test invalid type + buf, err = utils.EncodeMsgPack(1) + So(err, ShouldBeNil) + var v11 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v11) + So(err, ShouldNotBeNil) + + // test invalid mixin + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": "invalid type"}) + So(err, ShouldBeNil) + var v12 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v12) + So(err, ShouldNotBeNil) + + // test invalid mixin type + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeNumber}) + So(err, ShouldBeNil) + var v13 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v13) + So(err, ShouldNotBeNil) + + // test tx data + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeBilling, "TestField": 1}) + So(err, ShouldBeNil) + var v14 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v14) + So(err, ShouldBeNil) + + // test invalid tx data + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeBilling, "TestField": "happy"}) + So(err, ShouldBeNil) + var v15 pi.Transaction + err = utils.DecodeMsgPack(buf.Bytes(), &v15) + So(err, ShouldNotBeNil) + }) +} + +func TestRegisterTransaction(t *testing.T) { + Convey("test registration", t, func() { + So(func() { pi.RegisterTransaction(pi.TransactionTypeTransfer, nil) }, ShouldPanic) + So(func() { pi.RegisterTransaction(pi.TransactionTypeBaseAccount, (*pi.TransactionWrapper)(nil)) }, ShouldPanic) + }) +} diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index b4d1ae675..ec7814ae0 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -77,9 +77,6 @@ func (s *metaState) loadOrStoreAccountObject( func (s *metaState) loadAccountStableBalance(addr proto.AccountAddress) (b uint64, loaded bool) { var o *accountObject - s.Lock() - defer s.Unlock() - defer func() { log.WithFields(log.Fields{ "account": addr.String(), @@ -87,6 +84,9 @@ func (s *metaState) loadAccountStableBalance(addr proto.AccountAddress) (b uint6 "loaded": loaded, }).Debug("queried stable account") }() + + s.Lock() + defer s.Unlock() if o, loaded = s.dirty.accounts[addr]; loaded && o != nil { b = o.StableCoinBalance @@ -101,9 +101,6 @@ func (s *metaState) loadAccountStableBalance(addr proto.AccountAddress) (b uint6 func (s *metaState) loadAccountCovenantBalance(addr proto.AccountAddress) (b uint64, loaded bool) { var o *accountObject - s.Lock() - defer s.Unlock() - defer func() { log.WithFields(log.Fields{ "account": addr.String(), @@ -112,6 +109,9 @@ func (s *metaState) loadAccountCovenantBalance(addr proto.AccountAddress) (b uin }).Debug("queried covenant account") }() + s.Lock() + defer s.Unlock() + if o, loaded = s.dirty.accounts[addr]; loaded && o != nil { b = o.CovenantCoinBalance return @@ -711,7 +711,7 @@ func (s *metaState) applyTransactionProcedure(t pi.Transaction) (_ func(*bolt.Tx var ( enc *bytes.Buffer - hash = t.GetHash() + hash = t.Hash() addr = t.GetAccountAddress() nonce = t.GetAccountNonce() ttype = t.GetTransactionType() diff --git a/blockproducer/rpc.go b/blockproducer/rpc.go index 358e5bad0..1fafe2f2d 100644 --- a/blockproducer/rpc.go +++ b/blockproducer/rpc.go @@ -18,9 +18,9 @@ package blockproducer import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - "github.com/CovenantSQL/CovenantSQL/blockproducer/types" + pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + 
"github.com/CovenantSQL/CovenantSQL/types" ) // ChainRPCService defines a main chain RPC server. @@ -31,7 +31,7 @@ type ChainRPCService struct { // AdviseNewBlockReq defines a request of the AdviseNewBlock RPC method. type AdviseNewBlockReq struct { proto.Envelope - Block *types.Block + Block *pt.Block } // AdviseNewBlockResp defines a response of the AdviseNewBlock RPC method. @@ -42,7 +42,7 @@ type AdviseNewBlockResp struct { // AdviseTxBillingReq defines a request of the AdviseTxBilling RPC method. type AdviseTxBillingReq struct { proto.Envelope - TxBilling *types.Billing + TxBilling *pt.Billing } // AdviseTxBillingResp defines a response of the AdviseTxBilling RPC method. @@ -61,7 +61,7 @@ type FetchBlockResp struct { proto.Envelope Height uint32 Count uint32 - Block *types.Block + Block *pt.Block } // FetchBlockByCountReq define a request of the FetchBlockByCount RPC method. @@ -135,11 +135,11 @@ type QueryAccountCovenantBalanceResp struct { // AdviseNewBlock is the RPC method to advise a new block to target server. func (s *ChainRPCService) AdviseNewBlock(req *AdviseNewBlockReq, resp *AdviseNewBlockResp) error { s.chain.blocksFromRPC <- req.Block - return s.chain.pushBlock(req.Block) + return nil } // AdviseBillingRequest is the RPC method to advise a new billing request to main chain. -func (s *ChainRPCService) AdviseBillingRequest(req *ct.AdviseBillingReq, resp *ct.AdviseBillingResp) error { +func (s *ChainRPCService) AdviseBillingRequest(req *types.AdviseBillingReq, resp *types.AdviseBillingResp) error { response, err := s.chain.produceBilling(req.Req) if err != nil { return err diff --git a/blockproducer/txpool.go b/blockproducer/txpool.go index 4fd46f683..41b714ae7 100644 --- a/blockproducer/txpool.go +++ b/blockproducer/txpool.go @@ -94,7 +94,7 @@ func (p *txPool) hasTx(tx pi.Transaction) (ok bool) { return } // Check transaction hash - if ok = (tx.GetHash() == te.transactions[index].GetHash()); !ok { + if ok = (tx.Hash() == te.transactions[index].Hash()); !ok { log.Debug("transaction hash already exists") return } @@ -112,7 +112,7 @@ func (p *txPool) cmpAndMoveNextTx(tx pi.Transaction) (ok bool) { return } // Check transaction hash - if ok = (tx.GetHash() == te.transactions[0].GetHash()); !ok { + if ok = (tx.Hash() == te.transactions[0].Hash()); !ok { return } // Move forward diff --git a/blockproducer/types/baseaccount.go b/blockproducer/types/baseaccount.go index ce4facda1..c56b7a421 100644 --- a/blockproducer/types/baseaccount.go +++ b/blockproducer/types/baseaccount.go @@ -50,8 +50,8 @@ func (b *BaseAccount) GetAccountNonce() pi.AccountNonce { return pi.AccountNonce(0) } -// GetHash implements interfaces/Transaction.GetHash. -func (b *BaseAccount) GetHash() (h hash.Hash) { +// Hash implements interfaces/Transaction.Hash. +func (b *BaseAccount) Hash() (h hash.Hash) { return } diff --git a/blockproducer/types/baseaccount_test.go b/blockproducer/types/baseaccount_test.go new file mode 100644 index 000000000..fd1370735 --- /dev/null +++ b/blockproducer/types/baseaccount_test.go @@ -0,0 +1,44 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/proto" + . "github.com/smartystreets/goconvey/convey" +) + +func TestBaseAccount(t *testing.T) { + Convey("base account", t, func() { + h, err := hash.NewHashFromStr("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + So(err, ShouldBeNil) + addr := proto.AccountAddress(*h) + ba := NewBaseAccount(&Account{ + Address: addr, + }) + So(ba.GetAccountAddress(), ShouldEqual, addr) + So(ba.GetAccountNonce(), ShouldEqual, 0) + So(ba.Hash(), ShouldEqual, hash.Hash{}) + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + So(ba.Sign(priv), ShouldBeNil) + So(ba.Verify(), ShouldBeNil) + }) +} diff --git a/blockproducer/types/billing.go b/blockproducer/types/billing.go index 0beeb86f7..ba959981c 100644 --- a/blockproducer/types/billing.go +++ b/blockproducer/types/billing.go @@ -19,6 +19,7 @@ package types import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -56,7 +57,7 @@ func NewBillingHeader(nonce pi.AccountNonce, bReq *BillingRequest, producer prot type Billing struct { BillingHeader pi.TransactionTypeMixin - DefaultHashSignVerifierImpl + verifier.DefaultHashSignVerifierImpl } // NewBilling generates a new Billing. 
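Note on the transaction refactor above: Billing (like CreateDatabase and Transfer later in this diff) now embeds the shared verifier.DefaultHashSignVerifierImpl from crypto/verifier instead of a package-local implementation, alongside its domain header and pi.TransactionTypeMixin, and the interface method GetHash is renamed to Hash. A minimal sketch of that three-way composition, using only names visible in the surrounding hunks (ExampleHeader and ExampleTx are hypothetical stand-ins, not part of the patch):

package types

import (
	pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces"
	"github.com/CovenantSQL/CovenantSQL/crypto/verifier"
	"github.com/CovenantSQL/CovenantSQL/proto"
)

// ExampleHeader stands in for a domain header such as BillingHeader or
// CreateDatabaseHeader: the fields that actually get hashed and signed.
type ExampleHeader struct {
	Owner proto.AccountAddress
	Nonce pi.AccountNonce
}

// ExampleTx composes the header, the transaction type mixin and the shared
// hash/sign verifier, mirroring how Billing, CreateDatabase and Transfer are
// declared in this diff.
type ExampleTx struct {
	ExampleHeader
	pi.TransactionTypeMixin
	verifier.DefaultHashSignVerifierImpl
}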
diff --git a/blockproducer/types/billing_gen.go b/blockproducer/types/billing_gen.go index 5260f1a35..bc636474f 100644 --- a/blockproducer/types/billing_gen.go +++ b/blockproducer/types/billing_gen.go @@ -18,13 +18,13 @@ func (z *Billing) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +34,7 @@ func (z *Billing) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Billing) Msgsize() (s int) { - s = 1 + 14 + z.BillingHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + s = 1 + 14 + z.BillingHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/blockproducer/types/billing_test.go b/blockproducer/types/billing_test.go index bafa32c10..1c3d6e0c2 100644 --- a/blockproducer/types/billing_test.go +++ b/blockproducer/types/billing_test.go @@ -82,13 +82,13 @@ func TestBilling_SerializeDeserialize(t *testing.T) { } if !tb.Signature.IsEqual(dec.Signature) { - t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Signature, tb.Signature) + t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Signature, dec.Signature) } if !tb.Signee.IsEqual(dec.Signee) { - t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Signee, tb.Signee) + t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Signee, dec.Signee) } - if !tb.Hash.IsEqual(&dec.Hash) { - t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Hash, tb.Hash) + if tb.Hash() != dec.Hash() { + t.Fatalf("Value not match: \n\tv1=%v\n\tv2=%v", tb.Hash(), dec.Hash()) } } @@ -120,4 +120,22 @@ func TestBilling_PackAndSignTx(t *testing.T) { if err != nil { t.Fatalf("Verify signature failed: %v", err) } + + // get + addr := hash.Hash(tb.GetAccountAddress()) + if addr.IsEqual(&hash.Hash{}) { + t.Fatal("Get hash failed") + } + + tb.GetAccountNonce() + + if tb.GetDatabaseID() == nil { + t.Fatal("Get nil DatabaseID") + } + + tb.Signature = nil + err = tb.Verify() + if err == nil { + t.Fatal("Verify signature should failed") + } } diff --git a/blockproducer/types/block.go b/blockproducer/types/block.go index a2f88d642..42bb57714 100644 --- a/blockproducer/types/block.go +++ b/blockproducer/types/block.go @@ -67,7 +67,7 @@ func (b *Block) GetTxHashes() []*hash.Hash { hs := make([]*hash.Hash, len(b.Transactions)) for i, v := range b.Transactions { - h := v.GetHash() + h := v.Hash() hs[i] = &h } return hs diff --git a/blockproducer/types/createdb.go b/blockproducer/types/createdb.go index 2d5aa00fa..33749fcb9 100644 --- a/blockproducer/types/createdb.go +++ b/blockproducer/types/createdb.go @@ -19,6 +19,7 @@ package types import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -44,7 +45,7 @@ func (h *CreateDatabaseHeader) GetAccountNonce() pi.AccountNonce { type CreateDatabase struct { CreateDatabaseHeader 
pi.TransactionTypeMixin - DefaultHashSignVerifierImpl + verifier.DefaultHashSignVerifierImpl } // NewCreateDatabase returns new instance. diff --git a/blockproducer/types/createdb_gen.go b/blockproducer/types/createdb_gen.go index 0b3e6b862..2d4f51a21 100644 --- a/blockproducer/types/createdb_gen.go +++ b/blockproducer/types/createdb_gen.go @@ -25,13 +25,13 @@ func (z *CreateDatabase) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -41,7 +41,7 @@ func (z *CreateDatabase) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabase) Msgsize() (s int) { - s = 1 + 21 + 1 + 6 + z.CreateDatabaseHeader.Owner.Msgsize() + 6 + z.CreateDatabaseHeader.Nonce.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + s = 1 + 21 + 1 + 6 + z.CreateDatabaseHeader.Owner.Msgsize() + 6 + z.CreateDatabaseHeader.Nonce.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/blockproducer/types/createdb_test.go b/blockproducer/types/createdb_test.go new file mode 100644 index 000000000..e61381a4b --- /dev/null +++ b/blockproducer/types/createdb_test.go @@ -0,0 +1,51 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/proto" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestTxCreateDatabase(t *testing.T) { + Convey("test tx create database", t, func() { + h, err := hash.NewHashFromStr("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + So(err, ShouldBeNil) + addr := proto.AccountAddress(*h) + + cd := NewCreateDatabase(&CreateDatabaseHeader{ + Owner: addr, + Nonce: 1, + }) + + So(cd.GetAccountAddress(), ShouldEqual, addr) + So(cd.GetAccountNonce(), ShouldEqual, 1) + + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + + err = cd.Sign(priv) + So(err, ShouldBeNil) + + err = cd.Verify() + So(err, ShouldBeNil) + }) +} diff --git a/blockproducer/types/msgpack_test.go b/blockproducer/types/msgpack_test.go index c448c3af6..838f244de 100644 --- a/blockproducer/types/msgpack_test.go +++ b/blockproducer/types/msgpack_test.go @@ -106,21 +106,6 @@ func TestEncodeDecodeTransactions(t *testing.T) { So(reflect.TypeOf(out.Maps[k]).String(), ShouldContainSubstring, "TransactionWrapper") } }) - Convey("test encode wrapper, decode using real type", t, func() { - var t pi.Transaction - t = pi.WrapTransaction(NewBaseAccount(&Account{})) - So(reflect.TypeOf(t).String(), ShouldContainSubstring, "TransactionWrapper") - So(t.GetTransactionType(), ShouldEqual, pi.TransactionTypeBaseAccount) - buf, err := utils.EncodeMsgPack(t) - So(err, ShouldBeNil) - - var out *BaseAccount - err = utils.DecodeMsgPack(buf.Bytes(), &out) - So(err, ShouldBeNil) - So(out, ShouldNotBeNil) - So(out.GetTransactionType(), ShouldEqual, pi.TransactionTypeBaseAccount) - So(reflect.TypeOf(out).String(), ShouldContainSubstring, "BaseAccount") - }) Convey("decode invalid data", t, func() { var testTypes = []interface{}{ "1", diff --git a/blockproducer/types/transfer.go b/blockproducer/types/transfer.go index e9a170fa0..38d6f7134 100644 --- a/blockproducer/types/transfer.go +++ b/blockproducer/types/transfer.go @@ -19,6 +19,7 @@ package types import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -35,7 +36,7 @@ type TransferHeader struct { type Transfer struct { TransferHeader pi.TransactionTypeMixin - DefaultHashSignVerifierImpl + verifier.DefaultHashSignVerifierImpl } // NewTransfer returns new instance. 
diff --git a/blockproducer/types/transfer_gen.go b/blockproducer/types/transfer_gen.go index 6d910fa70..c2f7f8736 100644 --- a/blockproducer/types/transfer_gen.go +++ b/blockproducer/types/transfer_gen.go @@ -12,19 +12,19 @@ func (z *Transfer) MarshalHash() (o []byte, err error) { o = hsp.Require(b, z.Msgsize()) // map header, size 3 o = append(o, 0x83, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransferHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.TransferHeader.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +34,7 @@ func (z *Transfer) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Transfer) Msgsize() (s int) { - s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 15 + z.TransferHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + s = 1 + 15 + z.TransferHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/blockproducer/types/transfer_test.go b/blockproducer/types/transfer_test.go index 28c02e239..e651054b7 100644 --- a/blockproducer/types/transfer_test.go +++ b/blockproducer/types/transfer_test.go @@ -15,3 +15,32 @@ */ package types + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/proto" + . "github.com/smartystreets/goconvey/convey" +) + +func TestTxTransfer(t *testing.T) { + Convey("test transfer", t, func() { + h, err := hash.NewHashFromStr("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + So(err, ShouldBeNil) + addr := proto.AccountAddress(*h) + + t := NewTransfer(&TransferHeader{ + Sender: addr, + Nonce: 1, + }) + So(t.GetAccountAddress(), ShouldEqual, addr) + So(t.GetAccountNonce(), ShouldEqual, 1) + + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + So(t.Sign(priv), ShouldBeNil) + So(t.Verify(), ShouldBeNil) + }) +} diff --git a/build.sh b/build.sh index f856edc00..fc39ea309 100755 --- a/build.sh +++ b/build.sh @@ -37,6 +37,9 @@ go test -coverpkg github.com/CovenantSQL/CovenantSQL/... 
-cover -race -c -tags ' cli_pkgpath="github.com/CovenantSQL/CovenantSQL/cmd/cql" CGO_ENABLED=1 go build -ldflags "-X main.version=${version} -X github.com/CovenantSQL/CovenantSQL/conf.RoleTag=C ${GOLDFLAGS}" --tags ${platform}" sqlite_omit_load_extension" -o bin/cql ${cli_pkgpath} +fuse_pkgpath="github.com/CovenantSQL/CovenantSQL/cmd/cql-fuse" +CGO_ENABLED=1 go build -ldflags "-X main.version=${version} -X github.com/CovenantSQL/CovenantSQL/conf.RoleTag=C ${GOLDFLAGS}" --tags ${platform}" sqlite_omit_load_extension" -o bin/cql-fuse ${fuse_pkgpath} + cql_adapter_pkgpath="github.com/CovenantSQL/CovenantSQL/cmd/cql-adapter" CGO_ENABLED=1 go build -ldflags "-X main.version=${version} -X github.com/CovenantSQL/CovenantSQL/conf.RoleTag=C ${GOLDFLAGS}" --tags ${platform}" sqlite_omit_load_extension" -o bin/cql-adapter ${cql_adapter_pkgpath} diff --git a/chain/interfaces/transaction.go b/chain/interfaces/transaction.go deleted file mode 100644 index 12f07957f..000000000 --- a/chain/interfaces/transaction.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package interfaces - -import ( - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -// Serializer is the interface implemented by an object that can serialize itself into binary form. -type Serializer interface { - Serialize() ([]byte, error) -} - -// Deserializer is the interface implemented by an object that can deserialize a binary -// representation of itself. -type Deserializer interface { - Deserialize(enc []byte) error -} - -// Transaction is the interface implemented by an object that can be verified and processed by -// a blockchain as a transaction. -type Transaction interface { - Serializer - Deserializer - GetDatabaseID() *proto.DatabaseID - GetHash() hash.Hash - GetIndexKey() interface{} - GetPersistenceKey() []byte - GetSequenceID() uint32 - GetTime() time.Time - Verify() error -} diff --git a/chain/persistence.go b/chain/persistence.go deleted file mode 100644 index 3cb46e4d7..000000000 --- a/chain/persistence.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package chain - -import ( - ci "github.com/CovenantSQL/CovenantSQL/chain/interfaces" - "github.com/coreos/bbolt" -) - -var ( - metaBucket = [4]byte{0x0, 0x0, 0x0, 0x0} - metaTxIndexBucket = []byte("covenantsql-tx-index-bucket") -) - -// TxPersistence defines a persistence storage for blockchain transactions. -type TxPersistence struct { - db *bolt.DB -} - -// NewTxPersistence returns a new TxPersistence instance using the given bolt database as -// underlying storage engine. -func NewTxPersistence(db *bolt.DB) (ins *TxPersistence, err error) { - // Initialize buckets - if err = db.Update(func(tx *bolt.Tx) (err error) { - meta, err := tx.CreateBucketIfNotExists(metaBucket[:]) - if err != nil { - return - } - _, err = meta.CreateBucketIfNotExists(metaTxIndexBucket) - return - }); err != nil { - return - } - // Create instance if succeed - ins = &TxPersistence{db: db} - return -} - -// PutTransaction serializes and puts the transaction tx into the storage. -func (p *TxPersistence) PutTransaction(tx ci.Transaction) (err error) { - var key, value []byte - key = tx.GetPersistenceKey() - if value, err = tx.Serialize(); err != nil { - return - } - return p.db.Update(func(tx *bolt.Tx) error { - return tx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Put(key, value) - }) -} - -// GetTransaction gets the transaction binary representation from the storage with key and -// deserialize to tx. -// -// It is important that tx must provide an interface with corresponding concrete value, or the -// deserialization will cause unexpected error. -func (p *TxPersistence) GetTransaction(key []byte, tx ci.Transaction) (ok bool, err error) { - var value []byte - if err = p.db.View(func(tx *bolt.Tx) error { - value = tx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Get(key) - return nil - }); err != nil { - return - } - if value != nil { - ok = true - err = tx.Deserialize(value) - return - } - return -} - -// DelTransaction deletes the transaction from the storage with key. -func (p *TxPersistence) DelTransaction(key []byte) (err error) { - return p.db.Update(func(tx *bolt.Tx) error { - return tx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Delete(key) - }) -} - -// PutTransactionAndUpdateIndex serializes and puts the transaction from the storage with key -// and updates transaction index ti in a single database transaction. -func (p *TxPersistence) PutTransactionAndUpdateIndex(tx ci.Transaction, ti *TxIndex) (err error) { - var ( - key = tx.GetPersistenceKey() - val []byte - ) - if val, err = tx.Serialize(); err != nil { - return - } - return p.db.Update(func(dbtx *bolt.Tx) (err error) { - if err = dbtx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Put(key, val); err != nil { - return - } - ti.StoreTx(tx) - return - }) -} - -// DelTransactionAndUpdateIndex deletes the transaction from the storage with key and updates -// transaction index ti in a single database transaction. -func (p *TxPersistence) DelTransactionAndUpdateIndex( - pkey []byte, ikey interface{}, ti *TxIndex) (err error, -) { - return p.db.Update(func(dbtx *bolt.Tx) (err error) { - if err = dbtx.Bucket(metaBucket[:]).Bucket(metaTxIndexBucket).Delete(pkey); err != nil { - return - } - ti.DelTx(ikey) - return - }) -} diff --git a/chain/persistence_test.go b/chain/persistence_test.go deleted file mode 100644 index 963ca08fc..000000000 --- a/chain/persistence_test.go +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package chain - -import ( - "fmt" - "path" - "reflect" - "testing" - - ci "github.com/CovenantSQL/CovenantSQL/chain/interfaces" - "github.com/coreos/bbolt" -) - -func TestBadNewTxPersistence(t *testing.T) { - fl := path.Join(testDataDir, fmt.Sprintf("%s.db", t.Name())) - db, err := bolt.Open(fl, 0600, nil) - if err = db.Close(); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if _, err = NewTxPersistence(db); err == nil { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestTxPersistenceWithClosedDB(t *testing.T) { - fl := path.Join(testDataDir, fmt.Sprintf("%s.db", t.Name())) - db, err := bolt.Open(fl, 0600, nil) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - tp, err := NewTxPersistence(db) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - if err = db.Close(); err != nil { - t.Fatalf("Error occurred: %v", err) - } - var ( - otx ci.Transaction = newRandomDemoTxImpl() - rtx ci.Transaction = &DemoTxImpl{} - ) - if _, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err == nil { - t.Fatalf("Unexpected error: %v", err) - } - if err = tp.PutTransaction(otx); err == nil { - t.Fatalf("Unexpected error: %v", err) - } - if err = tp.DelTransaction(otx.GetPersistenceKey()); err == nil { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestTxPersistence(t *testing.T) { - fl := path.Join(testDataDir, fmt.Sprintf("%s.db", t.Name())) - db, err := bolt.Open(fl, 0600, nil) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - tp, err := NewTxPersistence(db) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test operations: Get -> Put -> Get -> Del -> Get - var ( - otx ci.Transaction = newRandomDemoTxImpl() - rtx ci.Transaction = &DemoTxImpl{} - ) - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if err = tp.PutTransaction(otx); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !ok { - t.Fatalf("Unexpected query result: %v", ok) - } else if !reflect.DeepEqual(otx, rtx) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, rtx) - } - if err = tp.DelTransaction(otx.GetPersistenceKey()); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if ok { - t.Fatalf("Unexpected query result: %v", ok) - } -} - -func TestTxPersistenceWithIndex(t *testing.T) { - fl := path.Join(testDataDir, fmt.Sprintf("%s.db", t.Name())) - db, err := bolt.Open(fl, 0600, nil) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - tp, err := NewTxPersistence(db) - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - ti := NewTxIndex() - - // Test operations: Get -> Put -> Get -> 
Del -> Get - var ( - otx ci.Transaction = newRandomDemoTxImpl() - rtx ci.Transaction = &DemoTxImpl{} - ) - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if err = tp.PutTransactionAndUpdateIndex(otx, ti); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !ok { - t.Fatalf("Unexpected query result: %v", ok) - } else if !reflect.DeepEqual(otx, rtx) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, rtx) - } - if xtx, ok := ti.LoadTx(otx.GetIndexKey()); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } else if !reflect.DeepEqual(otx, xtx) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, xtx) - } - if err = tp.DelTransactionAndUpdateIndex( - otx.GetPersistenceKey(), otx.GetIndexKey(), ti, - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } - if ok, err := tp.GetTransaction(otx.GetPersistenceKey(), rtx); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if _, ok := ti.LoadTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } -} diff --git a/chain/txindex.go b/chain/txindex.go deleted file mode 100644 index 5d764e972..000000000 --- a/chain/txindex.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package chain - -import ( - "sync" - - ci "github.com/CovenantSQL/CovenantSQL/chain/interfaces" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" -) - -type txCache struct { - bh *hash.Hash - tx ci.Transaction -} - -// TxIndex defines transaction index. -type TxIndex struct { - index sync.Map -} - -// NewTxIndex returns a new TxIndex instance. -func NewTxIndex() *TxIndex { - return &TxIndex{} -} - -// StoreTx stores tx in the transaction index. -func (i *TxIndex) StoreTx(tx ci.Transaction) { - i.index.Store(tx.GetIndexKey(), &txCache{tx: tx}) -} - -// HasTx returns a boolean value indicating wether the transaction index has key or not. -func (i *TxIndex) HasTx(key interface{}) (ok bool) { - _, ok = i.index.Load(key) - return -} - -// LoadTx loads a transaction with key. -func (i *TxIndex) LoadTx(key interface{}) (tx ci.Transaction, ok bool) { - var ( - val interface{} - tc *txCache - ) - if val, ok = i.index.Load(key); ok { - if tc = val.(*txCache); tc != nil { - tx = tc.tx - } - } - return -} - -// SetBlock sets the block hash filed of txCache with key in the transaction index. -func (i *TxIndex) SetBlock(key interface{}, bh hash.Hash) (ok bool) { - var ( - val interface{} - tc *txCache - ) - if val, ok = i.index.Load(key); ok { - if tc = val.(*txCache); tc != nil { - tc.bh = &bh - } - } - return -} - -// DelTx deletes transaction with key in the transaction index. 
-func (i *TxIndex) DelTx(key interface{}) { - i.index.Delete(key) -} - -// ResetBlock resets the block hash field of txCache with key in the transaction index. -func (i *TxIndex) ResetBlock(key interface{}) (ok bool) { - var ( - val interface{} - tc *txCache - ) - if val, ok = i.index.Load(key); ok { - if tc = val.(*txCache); tc != nil { - tc.bh = nil - } - } - return -} - -// CheckTxState checks the transaction state for block packing with key in the transaction index. -func (i *TxIndex) CheckTxState(key interface{}) error { - var ( - ok bool - val interface{} - ) - if val, ok = i.index.Load(key); !ok { - return ErrUnknownTx - } - if tc := val.(*txCache); tc == nil { - return ErrCorruptedIndex - } else if tc.bh != nil { - return ErrDuplicateTx - } - return nil -} - -// FetchUnpackedTxes fetches all unpacked tranactions and returns them as a slice. -func (i *TxIndex) FetchUnpackedTxes() (txes []ci.Transaction) { - i.index.Range(func(key interface{}, val interface{}) bool { - if tc := val.(*txCache); tc != nil && tc.bh == nil { - txes = append(txes, tc.tx) - } - return true - }) - return -} diff --git a/chain/txindex_test.go b/chain/txindex_test.go deleted file mode 100644 index bbe109d23..000000000 --- a/chain/txindex_test.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package chain - -import ( - "reflect" - "testing" - - ci "github.com/CovenantSQL/CovenantSQL/chain/interfaces" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" -) - -func TestTxIndex(t *testing.T) { - var ( - ti = NewTxIndex() - otx ci.Transaction = newRandomDemoTxImpl() - ) - // Test operations: Get -> Put -> Get -> Del -> Get - if ok := ti.HasTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if _, ok := ti.LoadTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if ok := ti.SetBlock(otx.GetIndexKey(), hash.Hash{}); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if err := ti.CheckTxState(otx.GetIndexKey()); err != ErrUnknownTx { - t.Fatalf("Unexpected error: %v", err) - } - ti.StoreTx(otx) - if ok := ti.HasTx(otx.GetIndexKey()); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if xtx, ok := ti.LoadTx(otx.GetIndexKey()); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } else if !reflect.DeepEqual(otx, xtx) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, xtx) - } - if err := ti.CheckTxState(otx.GetIndexKey()); err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if ok := ti.SetBlock(otx.GetIndexKey(), hash.Hash{}); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if err := ti.CheckTxState(otx.GetIndexKey()); err != ErrDuplicateTx { - t.Fatalf("Unexpected error: %v", err) - } - if ok := ti.ResetBlock(otx.GetIndexKey()); !ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if txes := ti.FetchUnpackedTxes(); len(txes) != 1 { - t.Fatalf("Unexpected query result: %v", txes) - } else if !reflect.DeepEqual(otx, txes[0]) { - t.Fatalf("Unexpected result:\n\torigin = %v\n\toutput = %v", otx, txes[0]) - } - ti.DelTx(otx.GetIndexKey()) - if ok := ti.HasTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if _, ok := ti.LoadTx(otx.GetIndexKey()); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if ok := ti.SetBlock(otx.GetIndexKey(), hash.Hash{}); ok { - t.Fatalf("Unexpected query result: %v", ok) - } - if err := ti.CheckTxState(otx.GetIndexKey()); err != ErrUnknownTx { - t.Fatalf("Unexpected error: %v", err) - } -} diff --git a/chain/xxx_gen_test.go b/chain/xxx_gen_test.go deleted file mode 100644 index ed7deaa59..000000000 --- a/chain/xxx_gen_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package chain - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. - -import ( - hsp "github.com/CovenantSQL/HashStablePack/marshalhash" -) - -// MarshalHash marshals for hash -func (z *DemoHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - o = hsp.AppendTime(o, z.Timestamp) - o = append(o, 0x83) - o = hsp.AppendUint32(o, z.SequenceID) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *DemoHeader) Msgsize() (s int) { - s = 1 + 11 + z.DatabaseID.Msgsize() + 10 + hsp.TimeSize + 11 + hsp.Uint32Size - return -} diff --git a/chain/xxx_test.go b/chain/xxx_test.go deleted file mode 100644 index 80c28b153..000000000 --- a/chain/xxx_test.go +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package chain - -import ( - "io/ioutil" - "math/rand" - "os" - "testing" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils" -) - -var ( - testDataDir string - testPrivKey *asymmetric.PrivateKey - testPubKey *asymmetric.PublicKey -) - -func createRandomString(offset, length int) string { - buff := make([]byte, rand.Intn(length)+offset) - rand.Read(buff) - for i, v := range buff { - buff[i] = v%(0x7f-0x20) + 0x20 - } - return string(buff) -} - -type DemoHeader struct { - DatabaseID proto.DatabaseID - SequenceID uint32 - Timestamp time.Time -} - -type DemoTxImpl struct { - DemoHeader - HeaderHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature -} - -func newRandomDemoTxImpl() (i *DemoTxImpl) { - header := DemoHeader{ - DatabaseID: proto.DatabaseID(createRandomString(10, 10)), - SequenceID: rand.Uint32(), - Timestamp: time.Now().UTC(), - } - - enc, err := header.MarshalHash() - if err != nil { - panic(err) - } - - hh := hash.HashH(enc) - sig, err := testPrivKey.Sign(hh[:]) - if err != nil { - panic(err) - } - - i = &DemoTxImpl{ - DemoHeader: header, - HeaderHash: hh, - Signee: testPubKey, - Signature: sig, - } - return -} - -func (i *DemoTxImpl) Serialize() (enc []byte, err error) { - if b, err := utils.EncodeMsgPack(i); err == nil { - enc = b.Bytes() - } - return -} - -func (i *DemoTxImpl) Deserialize(enc []byte) error { - return utils.DecodeMsgPack(enc, i) -} - -func (i *DemoTxImpl) GetDatabaseID() *proto.DatabaseID { - return &i.DatabaseID -} - -func (i *DemoTxImpl) GetHash() hash.Hash { - return i.HeaderHash -} - -func (i *DemoTxImpl) GetIndexKey() interface{} { - return i.HeaderHash -} - -func (i *DemoTxImpl) GetPersistenceKey() []byte { - return i.HeaderHash[:] -} - -func (i *DemoTxImpl) GetSequenceID() uint32 { - return i.SequenceID -} - -func (i *DemoTxImpl) GetTime() time.Time { - return i.Timestamp -} - -func (i *DemoTxImpl) Verify() (err error) { - var enc []byte - if enc, err = i.DemoHeader.MarshalHash(); err != nil { - return - } else if h := hash.THashH(enc); !i.HeaderHash.IsEqual(&h) { - return - } else if !i.Signature.Verify(h[:], i.Signee) { - return - } - return -} - -func setup() { - // Setup RNG - rand.Seed(time.Now().UnixNano()) - - var err error - // Create temp directory - testDataDir, err = ioutil.TempDir("", "covenantsql") - if err != nil { - panic(err) - } - // Create key pair for test - testPrivKey, testPubKey, err = asymmetric.GenSecp256k1KeyPair() - if err != nil { - panic(err) - } -} - -func teardown() { - if err := os.RemoveAll(testDataDir); err != nil { - panic(err) - } -} - -func TestMain(m *testing.M) { - os.Exit(func() int { - setup() - defer teardown() - return m.Run() - }()) -} diff --git a/chainbus/bus.go b/chainbus/bus.go new file mode 100644 index 000000000..e2bcfebec --- /dev/null +++ b/chainbus/bus.go @@ 
-0,0 +1,225 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package chainbus + +import ( + "fmt" + "reflect" + "sync" + + bi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" +) + +// ChainSuber defines subscribing-related bus behavior +type ChainSuber interface { + Subscribe(topic bi.TransactionType, handler interface{}) error + SubscribeAsync(topic bi.TransactionType, handler interface{}, transactional bool) error + SubscribeOnce(topic bi.TransactionType, handler interface{}) error + SubscribeOnceAsync(topic bi.TransactionType, handler interface{}) error + Unsubscribe(topic bi.TransactionType, handler interface{}) error +} + +// ChainPuber defines publishing-related bus behavior +type ChainPuber interface { + Publish(topic bi.TransactionType, args ...interface{}) +} + +// BusController defines bus control behavior (checking handler's presence, synchronization) +type BusController interface { + HasCallback(topic bi.TransactionType) bool + WaitAsync() +} + +// Bus englobes global (subscribe, publish, control) bus behavior +type Bus interface { + BusController + ChainSuber + ChainPuber +} + +// ChainBus - box for handlers and callbacks. +type ChainBus struct { + handlers map[bi.TransactionType][]*eventHandler + lock sync.Mutex // a lock for the map + wg sync.WaitGroup +} + +type eventHandler struct { + callBack reflect.Value + flagOnce bool + async bool + transactional bool + sync.Mutex // lock for an event handler - useful for running async callbacks serially +} + +// New returns new ChainBus with empty handlers. +func New() Bus { + b := &ChainBus{ + make(map[bi.TransactionType][]*eventHandler), + sync.Mutex{}, + sync.WaitGroup{}, + } + return b +} + +// doSubscribe handles the subscription logic and is utilized by the public Subscribe functions +func (bus *ChainBus) doSubscribe(topic bi.TransactionType, fn interface{}, handler *eventHandler) error { + bus.lock.Lock() + defer bus.lock.Unlock() + if !(reflect.TypeOf(fn).Kind() == reflect.Func) { + return fmt.Errorf("%s is not of type reflect.Func", reflect.TypeOf(fn).Kind()) + } + bus.handlers[topic] = append(bus.handlers[topic], handler) + return nil +} + +// Subscribe subscribes to a topic. +// Returns error if `fn` is not a function. +func (bus *ChainBus) Subscribe(topic bi.TransactionType, fn interface{}) error { + return bus.doSubscribe(topic, fn, &eventHandler{ + reflect.ValueOf(fn), false, false, false, sync.Mutex{}, + }) +} + +// SubscribeAsync subscribes to a topic with an asynchronous callback +// Transactional determines whether subsequent callbacks for a topic are +// run serially (true) or concurrently (false) +// Returns error if `fn` is not a function. +func (bus *ChainBus) SubscribeAsync(topic bi.TransactionType, fn interface{}, transactional bool) error { + return bus.doSubscribe(topic, fn, &eventHandler{ + reflect.ValueOf(fn), false, true, transactional, sync.Mutex{}, + }) +} + +// SubscribeOnce subscribes to a topic once. 
Handler will be removed after executing. +// Returns error if `fn` is not a function. +func (bus *ChainBus) SubscribeOnce(topic bi.TransactionType, fn interface{}) error { + return bus.doSubscribe(topic, fn, &eventHandler{ + reflect.ValueOf(fn), true, false, false, sync.Mutex{}, + }) +} + +// SubscribeOnceAsync subscribes to a topic once with an asynchronous callback +// Handler will be removed after executing. +// Returns error if `fn` is not a function. +func (bus *ChainBus) SubscribeOnceAsync(topic bi.TransactionType, fn interface{}) error { + return bus.doSubscribe(topic, fn, &eventHandler{ + reflect.ValueOf(fn), true, true, false, sync.Mutex{}, + }) +} + +// HasCallback returns true if exists any callback subscribed to the topic. +func (bus *ChainBus) HasCallback(topic bi.TransactionType) bool { + bus.lock.Lock() + defer bus.lock.Unlock() + _, ok := bus.handlers[topic] + if ok { + return len(bus.handlers[topic]) > 0 + } + return false +} + +// Unsubscribe removes callback defined for a topic. +// Returns error if there are no callbacks subscribed to the topic. +func (bus *ChainBus) Unsubscribe(topic bi.TransactionType, handler interface{}) error { + bus.lock.Lock() + defer bus.lock.Unlock() + if _, ok := bus.handlers[topic]; ok && len(bus.handlers[topic]) > 0 { + bus.removeHandler(topic, bus.findHandlerIdx(topic, reflect.ValueOf(handler))) + return nil + } + return fmt.Errorf("topic %s doesn't exist", topic) +} + +// Publish executes callback defined for a topic. Any additional argument will be transferred to the callback. +func (bus *ChainBus) Publish(topic bi.TransactionType, args ...interface{}) { + bus.lock.Lock() // will unlock if handler is not found or always after setUpPublish + defer bus.lock.Unlock() + if handlers, ok := bus.handlers[topic]; ok && 0 < len(handlers) { + // Handlers slice may be changed by removeHandler and Unsubscribe during iteration, + // so make a copy and iterate the copied slice. + copyHandlers := make([]*eventHandler, 0, len(handlers)) + copyHandlers = append(copyHandlers, handlers...) + for i, handler := range copyHandlers { + if handler.flagOnce { + bus.removeHandler(topic, i) + } + if !handler.async { + bus.doPublish(handler, topic, args...) + } else { + bus.wg.Add(1) + if handler.transactional { + handler.Lock() + } + go bus.doPublishAsync(handler, topic, args...) + } + } + } +} + +func (bus *ChainBus) doPublish(handler *eventHandler, topic bi.TransactionType, args ...interface{}) { + passedArguments := bus.setUpPublish(topic, args...) + handler.callBack.Call(passedArguments) +} + +func (bus *ChainBus) doPublishAsync(handler *eventHandler, topic bi.TransactionType, args ...interface{}) { + defer bus.wg.Done() + if handler.transactional { + defer handler.Unlock() + } + bus.doPublish(handler, topic, args...) 
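	// Note (editorial, not part of the patch): for a transactional handler the
	// per-handler mutex is taken in Publish and released by the deferred Unlock
	// above, so successive publishes to that handler run one at a time, while
	// non-transactional async handlers may run concurrently. The wg.Done above
	// pairs with the wg.Add(1) in Publish, which WaitAsync blocks on.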
+} + +func (bus *ChainBus) removeHandler(topic bi.TransactionType, idx int) { + if _, ok := bus.handlers[topic]; !ok { + return + } + l := len(bus.handlers[topic]) + + if !(0 <= idx && idx < l) { + return + } + + copy(bus.handlers[topic][idx:], bus.handlers[topic][idx+1:]) + bus.handlers[topic][l-1] = nil // or the zero value of T + bus.handlers[topic] = bus.handlers[topic][:l-1] +} + +func (bus *ChainBus) findHandlerIdx(topic bi.TransactionType, callback reflect.Value) int { + if _, ok := bus.handlers[topic]; ok { + for idx, handler := range bus.handlers[topic] { + if handler.callBack == callback { + return idx + } + } + } + return -1 +} + +func (bus *ChainBus) setUpPublish(topic bi.TransactionType, args ...interface{}) []reflect.Value { + + passedArguments := make([]reflect.Value, 0) + for _, arg := range args { + passedArguments = append(passedArguments, reflect.ValueOf(arg)) + } + return passedArguments +} + +// WaitAsync waits for all async callbacks to complete +func (bus *ChainBus) WaitAsync() { + bus.wg.Wait() +} diff --git a/chainbus/bus_test.go b/chainbus/bus_test.go new file mode 100644 index 000000000..552bc8dcc --- /dev/null +++ b/chainbus/bus_test.go @@ -0,0 +1,175 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
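For orientation, a small usage sketch of the chainbus API defined above (the topic value, handler signatures and payload are illustrative; New, Subscribe, SubscribeAsync, Publish and WaitAsync are the calls defined in bus.go):

package main

import (
	"fmt"

	bi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces"
	"github.com/CovenantSQL/CovenantSQL/chainbus"
)

func main() {
	bus := chainbus.New()

	// Synchronous handler: runs inline during Publish.
	_ = bus.Subscribe(bi.TransactionType(1), func(height uint32) {
		fmt.Println("sync handler saw height", height)
	})

	// Asynchronous transactional handler: runs in its own goroutine, but
	// successive publishes to it are serialized.
	_ = bus.SubscribeAsync(bi.TransactionType(1), func(height uint32) {
		fmt.Println("async handler saw height", height)
	}, true)

	// Publish forwards its arguments to every handler of the topic.
	bus.Publish(bi.TransactionType(1), uint32(42))

	// Wait for all asynchronous handlers to finish.
	bus.WaitAsync()
}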
+ */ + +package chainbus + +import ( + "sync/atomic" + "testing" + "time" + + bi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" +) + +func TestNew(t *testing.T) { + bus := New() + if bus == nil { + t.Log("New EventBus not created!") + t.Fail() + } +} + +func TestHasCallback(t *testing.T) { + bus := New() + bus.Subscribe(bi.TransactionType(1), func() {}) + if bus.HasCallback(bi.TransactionType(2)) { + t.Fail() + } + if !bus.HasCallback(bi.TransactionType(1)) { + t.Fail() + } +} + +func TestSubscribe(t *testing.T) { + bus := New() + if bus.Subscribe(bi.TransactionType(1), func() {}) != nil { + t.Fail() + } + if bus.Subscribe(bi.TransactionType(1), "String") == nil { + t.Fail() + } +} + +func TestSubscribeOnce(t *testing.T) { + bus := New() + if bus.SubscribeOnce(bi.TransactionType(1), func() {}) != nil { + t.Fail() + } + if bus.SubscribeOnce(bi.TransactionType(1), "String") == nil { + t.Fail() + } +} + +func TestSubscribeOnceAndManySubscribe(t *testing.T) { + bus := New() + event := bi.TransactionType(1) + flag := 0 + fn := func() { flag++ } + bus.SubscribeOnce(event, fn) + bus.Subscribe(event, fn) + bus.Subscribe(event, fn) + bus.Publish(event) + + if flag != 3 { + t.Fail() + } +} + +func TestUnsubscribe(t *testing.T) { + bus := New() + handler := func() {} + bus.Subscribe(bi.TransactionType(1), handler) + if bus.Unsubscribe(bi.TransactionType(1), handler) != nil { + t.Fail() + } + if bus.Unsubscribe(bi.TransactionType(1), handler) == nil { + t.Fail() + } +} + +func TestPublish(t *testing.T) { + bus := New() + bus.Subscribe(bi.TransactionType(1), func(a int, b int) { + if a != b { + t.Fail() + } + }) + bus.Publish(bi.TransactionType(1), 10, 10) +} + +func TestSubcribeOnceAsync(t *testing.T) { + results := make([]int, 0) + + bus := New() + bus.SubscribeOnceAsync(bi.TransactionType(1), func(a int, out *[]int) { + *out = append(*out, a) + }) + + bus.Publish(bi.TransactionType(1), 10, &results) + bus.Publish(bi.TransactionType(1), 10, &results) + + bus.WaitAsync() + + if len(results) != 1 { + t.Fail() + } + + if bus.HasCallback(bi.TransactionType(1)) { + t.Fail() + } +} + +func TestSubscribeAsyncTransactional(t *testing.T) { + results := make([]int, 0) + + bus := New() + bus.SubscribeAsync(bi.TransactionType(1), func(a int, out *[]int, dur string) { + sleep, _ := time.ParseDuration(dur) + time.Sleep(sleep) + *out = append(*out, a) + }, true) + + bus.Publish(bi.TransactionType(1), 1, &results, "1s") + bus.Publish(bi.TransactionType(1), 2, &results, "0s") + + bus.WaitAsync() + + if len(results) != 2 { + t.Fail() + } + + if results[0] != 1 || results[1] != 2 { + t.Fail() + } +} + +func TestSubscribeAsync(t *testing.T) { + results := make(chan int) + + bus := New() + bus.SubscribeAsync(bi.TransactionType(1), func(a int, out chan<- int) { + out <- a + }, false) + + bus.Publish(bi.TransactionType(1), 1, results) + bus.Publish(bi.TransactionType(1), 2, results) + + var numResults int32 + + go func() { + for range results { + atomic.AddInt32(&numResults, 1) + } + }() + + bus.WaitAsync() + + time.Sleep(10 * time.Millisecond) + + if atomic.LoadInt32(&numResults) != 2 { + t.Fail() + } +} diff --git a/chainbus/doc.go b/chainbus/doc.go new file mode 100644 index 000000000..d606ba5bb --- /dev/null +++ b/chainbus/doc.go @@ -0,0 +1,39 @@ +//The MIT License (MIT) +// +//Copyright (c) 2014 Alex Saskevich +// +//Permission is hereby granted, free of charge, to any person obtaining a copy +//of this software and associated documentation files (the "Software"), to deal +//in the Software without 
restriction, including without limitation the rights +//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +//copies of the Software, and to permit persons to whom the Software is +//furnished to do so, subject to the following conditions: +// +//The above copyright notice and this permission notice shall be included in all +//copies or substantial portions of the Software. +// +//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +//SOFTWARE. + +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package chainbus diff --git a/client/conn.go b/client/conn.go index 449a304fe..eeec4d1db 100644 --- a/client/conn.go +++ b/client/conn.go @@ -29,19 +29,19 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) // conn implements an interface sql.Conn. 
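// Editorial note (not part of the patch): with this change the connection
// resolves the database peer list once in newConn and keeps a single
// rpc.PersistentCaller to the leader for the lifetime of the connection,
// instead of recreating the caller on every sendQuery. Write queries issued
// inside a transaction are buffered in `queries` and flushed as one request
// on Commit; acknowledgements are pushed onto `ackCh` and sent back by the
// background workers started in startAckWorkers.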
type conn struct { dbID proto.DatabaseID - queries []wt.Query + queries []types.Query localNodeID proto.NodeID privKey *asymmetric.PrivateKey - ackCh chan *wt.Ack + ackCh chan *types.Ack inTransaction bool closed int32 pCaller *rpc.PersistentCaller @@ -64,15 +64,17 @@ func newConn(cfg *Config) (c *conn, err error) { dbID: proto.DatabaseID(cfg.DatabaseID), localNodeID: localNodeID, privKey: privKey, - queries: make([]wt.Query, 0), + queries: make([]types.Query, 0), } + var peers *proto.Peers // get peers from BP - if _, err = cacheGetPeers(c.dbID, c.privKey); err != nil { + if peers, err = cacheGetPeers(c.dbID, c.privKey); err != nil { log.WithError(err).Error("cacheGetPeers failed") c = nil return } + c.pCaller = rpc.NewPersistentCaller(peers.Leader) err = c.startAckWorkers(2) if err != nil { @@ -86,7 +88,7 @@ func newConn(cfg *Config) (c *conn, err error) { } func (c *conn) startAckWorkers(workerCount int) (err error) { - c.ackCh = make(chan *wt.Ack, workerCount*4) + c.ackCh = make(chan *types.Ack, workerCount*4) for i := 0; i < workerCount; i++ { go c.ackWorker() } @@ -120,7 +122,7 @@ func (c *conn) ackWorker() { continue } - var ackRes wt.AckResponse + var ackRes types.AckResponse // send ack back if err = pc.Call(route.DBSAck.String(), ack, &ackRes); err != nil { log.WithError(err).Warning("send ack failed") @@ -203,7 +205,7 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name sq := convertQuery(query, args) var affectedRows, lastInsertID int64 - if affectedRows, lastInsertID, _, err = c.addQuery(wt.WriteQuery, sq); err != nil { + if affectedRows, lastInsertID, _, err = c.addQuery(types.WriteQuery, sq); err != nil { return } @@ -224,7 +226,7 @@ func (c *conn) QueryContext(ctx context.Context, query string, args []driver.Nam // TODO(xq262144): make use of the ctx argument sq := convertQuery(query, args) - _, _, rows, err = c.addQuery(wt.ReadQuery, sq) + _, _, rows, err = c.addQuery(types.ReadQuery, sq) return } @@ -246,7 +248,7 @@ func (c *conn) Commit() (err error) { if len(c.queries) > 0 { // send query - if _, _, _, err = c.sendQuery(wt.WriteQuery, c.queries); err != nil { + if _, _, _, err = c.sendQuery(types.WriteQuery, c.queries); err != nil { return } } @@ -276,10 +278,10 @@ func (c *conn) Rollback() error { return nil } -func (c *conn) addQuery(queryType wt.QueryType, query *wt.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { +func (c *conn) addQuery(queryType types.QueryType, query *types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { if c.inTransaction { // check query type, enqueue query - if queryType == wt.ReadQuery { + if queryType == types.ReadQuery { // read query is not supported in transaction err = ErrQueryInTransaction return @@ -301,10 +303,10 @@ func (c *conn) addQuery(queryType wt.QueryType, query *wt.Query) (affectedRows i "args": query.Args, }).Debug("execute query") - return c.sendQuery(queryType, []wt.Query{*query}) + return c.sendQuery(queryType, []types.Query{*query}) } -func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { +func (c *conn) sendQuery(queryType types.QueryType, queries []types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { var peers *proto.Peers if peers, err = cacheGetPeers(c.dbID, c.privKey); err != nil { return @@ -326,9 +328,9 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (affectedRo }() // build request - 
req := &wt.Request{ - Header: wt.SignedRequestHeader{ - RequestHeader: wt.RequestHeader{ + req := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ QueryType: queryType, NodeID: c.localNodeID, DatabaseID: c.dbID, @@ -337,7 +339,7 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (affectedRo Timestamp: getLocalTime(), }, }, - Payload: wt.RequestPayload{ + Payload: types.RequestPayload{ Queries: queries, }, } @@ -346,8 +348,7 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (affectedRo return } - c.pCaller = rpc.NewPersistentCaller(peers.Leader) - var response wt.Response + var response types.Response if err = c.pCaller.Call(route.DBSQuery.String(), req, &response); err != nil { return } @@ -358,15 +359,15 @@ func (c *conn) sendQuery(queryType wt.QueryType, queries []wt.Query) (affectedRo } rows = newRows(&response) - if queryType == wt.WriteQuery { + if queryType == types.WriteQuery { affectedRows = response.Header.AffectedRows lastInsertID = response.Header.LastInsertID } // build ack - c.ackCh <- &wt.Ack{ - Header: wt.SignedAckHeader{ - AckHeader: wt.AckHeader{ + c.ackCh <- &types.Ack{ + Header: types.SignedAckHeader{ + AckHeader: types.AckHeader{ Response: response.Header, NodeID: c.localNodeID, Timestamp: getLocalTime(), @@ -381,13 +382,13 @@ func getLocalTime() time.Time { return time.Now().UTC() } -func convertQuery(query string, args []driver.NamedValue) (sq *wt.Query) { +func convertQuery(query string, args []driver.NamedValue) (sq *types.Query) { // rebuild args to named args - sq = &wt.Query{ + sq = &types.Query{ Pattern: query, } - sq.Args = make([]wt.NamedArg, len(args)) + sq.Args = make([]types.NamedArg, len(args)) for i, v := range args { sq.Args[i].Name = v.Name diff --git a/client/driver.go b/client/driver.go index 274c32e70..5a789ae88 100644 --- a/client/driver.go +++ b/client/driver.go @@ -33,8 +33,8 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/pkg/errors" ) @@ -84,7 +84,7 @@ func (d *covenantSQLDriver) Open(dsn string) (conn driver.Conn, err error) { } // ResourceMeta defines new database resources requirement descriptions. -type ResourceMeta wt.ResourceMeta +type ResourceMeta types.ResourceMeta // Init defines init process for client. 
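As a quick orientation for the client API touched here, a minimal end-to-end sketch (the config path, master key, resource fields and the "covenantsql" driver name are assumptions for illustration; Init, Create and Drop are the functions in this file):

package main

import (
	"database/sql"
	"log"

	"github.com/CovenantSQL/CovenantSQL/client"
)

func main() {
	// Init loads the config and unlocks the local key pair; calling it a
	// second time returns an error (see the driver_test change in this patch).
	if err := client.Init("./config.yaml", []byte("")); err != nil {
		log.Fatal(err)
	}

	// Create signs a types.CreateDatabaseRequest with the local private key;
	// callers no longer fill in req.Header.Signee themselves.
	dsn, err := client.Create(client.ResourceMeta{}) // set resource requirements as needed
	if err != nil {
		log.Fatal(err)
	}
	defer client.Drop(dsn)

	// Driver name assumed here for illustration.
	db, err := sql.Open("covenantsql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}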
func Init(configFile string, masterKey []byte) (err error) { @@ -122,12 +122,8 @@ func Create(meta ResourceMeta) (dsn string, err error) { return } - req := new(bp.CreateDatabaseRequest) - req.Header.ResourceMeta = wt.ResourceMeta(meta) - if req.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { - err = errors.Wrap(err, "get local public key failed") - return - } + req := new(types.CreateDatabaseRequest) + req.Header.ResourceMeta = types.ResourceMeta(meta) var privateKey *asymmetric.PrivateKey if privateKey, err = kms.GetLocalPrivateKey(); err != nil { err = errors.Wrap(err, "get local private key failed") @@ -137,7 +133,7 @@ func Create(meta ResourceMeta) (dsn string, err error) { err = errors.Wrap(err, "sign request failed") return } - res := new(bp.CreateDatabaseResponse) + res := new(types.CreateDatabaseResponse) if err = requestBP(route.BPDBCreateDatabase, req, res); err != nil { err = errors.Wrap(err, "call BPDB.CreateDatabase failed") @@ -167,11 +163,8 @@ func Drop(dsn string) (err error) { return } - req := new(bp.DropDatabaseRequest) + req := new(types.DropDatabaseRequest) req.Header.DatabaseID = proto.DatabaseID(cfg.DatabaseID) - if req.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { - return - } var privateKey *asymmetric.PrivateKey if privateKey, err = kms.GetLocalPrivateKey(); err != nil { return @@ -179,7 +172,7 @@ func Drop(dsn string) (err error) { if err = req.Sign(privateKey); err != nil { return } - res := new(bp.DropDatabaseResponse) + res := new(types.DropDatabaseResponse) err = requestBP(route.BPDBDropDatabase, req, res) return @@ -343,7 +336,7 @@ func cacheGetPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers } func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *proto.Peers, err error) { - req := new(bp.GetDatabaseRequest) + req := new(types.GetDatabaseRequest) req.Header.DatabaseID = dbID defer func() { @@ -357,7 +350,7 @@ func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *pro return } - res := new(bp.GetDatabaseResponse) + res := new(types.GetDatabaseResponse) if err = requestBP(route.BPDBGetDatabase, req, res); err != nil { return } diff --git a/client/driver_test.go b/client/driver_test.go index f412b19f7..018cd9665 100644 --- a/client/driver_test.go +++ b/client/driver_test.go @@ -36,6 +36,9 @@ func TestInit(t *testing.T) { stopTestService, confDir, err = startTestService() So(err, ShouldBeNil) defer stopTestService() + // already init ed + err = Init(filepath.Join(confDir, "config.yaml"), []byte("")) + So(err, ShouldNotBeNil) // fake driver not initialized atomic.StoreUint32(&driverInitialized, 0) err = Init(filepath.Join(confDir, "config.yaml"), []byte("")) diff --git a/client/helper_test.go b/client/helper_test.go index c8cdead5a..9b4be6670 100644 --- a/client/helper_test.go +++ b/client/helper_test.go @@ -38,11 +38,10 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/worker" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) const ( @@ -57,7 +56,7 @@ var ( // fake BPDB service type stubBPDBService struct{} -func (s *stubBPDBService) CreateDatabase(req *bp.CreateDatabaseRequest, resp *bp.CreateDatabaseResponse) (err error) { +func (s *stubBPDBService) 
CreateDatabase(req *types.CreateDatabaseRequest, resp *types.CreateDatabaseResponse) (err error) { if resp.Header.InstanceMeta, err = s.getInstanceMeta(proto.DatabaseID("db")); err != nil { return } @@ -74,11 +73,11 @@ func (s *stubBPDBService) CreateDatabase(req *bp.CreateDatabaseRequest, resp *bp return } -func (s *stubBPDBService) DropDatabase(req *bp.DropDatabaseRequest, resp *bp.DropDatabaseRequest) (err error) { +func (s *stubBPDBService) DropDatabase(req *types.DropDatabaseRequest, resp *types.DropDatabaseRequest) (err error) { return } -func (s *stubBPDBService) GetDatabase(req *bp.GetDatabaseRequest, resp *bp.GetDatabaseResponse) (err error) { +func (s *stubBPDBService) GetDatabase(req *types.GetDatabaseRequest, resp *types.GetDatabaseResponse) (err error) { if resp.Header.InstanceMeta, err = s.getInstanceMeta(req.Header.DatabaseID); err != nil { return } @@ -95,8 +94,8 @@ func (s *stubBPDBService) GetDatabase(req *bp.GetDatabaseRequest, resp *bp.GetDa return } -func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitServiceResponse) (err error) { - resp.Header.Instances = make([]wt.ServiceInstance, 0) +func (s *stubBPDBService) GetNodeDatabases(req *types.InitService, resp *types.InitServiceResponse) (err error) { + resp.Header.Instances = make([]types.ServiceInstance, 0) if resp.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { return } @@ -110,7 +109,7 @@ func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitSer return } -func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { +func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance types.ServiceInstance, err error) { var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return @@ -187,10 +186,10 @@ func startTestService() (stopTestService func(), tempDir string, err error) { } // add database - var req *wt.UpdateService - var res wt.UpdateServiceResponse + var req *types.UpdateService + var res types.UpdateServiceResponse var peers *proto.Peers - var block *ct.Block + var block *types.Block dbID := proto.DatabaseID("db") @@ -203,9 +202,9 @@ func startTestService() (stopTestService func(), tempDir string, err error) { } // build create database request - req = new(wt.UpdateService) - req.Header.Op = wt.CreateDB - req.Header.Instance = wt.ServiceInstance{ + req = new(types.UpdateService) + req.Header.Op = types.CreateDB + req.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, Peers: peers, GenesisBlock: block, @@ -306,7 +305,7 @@ func initNode() (cleanupFunc func(), tempDir string, server *rpc.Server, err err } // copied from sqlchain.xxx_test. 
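// Editorial note (not part of the patch): the helper below now builds the
// unified types.Block (the former sqlchain/types and worker/types imports are
// folded into the single types package throughout this change), and the
// randomly filled Queries list is no longer attached to the generated block.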
-func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) { +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) { // Generate key pair priv, pub, err := asymmetric.GenSecp256k1KeyPair() @@ -317,9 +316,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: rootHash, @@ -327,12 +326,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error Timestamp: time.Now().UTC(), }, }, - Queries: make([]*hash.Hash, rand.Intn(10)+10), - } - - for i := range b.Queries { - b.Queries[i] = new(hash.Hash) - rand.Read(b.Queries[i][:]) } if isGenesis { diff --git a/client/result_test.go b/client/result_test.go new file mode 100644 index 000000000..be6975fcc --- /dev/null +++ b/client/result_test.go @@ -0,0 +1,39 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package client + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestExecResult(t *testing.T) { + Convey("test result", t, func() { + r := &execResult{ + affectedRows: 1, + lastInsertID: 2, + } + + i, err := r.LastInsertId() + So(i, ShouldEqual, 2) + So(err, ShouldBeNil) + i, err = r.RowsAffected() + So(i, ShouldEqual, 1) + So(err, ShouldBeNil) + }) +} diff --git a/client/rows.go b/client/rows.go index cd599f146..dcacc2c06 100644 --- a/client/rows.go +++ b/client/rows.go @@ -21,16 +21,16 @@ import ( "io" "strings" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" ) type rows struct { columns []string types []string - data []wt.ResponseRow + data []types.ResponseRow } -func newRows(res *wt.Response) *rows { +func newRows(res *types.Response) *rows { return &rows{ columns: res.Payload.Columns, types: res.Payload.DeclTypes, diff --git a/client/rows_test.go b/client/rows_test.go index 80795c7e8..15602b4eb 100644 --- a/client/rows_test.go +++ b/client/rows_test.go @@ -15,3 +15,45 @@ */ package client + +import ( + "database/sql/driver" + "io" + "testing" + + "github.com/CovenantSQL/CovenantSQL/types" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestRowsStructure(t *testing.T) { + Convey("test rows", t, func() { + r := newRows(&types.Response{ + Payload: types.ResponsePayload{ + Columns: []string{ + "a", + }, + DeclTypes: []string{ + "int", + }, + Rows: []types.ResponseRow{ + { + Values: []interface{}{1}, + }, + }, + }, + }) + columns := r.Columns() + So(columns, ShouldResemble, []string{"a"}) + So(r.ColumnTypeDatabaseTypeName(0), ShouldEqual, "INT") + + dest := make([]driver.Value, 1) + err := r.Next(dest) + So(err, ShouldBeNil) + So(dest[0], ShouldEqual, 1) + err = r.Next(dest) + So(err, ShouldEqual, io.EOF) + err = r.Close() + So(err, ShouldBeNil) + So(r.data, ShouldBeNil) + }) +} diff --git a/client/stmt_test.go b/client/stmt_test.go index 4672a9a92..a3aebb0fa 100644 --- a/client/stmt_test.go +++ b/client/stmt_test.go @@ -17,7 +17,10 @@ package client import ( + "context" "database/sql" + "database/sql/driver" + "fmt" "testing" . "github.com/smartystreets/goconvey/convey" @@ -95,10 +98,84 @@ func TestStmt(t *testing.T) { _, err = stmt.Exec() So(err, ShouldNotBeNil) + ctx := context.Background() + err = ExecuteTx(ctx, db, nil /* txopts */, func(tx *sql.Tx) error { + _, err := tx.Exec("insert into test values(?)", 7) + if err != nil { + return err + } + _, err = tx.Exec("insert into test values(?)", 8) + if err != nil { + return err + } + _, err = tx.Exec("insert into test values(?)", 9) + if err != nil { + return err + } + return err + }) + So(err, ShouldBeNil) + + row = db.QueryRow("select count(1) as cnt from test") + So(row, ShouldNotBeNil) + err = row.Scan(&result) + So(err, ShouldBeNil) + So(result, ShouldEqual, 6) + + err = ExecuteTx(ctx, db, nil /* txopts */, func(tx *sql.Tx) error { + _, err := tx.Exec("insert into test values(?)", 10) + if err != nil { + return err + } + _, err = tx.Exec("insert into testNoExist values(?)", 11) + if err != nil { + return err + } + _, err = tx.Exec("insert into test values(?)", 12) + if err != nil { + return err + } + return err + }) + So(err, ShouldNotBeNil) + + err = ExecuteTx(ctx, db, nil /* txopts */, func(tx *sql.Tx) error { + _, err := tx.Exec("insert into test values(?)", 10) + if err != nil { + return err + } + return fmt.Errorf("some error") + }) + So(err, ShouldNotBeNil) + + row = db.QueryRow("select count(1) as cnt from test") + So(row, ShouldNotBeNil) + err = row.Scan(&result) + So(err, ShouldBeNil) + So(result, ShouldEqual, 6) + db.Close() // prepare on closed _, err = db.Prepare("select * from test") So(err, ShouldNotBeNil) + + err = ExecuteTx(nil, db, nil /* txopts */, func(tx *sql.Tx) error { + return nil + }) + So(err, ShouldNotBeNil) + + // closed stmt and old args + cs := newStmt(nil, "test query") + cs.Close() + + _, err = cs.Query([]driver.Value{1}) + So(err, ShouldNotBeNil) + + _, err = cs.Exec([]driver.Value{2}) + err = ExecuteTx(nil, db, nil /* txopts */, func(tx *sql.Tx) error { + return nil + }) + So(err, ShouldNotBeNil) }) } diff --git a/client/tx.go b/client/tx.go new file mode 100644 index 000000000..047d1ccfe --- /dev/null +++ b/client/tx.go @@ -0,0 +1,50 @@ +/* + * Copyright 2016 The Cockroach Authors. + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package client + +import ( + "context" + "database/sql" + "database/sql/driver" +) + +// ExecuteTx starts a transaction, and runs fn in it +func ExecuteTx( + ctx context.Context, db *sql.DB, txopts *sql.TxOptions, fn func(*sql.Tx) error, +) error { + // Start a transaction. + tx, err := db.BeginTx(ctx, txopts) + if err != nil { + return err + } + return ExecuteInTx(tx, func() error { return fn(tx) }) +} + +// ExecuteInTx runs fn inside tx which should already have begun. +func ExecuteInTx(tx driver.Tx, fn func() error) (err error) { + err = fn() + if err == nil { + // Ignore commit errors. The tx has already been committed by RELEASE. + err = tx.Commit() + } else { + // We always need to execute a Rollback() so sql.DB releases the + // connection. + _ = tx.Rollback() + } + return +} diff --git a/cmd/cql-adapter/storage/covenantsql.go b/cmd/cql-adapter/storage/covenantsql.go index e32f0a7ad..9750c891c 100644 --- a/cmd/cql-adapter/storage/covenantsql.go +++ b/cmd/cql-adapter/storage/covenantsql.go @@ -102,8 +102,10 @@ func (s *CovenantSQLStorage) Exec(dbID string, query string) (affectedRows int64 var result sql.Result result, err = conn.Exec(query) - affectedRows, _ = result.RowsAffected() - lastInsertID, _ = result.LastInsertId() + if err == nil { + affectedRows, _ = result.RowsAffected() + lastInsertID, _ = result.LastInsertId() + } return } diff --git a/cmd/cql-explorer/service.go b/cmd/cql-explorer/service.go index 0ccda7ff3..d77181285 100644 --- a/cmd/cql-explorer/service.go +++ b/cmd/cql-explorer/service.go @@ -215,7 +215,7 @@ func (s *Service) getTxByHash(h *hash.Hash) (tx pi.Transaction, c uint32, height continue } - if curH := curTx.GetHash(); h.IsEqual(&curH) { + if curH := curTx.Hash(); h.IsEqual(&curH) { tx = curTx break } @@ -367,7 +367,7 @@ func (s *Service) saveTransaction(c uint32, tx pi.Transaction) (err error) { return ErrNilTransaction } - txHash := tx.GetHash() + txHash := tx.Hash() var txKey []byte diff --git a/cmd/cql-fuse/block.go b/cmd/cql-fuse/block.go new file mode 100644 index 000000000..ceb421336 --- /dev/null +++ b/cmd/cql-fuse/block.go @@ -0,0 +1,361 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. +// +// Author: Marc Berhault (marc@cockroachlabs.com) + +package main + +import ( + "fmt" + "strings" + + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +// BlockSize is the size of each data block. It must not +// change throughout the lifetime of the filesystem. +const BlockSize = 4 << 10 // 4KB + +func min(a, b uint64) uint64 { + if a < b { + return a + } + return b +} + +// blockRange describes a range of blocks. +// If the first and last block are the same, the effective data range +// will be: [startOffset, lastLength) +type blockRange struct { + start int // index of the start block + startOffset uint64 // starting offset within the first block + startLength uint64 // length of data in first block + last int // index of the last block + lastLength uint64 // length of the last block +} + +// newBlockRange returns the block range for 'size' bytes from 'from'. +func newBlockRange(from, length uint64) blockRange { + end := from + length + return blockRange{ + start: int(from / BlockSize), + startOffset: from % BlockSize, + startLength: min(length, BlockSize-(from%BlockSize)), + last: int(end / BlockSize), + lastLength: end % BlockSize, + } +} + +// shrink resizes the data to a smaller length. +// Requirement: from > to. +// If truncates are done on block boundaries, this is reasonably +// efficient. However, if truncating in the middle of a block, +// we need to fetch the block first, truncate it, and write it again. +func shrink(e sqlExecutor, inodeID, from, to uint64) error { + delRange := newBlockRange(to, from-to) + deleteFrom := delRange.start + + if delRange.startOffset > 0 { + // We're truncating in the middle of a block, fetch it, truncate its + // data, and write it again. + // TODO(marc): this would be more efficient if we had LEFT for bytes. + data, err := getBlockData(e, inodeID, delRange.start) + if err != nil { + return err + } + data = data[:delRange.startOffset] + if err := updateBlockData(e, inodeID, delRange.start, data); err != nil { + return err + } + // We don't need to delete this block. + deleteFrom++ + } + + deleteTo := delRange.last + if delRange.lastLength == 0 { + // The last block did not previously exist. + deleteTo-- + } + if deleteTo < deleteFrom { + return nil + } + + // There is something to delete. + // TODO(marc): would it be better to pass the block IDs? + delStmt := `DELETE FROM fs_BLOCK WHERE id = ? AND block >= ?` + if _, err := e.Exec(delStmt, inodeID, deleteFrom); err != nil { + return err + } + + return nil +} + +// grow resizes the data to a larger length. +// Requirement: to > from. +// If the file ended in a partial block, we fetch it, grow it, +// and write it back. +func grow(e sqlExecutor, inodeID, from, to uint64) error { + addRange := newBlockRange(from, to-from) + insertFrom := addRange.start + + if addRange.startOffset > 0 { + // We need to extend the original 'last block'. + // Fetch it, grow it, and update it. + // TODO(marc): this would be more efficient if we had RPAD for bytes. 
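// Editorial note (not part of the diff): concretely, growing a 6,000-byte file
// to 20,000 bytes gives addRange.startOffset = 6000 % 4096 = 1,904, so the
// current last block (index 1) is fetched just below and padded with
// addRange.startLength = 2,192 zero bytes to a full 4 KB; only then are whole
// empty blocks inserted for the remainder.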
+ data, err := getBlockData(e, inodeID, addRange.start) + if err != nil { + return err + } + data = append(data, make([]byte, addRange.startLength, addRange.startLength)...) + if err := updateBlockData(e, inodeID, addRange.start, data); err != nil { + return err + } + // We don't need to insert this block. + insertFrom++ + } + + insertTo := addRange.last + if insertTo < insertFrom { + return nil + } + + // Build the sql statement and blocks to insert. + // We don't share this functionality with 'write' because we can repeat empty blocks. + // This would be shorter if we weren't trying to be efficient. + // TODO(marc): this would also be better if we supported sparse files. + paramStrings := []string{} + params := []interface{}{} + count := 1 // placeholder count starts at 1. + if insertFrom != insertTo { + // We have full blocks. Only send a full block once. + for i := insertFrom; i < insertTo; i++ { + params = append(params, make([]byte, BlockSize, BlockSize)) + } + count++ + } + + // Go over all blocks that are certainly full. + for i := insertFrom; i < insertTo; i++ { + paramStrings = append(paramStrings, fmt.Sprintf("(%d, %d, ?)", inodeID, i)) + } + + // Check the last block. + if addRange.lastLength > 0 { + // Not empty, write it. It can't be a full block, because we + // would have an empty block right after. + params = append(params, make([]byte, addRange.lastLength, addRange.lastLength)) + paramStrings = append(paramStrings, fmt.Sprintf("(%d, %d, ?)", + inodeID, addRange.last)) + count++ + } + + if len(paramStrings) == 0 { + // We had only one block, and it was empty. Nothing do to. + return nil + } + + insStmt := fmt.Sprintf(`INSERT INTO fs_block VALUES %s`, strings.Join(paramStrings, ",")) + if _, err := e.Exec(insStmt, params...); err != nil { + return err + } + + return nil +} + +// read returns the data [from, to). +// Requires: to > from and [to, from) is contained in the file. +func read(e sqlExecutor, inodeID, from, to uint64) ([]byte, error) { + readRange := newBlockRange(from, to-from) + end := readRange.last + if readRange.lastLength == 0 { + end-- + } + + blockInfos, err := getBlocksBetween(e, inodeID, readRange.start, end) + if err != nil { + return nil, err + } + if len(blockInfos) != end-readRange.start+1 { + return nil, fmt.Errorf("wrong number of blocks, asked for [%d-%d], got %d back", + readRange.start, end, len(blockInfos)) + } + + if readRange.lastLength != 0 { + // We have a last partial block, truncate it. + last := len(blockInfos) - 1 + blockInfos[last].data = blockInfos[last].data[:readRange.lastLength] + } + blockInfos[0].data = blockInfos[0].data[readRange.startOffset:] + + var data []byte + for _, b := range blockInfos { + data = append(data, b.data...) + } + + return data, nil +} + +// write commits data to the blocks starting at 'offset' +// Amount of data to write must be non-zero. +// If offset is greated than 'originalSize', the file is grown first. +// We always write all or nothing. +func write(e sqlExecutor, inodeID, originalSize, offset uint64, data []byte) error { + if offset > originalSize { + diff := offset - originalSize + if diff > BlockSize*2 { + // we need to grow the file by at least two blocks. Use growing method + // which only sends empty blocks once. + if err := grow(e, inodeID, originalSize, offset); err != nil { + return err + } + originalSize = offset + } else if diff > 0 { + // don't grow the file first, just change what we need to write. + data = append(make([]byte, diff, diff), data...) 
+ offset = originalSize + } + } + + // Now we know that offset is <= originalSize. + writeRange := newBlockRange(offset, uint64(len(data))) + writeFrom := writeRange.start + + if writeRange.startOffset > 0 { + // We're partially overwriting a block (this includes appending + // to the last block): fetch it, grow it, and update it. + // TODO(marc): this would be more efficient if we had RPAD for bytes. + blockData, err := getBlockData(e, inodeID, writeRange.start) + if err != nil { + return err + } + blockData = append(blockData[:writeRange.startOffset], data[:writeRange.startLength]...) + data = data[writeRange.startLength:] + if err := updateBlockData(e, inodeID, writeRange.start, blockData); err != nil { + return err + } + // We don't need to insert this block. + writeFrom++ + } + + writeTo := writeRange.last + if writeRange.lastLength == 0 { + // Last block is empty, don't update/insert it. + writeTo-- + } + if writeTo < writeFrom { + return nil + } + + // Figure out last existing block. Needed to tell the difference + // between insert and update. + lastBlock := int(originalSize / BlockSize) + if originalSize%BlockSize == 0 { + // Empty blocks do not exist (size=0 -> lastblock=-1). + lastBlock-- + } + + // Process updates first. + for i := writeFrom; i <= writeTo; i++ { + if i > lastBlock { + // We've reached the end of existing blocks, no more UPDATE. + break + } + if len(data) == 0 { + panic(fmt.Sprintf("reached end of data, but still have %d blocks to write", + writeTo-i)) + } + toWrite := min(BlockSize, uint64(len(data))) + blockData := data[:toWrite] + data = data[toWrite:] + if toWrite != BlockSize { + // This is the last block, and it's partial, fetch the original + // data from this block and append. + // TODO(marc): we could fetch this at the same time as the first + // partial block, if any. This would make overwriting in the middle + // of the file on non-block boundaries a bit more efficient. + origData, err := getBlockData(e, inodeID, i) + if err != nil { + return err + } + toWrite = min(toWrite, uint64(len(origData))) + blockData = append(blockData, origData[toWrite:]...) + } + // TODO(marc): is there a way to do batch updates? + if err := updateBlockData(e, inodeID, i, blockData); err != nil { + return err + } + } + + if len(data) == 0 { + return nil + } + + paramStrings := []string{} + params := []interface{}{} + count := 1 // placeholder count starts at 1. + + for i := lastBlock + 1; i <= writeTo; i++ { + if len(data) == 0 { + panic(fmt.Sprintf("reached end of data, but still have %d blocks to write", + writeTo-i)) + } + toWrite := min(BlockSize, uint64(len(data))) + blockData := data[:toWrite] + data = data[toWrite:] + paramStrings = append(paramStrings, fmt.Sprintf("(%d, %d, ?)", + inodeID, i)) + params = append(params, blockData) + count++ + } + + if len(data) != 0 { + panic(fmt.Sprintf("processed all blocks, but still have %d of data to write", len(data))) + } + + insStmt := fmt.Sprintf(`INSERT INTO fs_block VALUES %s`, strings.Join(paramStrings, ",")) + log.Warn(insStmt, params) + if _, err := e.Exec(insStmt, params...); err != nil { + return err + } + + return nil +} + +// resize changes the size of the data for the inode with id 'inodeID' +// from 'from' to 'to'. This may grow or shrink. 
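Before the resizeBlocks dispatcher that follows, here is a small self-contained illustration (not part of the diff) of the 4 KB block arithmetic shared by shrink, grow, read and write above. It simply re-derives the newBlockRange fields for one concrete range; the numbers in the comments follow from BlockSize = 4096.

package main

import "fmt"

const blockSize = 4 << 10 // mirrors BlockSize above (4,096 bytes)

func min(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

func main() {
	// A 10,000-byte range starting at offset 6,000 spans blocks 1 through 3.
	from, length := uint64(6000), uint64(10000)
	end := from + length

	fmt.Println(from / blockSize)                      // start block: 1
	fmt.Println(from % blockSize)                      // startOffset: 1904
	fmt.Println(min(length, blockSize-from%blockSize)) // startLength: 2192
	fmt.Println(end / blockSize)                       // last block:  3
	fmt.Println(end % blockSize)                       // lastLength:  3712
	// 2,192 bytes complete block 1, block 2 is covered in full, and the first
	// 3,712 bytes of block 3 hold the tail (2192 + 4096 + 3712 = 10000).
}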
+func resizeBlocks(e sqlExecutor, inodeID, from, to uint64) error { + if to < from { + return shrink(e, inodeID, from, to) + } else if to > from { + return grow(e, inodeID, from, to) + } + return nil +} diff --git a/cmd/cql-fuse/block_test.go b/cmd/cql-fuse/block_test.go new file mode 100644 index 000000000..13ec53664 --- /dev/null +++ b/cmd/cql-fuse/block_test.go @@ -0,0 +1,540 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. +// +// Author: Marc Berhault (marc@cockroachlabs.com) + +package main + +import ( + "bytes" + "context" + "database/sql" + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "sync" + "syscall" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +var ( + baseDir = utils.GetProjectSrcDir() + testWorkingDir = FJ(baseDir, "./test/") + logDir = FJ(testWorkingDir, "./log/") + db *sql.DB +) + +var nodeCmds []*utils.CMD + +var FJ = filepath.Join + +func TestMain(m *testing.M) { + os.Exit(func() int { + var stop func() + db, stop = initTestDB() + defer stop() + defer db.Close() + return m.Run() + }()) +} + +func startNodes() { + ctx := context.Background() + + // wait for ports to be available + var err error + + err = utils.WaitForPorts(ctx, "127.0.0.1", []int{ + 6122, + 6121, + 6120, + }, time.Millisecond*200) + + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + // start 3bps + var cmd *utils.CMD + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cqld.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_0/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/leader.cover.out"), + }, + "leader", testWorkingDir, logDir, true, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cqld.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_1/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/follower1.cover.out"), + }, + "follower1", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cqld.test"), + 
[]string{"-config", FJ(testWorkingDir, "./fuse/node_2/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/follower2.cover.out"), + }, + "follower2", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + err = utils.WaitToConnect(ctx, "127.0.0.1", []int{ + 6122, + 6121, + 6120, + }, time.Second) + + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + err = utils.WaitForPorts(ctx, "127.0.0.1", []int{ + 3144, + 3145, + 3146, + }, time.Millisecond*200) + + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + time.Sleep(10 * time.Second) + + // start 3miners + os.RemoveAll(FJ(testWorkingDir, "./fuse/node_miner_0/data")) + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql-minerd.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_miner_0/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/miner0.cover.out"), + }, + "miner0", testWorkingDir, logDir, true, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + + os.RemoveAll(FJ(testWorkingDir, "./fuse/node_miner_1/data")) + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql-minerd.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_miner_1/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/miner1.cover.out"), + }, + "miner1", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + + os.RemoveAll(FJ(testWorkingDir, "./fuse/node_miner_2/data")) + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql-minerd.test"), + []string{"-config", FJ(testWorkingDir, "./fuse/node_miner_2/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql-fuse/miner2.cover.out"), + }, + "miner2", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } +} + +func stopNodes() { + var wg sync.WaitGroup + testDir := FJ(testWorkingDir, "./fuse") + for _, nodeCmd := range nodeCmds { + wg.Add(1) + go func(thisCmd *utils.CMD) { + defer wg.Done() + thisCmd.Cmd.Process.Signal(syscall.SIGTERM) + thisCmd.Cmd.Wait() + grepRace := exec.Command("/bin/sh", "-c", "grep -A 50 'DATA RACE' "+thisCmd.LogPath) + out, _ := grepRace.Output() + if len(out) > 2 { + log.Fatalf("DATA RACE in %s :\n%s", thisCmd.Cmd.Path, string(out)) + } + }(nodeCmd) + } + + wg.Wait() + cmd := exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name '*.db' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name '*.db-shm' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name '*.db-wal' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name 'db.meta' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name 'public.keystore' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . 
-name '*.public.keystore' -exec rm -vf {} \;`, testDir)) + cmd.Run() + cmd = exec.Command("/bin/sh", "-c", fmt.Sprintf(`cd %s && find . -name '*.ldb' -exec rm -vrf {} \;`, testDir)) + cmd.Run() +} + +func initTestDB() (*sql.DB, func()) { + + startNodes() + var err error + + time.Sleep(10 * time.Second) + + err = client.Init(FJ(testWorkingDir, "./fuse/node_c/config.yaml"), []byte("")) + if err != nil { + log.Errorf("init client failed: %v", err) + return nil, stopNodes + } + + // create + dsn, err := client.Create(client.ResourceMeta{Node: 1}) + if err != nil { + log.Errorf("create db failed: %v", err) + return nil, stopNodes + } + + log.Infof("the created database dsn is %v", dsn) + + db, err := sql.Open("covenantsql", dsn) + if err != nil { + log.Errorf("open db failed: %v", err) + return nil, stopNodes + } + + if err := initSchema(db); err != nil { + stopNodes() + log.Fatal(err) + } + + return db, stopNodes +} + +func getAllBlocks(db *sql.DB, inode uint64) ([]byte, error) { + blocks, err := getBlocks(db, inode) + if err != nil { + return nil, err + } + num := len(blocks) + var data []byte + for i, b := range blocks { + if i != b.block { + // We can't have missing blocks. + return nil, fmt.Errorf("gap in block list, found block %d at index %d", b.block, i) + } + bl := uint64(len(b.data)) + if bl == 0 { + return nil, fmt.Errorf("empty block found at %d (out of %d blocks)", i, num) + } + if i != (num-1) && bl != BlockSize { + return nil, fmt.Errorf("non-blocksize %d at %d (out of %d blocks)", bl, i, num) + } + data = append(data, b.data...) + } + return data, nil +} + +func TestBlockInfo(t *testing.T) { + testCases := []struct { + start, length uint64 + expected blockRange + }{ + {0, 0, blockRange{0, 0, 0, 0, 0}}, + {0, BlockSize * 4, blockRange{0, 0, BlockSize, 4, 0}}, + {0, BlockSize*4 + 500, blockRange{0, 0, BlockSize, 4, 500}}, + {500, BlockSize * 4, blockRange{0, 500, BlockSize - 500, 4, 500}}, + {BlockSize, BlockSize * 4, blockRange{1, 0, BlockSize, 5, 0}}, + {BlockSize, 500, blockRange{1, 0, 500, 1, 500}}, + {500, 1000, blockRange{0, 500, 1000, 0, 1500}}, + } + + for tcNum, tc := range testCases { + actual := newBlockRange(tc.start, tc.length) + if !reflect.DeepEqual(actual, tc.expected) { + t.Errorf("#%d: expected:\n%+v\ngot:\n%+v", tcNum, tc.expected, actual) + } + } +} + +func tryGrow(db *sql.DB, data []byte, id, newSize uint64) ([]byte, error) { + originalSize := uint64(len(data)) + data = append(data, make([]byte, newSize-originalSize)...) + if err := grow(db, id, originalSize, newSize); err != nil { + return nil, err + } + newData, err := getAllBlocks(db, id) + if err != nil { + return nil, err + } + if uint64(len(newData)) != newSize { + return nil, fmt.Errorf("getAllBlocks lengths don't match: got %d, expected %d", len(newData), newSize) + } + if !bytes.Equal(data, newData) { + return nil, fmt.Errorf("getAllBlocks data doesn't match") + } + + if newSize == 0 { + return newData, nil + } + + // Check the read as well. 
+ newData, err = read(db, id, 0, newSize) + if err != nil { + return nil, err + } + + if uint64(len(newData)) != newSize { + return nil, fmt.Errorf("read lengths don't match: got %d, expected %d", len(newData), newSize) + } + if !bytes.Equal(data, newData) { + return nil, fmt.Errorf("read data doesn't match") + } + + return newData, nil +} + +func tryShrink(db *sql.DB, data []byte, id, newSize uint64) ([]byte, error) { + originalSize := uint64(len(data)) + data = data[:newSize] + if err := shrink(db, id, originalSize, newSize); err != nil { + return nil, err + } + newData, err := getAllBlocks(db, id) + if err != nil { + return nil, err + } + if uint64(len(newData)) != newSize { + return nil, fmt.Errorf("getAllData lengths don't match: got %d, expected %d", len(newData), newSize) + } + if !bytes.Equal(data, newData) { + return nil, fmt.Errorf("getAllData data doesn't match") + } + + if newSize == 0 { + return newData, nil + } + + // Check the read as well. + newData, err = read(db, id, 0, newSize) + if err != nil { + return nil, err + } + + if uint64(len(newData)) != newSize { + return nil, fmt.Errorf("read lengths don't match: got %d, expected %d", len(newData), newSize) + } + if !bytes.Equal(data, newData) { + return nil, fmt.Errorf("read data doesn't match") + } + + return newData, nil +} + +func TestShrinkGrow(t *testing.T) { + + id := uint64(10) + + var err error + data := []byte{} + + if data, err = tryGrow(db, data, id, BlockSize*4+500); err != nil { + log.Fatal(err) + } + if data, err = tryGrow(db, data, id, BlockSize*4+600); err != nil { + log.Fatal(err) + } + if data, err = tryGrow(db, data, id, BlockSize*5); err != nil { + log.Fatal(err) + } + + // Shrink it down to 0. + if data, err = tryShrink(db, data, id, 0); err != nil { + log.Fatal(err) + } + if data, err = tryGrow(db, data, id, BlockSize*3+500); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, BlockSize*3+300); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, BlockSize*3); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, 0); err != nil { + log.Fatal(err) + } + if data, err = tryGrow(db, data, id, BlockSize); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, BlockSize-200); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, BlockSize-500); err != nil { + log.Fatal(err) + } + if data, err = tryShrink(db, data, id, 0); err != nil { + log.Fatal(err) + } +} + +func TestReadWriteBlocks(t *testing.T) { + + id := uint64(10) + rng, _ := NewPseudoRand() + length := BlockSize*3 + 500 + part1 := RandBytes(rng, length) + + if err := write(db, id, 0, 0, part1); err != nil { + log.Fatal(err) + } + + readData, err := read(db, id, 0, uint64(length)) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(part1, readData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(readData), len(part1)) + } + + verboseData, err := getAllBlocks(db, id) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(verboseData, part1) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(verboseData), len(part1)) + } + + // Write with hole in the middle. + part2 := make([]byte, BlockSize*2+250, BlockSize*2+250) + fullData := append(part1, part2...) + part3 := RandBytes(rng, BlockSize+123) + if err := write(db, id, uint64(len(part1)), uint64(len(fullData)), part3); err != nil { + log.Fatal(err) + } + fullData = append(fullData, part3...) 
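// Editorial note (not part of the diff): the offset passed to write() above
// exceeds the file's current size by len(part2), which is more than two
// blocks, so write() takes the grow() path first and fills the gap with
// zero-filled blocks; the zero-valued part2 inside fullData is therefore
// exactly what that gap reads back as below.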
+ readData, err = read(db, id, 0, uint64(len(fullData))) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(fullData, readData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(readData), len(fullData)) + } + + verboseData, err = getAllBlocks(db, id) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(verboseData, fullData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(verboseData), len(fullData)) + } + + // Now write into the middle of the file. + part2 = RandBytes(rng, len(part2)) + if err := write(db, id, uint64(len(fullData)), uint64(len(part1)), part2); err != nil { + log.Fatal(err) + } + fullData = append(part1, part2...) + fullData = append(fullData, part3...) + readData, err = read(db, id, 0, uint64(len(fullData))) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(fullData, readData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(readData), len(fullData)) + } + + verboseData, err = getAllBlocks(db, id) + if err != nil { + log.Fatal(err) + } + if !bytes.Equal(verboseData, fullData) { + t.Errorf("Bytes differ. lengths: %d, expected %d", len(verboseData), len(fullData)) + } + + // New file. + id2 := uint64(20) + if err := write(db, id2, 0, 0, []byte("1")); err != nil { + log.Fatal(err) + } + readData, err = read(db, id2, 0, 1) + if err != nil { + log.Fatal(err) + } + if string(readData) != "1" { + log.Fatalf("mismatch: %s", readData) + } + + if err := write(db, id2, 1, 0, []byte("22")); err != nil { + log.Fatal(err) + } + readData, err = read(db, id2, 0, 2) + if err != nil { + log.Fatal(err) + } + if string(readData) != "22" { + log.Fatalf("mismatch: %s", readData) + } + + id3 := uint64(30) + part1 = RandBytes(rng, BlockSize) + // Write 5 blocks. + var offset uint64 + for i := 0; i < 5; i++ { + if err := write(db, id3, offset, offset, part1); err != nil { + log.Fatal(err) + } + offset += BlockSize + } +} diff --git a/cmd/cql-fuse/fs.go b/cmd/cql-fuse/fs.go new file mode 100644 index 000000000..82d9e1166 --- /dev/null +++ b/cmd/cql-fuse/fs.go @@ -0,0 +1,319 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. 
+// +// Author: Marc Berhault (marc@cockroachlabs.com) + +package main + +import ( + "context" + "database/sql" + "os" + "syscall" + "time" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" +) + +const rootNodeID = 1 + +const ( + fsSchema = ` +CREATE TABLE IF NOT EXISTS fs_namespace ( + parentID INT, + name STRING, + id INT, + PRIMARY KEY (parentID, name) +); + +CREATE TABLE IF NOT EXISTS fs_inode ( + id INT PRIMARY KEY, + inode STRING +); + +CREATE TABLE IF NOT EXISTS fs_block ( + id INT, + block INT, + data BYTES, + PRIMARY KEY (id, block) +); +` +) + +var _ fs.FS = &CFS{} // Root +var _ fs.FSInodeGenerator = &CFS{} // GenerateInode + +// CFS implements a filesystem on top of cockroach. +type CFS struct { + db *sql.DB +} + +func initSchema(db *sql.DB) error { + _, err := db.Exec(fsSchema) + return err +} + +// create inserts a new node. +// parentID: inode ID of the parent directory. +// name: name of the new node +// node: new node +func (cfs CFS) create(ctx context.Context, parentID uint64, name string, node *Node) error { + inode := node.toJSON() + const insertNode = `INSERT INTO fs_inode VALUES (?, ?)` + const insertNamespace = `INSERT INTO fs_namespace VALUES (?, ?, ?)` + + err := client.ExecuteTx(ctx, cfs.db, nil /* txopts */, func(tx *sql.Tx) error { + if _, err := tx.Exec(insertNode, node.ID, inode); err != nil { + return err + } + if _, err := tx.Exec(insertNamespace, parentID, name, node.ID); err != nil { + return err + } + return nil + }) + return err +} + +// remove removes a node give its name and its parent ID. +// If 'checkChildren' is true, fails if the node has children. +func (cfs CFS) remove(ctx context.Context, parentID uint64, name string, checkChildren bool) error { + const lookupSQL = `SELECT id FROM fs_namespace WHERE (parentID, name) = (?, ?)` + const deleteNamespace = `DELETE FROM fs_namespace WHERE (parentID, name) = (?, ?)` + const deleteInode = `DELETE FROM fs_inode WHERE id = ?` + const deleteBlock = `DELETE FROM fs_block WHERE id = ?` + // Start by looking up the node ID. + var id uint64 + if err := cfs.db.QueryRow(lookupSQL, parentID, name).Scan(&id); err != nil { + return err + } + // Check if there are any children. + if checkChildren { + if err := checkIsEmpty(cfs.db, id); err != nil { + return err + } + } + + err := client.ExecuteTx(ctx, cfs.db, nil /* txopts */, func(tx *sql.Tx) error { + // Delete all entries. + if _, err := tx.Exec(deleteNamespace, parentID, name); err != nil { + return err + } + if _, err := tx.Exec(deleteInode, id); err != nil { + return err + } + if _, err := tx.Exec(deleteBlock, id); err != nil { + return err + } + return nil + }) + return err +} + +func (cfs CFS) lookup(parentID uint64, name string) (*Node, error) { + return getInode(cfs.db, parentID, name) +} + +// list returns the children of the node with id 'parentID'. +// Dirent consists of: +// Inode uint64 +// Type DirentType (optional) +// Name string +// TODO(pmattis): lookup all inodes and fill in the type, this will save a Getattr(). 
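To make the data model above concrete, here is a hedged, self-contained sketch (not part of the diff) that resolves a path against the fs_namespace and fs_inode tables one level at a time, the way lookup()/getInode do for a single component. The path, config location and DSN below are placeholder assumptions; the diff then continues with the directory listing helper.

package main

import (
	"database/sql"
	"fmt"
	"strings"

	"github.com/CovenantSQL/CovenantSQL/client"
)

// lookupPath walks fs_namespace from the root inode (ID 1) one component at a
// time, then returns the JSON-encoded inode stored in fs_inode.
func lookupPath(db *sql.DB, path string) (inodeJSON string, err error) {
	id := uint64(1) // rootNodeID
	for _, name := range strings.Split(strings.Trim(path, "/"), "/") {
		if err = db.QueryRow(
			`SELECT id FROM fs_namespace WHERE (parentID, name) = (?, ?)`,
			id, name,
		).Scan(&id); err != nil {
			return
		}
	}
	err = db.QueryRow(`SELECT inode FROM fs_inode WHERE id = ?`, id).Scan(&inodeJSON)
	return
}

func main() {
	// Placeholder config and DSN; in this PR they come from client.Create and
	// the test fixtures rather than hard-coded values.
	if err := client.Init("./conf/config.yaml", []byte("")); err != nil {
		panic(err)
	}
	db, err := sql.Open("covenantsql", "<dsn returned by client.Create>")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	fmt.Println(lookupPath(db, "/docs/readme.txt"))
}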
+func (cfs CFS) list(parentID uint64) ([]fuse.Dirent, error) { + rows, err := cfs.db.Query(`SELECT name, id FROM fs_namespace WHERE parentID = ?`, parentID) + if err != nil { + return nil, err + } + + var results []fuse.Dirent + for rows.Next() { + dirent := fuse.Dirent{Type: fuse.DT_Unknown} + if err := rows.Scan(&dirent.Name, &dirent.Inode); err != nil { + return nil, err + } + results = append(results, dirent) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return results, nil +} + +// validateRename takes a source and destination node and verifies that +// a rename can be performed from source to destination. +// source must not be nil. destination can be. +func validateRename(e sqlExecutor, source, destination *Node) error { + if destination == nil { + // No object at destination: good. + return nil + } + + if source.isDir() { + if destination.isDir() { + // Both are directories: destination must be empty + return checkIsEmpty(e, destination.ID) + } + // directory -> file: not allowed. + return fuse.Errno(syscall.ENOTDIR) + } + + // Source is a file. + if destination.isDir() { + // file -> directory: not allowed. + return fuse.Errno(syscall.EISDIR) + } + return nil +} + +// rename moves 'oldParentID/oldName' to 'newParentID/newName'. +// If 'newParentID/newName' already exists, it is deleted. +// See NOTE on node.go:Rename. +func (cfs CFS) rename( + ctx context.Context, oldParentID, newParentID uint64, oldName, newName string, +) error { + if oldParentID == newParentID && oldName == newName { + return nil + } + + const deleteNamespace = `DELETE FROM fs_namespace WHERE (parentID, name) = (?, ?)` + const insertNamespace = `INSERT INTO fs_namespace VALUES (?, ?, ?)` + const updateNamespace = `UPDATE fs_namespace SET id = ? WHERE (parentID, name) = (?, ?)` + const deleteInode = `DELETE FROM fs_inode WHERE id = ?` + + // Lookup source inode. + srcObject, err := getInode(cfs.db, oldParentID, oldName) + if err != nil { + return err + } + + // Lookup destination inode. + destObject, err := getInode(cfs.db, newParentID, newName) + if err != nil && err != sql.ErrNoRows { + return err + } + + // Check that the rename is allowed. + if err := validateRename(cfs.db, srcObject, destObject); err != nil { + return err + } + + err = client.ExecuteTx(ctx, cfs.db, nil /* txopts */, func(tx *sql.Tx) error { + // At this point we know the following: + // - srcObject is not nil + // - destObject may be nil. If not, its inode can be deleted. + if destObject == nil { + // No new object: use INSERT. + if _, err := tx.Exec(deleteNamespace, oldParentID, oldName); err != nil { + return err + } + + if _, err := tx.Exec(insertNamespace, newParentID, newName, srcObject.ID); err != nil { + return err + } + } else { + // Destination exists. + if _, err := tx.Exec(deleteNamespace, oldParentID, oldName); err != nil { + return err + } + + if _, err := tx.Exec(updateNamespace, srcObject.ID, newParentID, newName); err != nil { + return err + } + + if _, err := tx.Exec(deleteInode, destObject.ID); err != nil { + return err + } + } + return nil + }) + return err +} + +// Root returns the filesystem's root node. +// This node is special: it has a fixed ID and is not persisted. +func (cfs CFS) Root() (fs.Node, error) { + return &Node{cfs: cfs, ID: rootNodeID, Mode: os.ModeDir | defaultPerms}, nil +} + +// GenerateInode returns a new inode ID. 
+func (cfs CFS) GenerateInode(parentInode uint64, name string) uint64 { + return cfs.newUniqueID() +} + +func (cfs CFS) newUniqueID() (id uint64) { + // cockroach's unique_rowid() Contains time and space (node ID) components + // https://www.cockroachlabs.com/docs/stable/sql-faqs.html#\ + // what-are-the-differences-between-uuid-sequences-and-unique_rowid + // So, we just build one in the same way. + var idRand uint32 + nodeIDBytes, err := kms.GetLocalNodeIDBytes() + if err == nil { + idRand = hash.FNVHash32uint(nodeIDBytes) + } + return uint64(time.Now().UnixNano()) + uint64(idRand)<<32 + //if err := cfs.db.QueryRow(`SELECT unique_rowid()`).Scan(&id); err != nil { + // panic(err) + //} + //return +} + +// newFileNode returns a new node struct corresponding to a file. +func (cfs CFS) newFileNode() *Node { + return &Node{ + cfs: cfs, + ID: cfs.newUniqueID(), + Mode: defaultPerms, + } +} + +// newDirNode returns a new node struct corresponding to a directory. +func (cfs CFS) newDirNode() *Node { + return &Node{ + cfs: cfs, + ID: cfs.newUniqueID(), + Mode: os.ModeDir | defaultPerms, + } +} + +// newSymlinkNode returns a new node struct corresponding to a symlink. +func (cfs CFS) newSymlinkNode() *Node { + return &Node{ + cfs: cfs, + ID: cfs.newUniqueID(), + // Symlinks don't have permissions, allow all. + Mode: os.ModeSymlink | allPerms, + } +} diff --git a/cmd/cql-fuse/main.go b/cmd/cql-fuse/main.go new file mode 100644 index 000000000..6f0f9f4b5 --- /dev/null +++ b/cmd/cql-fuse/main.go @@ -0,0 +1,160 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. +// +// Author: Marc Berhault (marc@cockroachlabs.com) + +// This is a simple fuse filesystem that stores all metadata and data +// in cockroach. +// +// Inode relationships are stored in the `namespace` table, and inodes +// themselves in the `inode` table. +// +// Data blocks are stored in the `block` table, indexed by inode ID +// and block number. +// +// Basic functionality is implemented, including: +// - mk/rm directory +// - create/rm files +// - read/write files +// - rename +// - symlinks +// +// WARNING: concurrent access on a single mount is fine. However, +// behavior is undefined (read broken) when mounted more than once at the +// same time. 
Specifically, read/writes will not be seen right away and +// may work on out of date information. +// +// One caveat of the implemented features is that handles are not +// reference counted so if an inode is deleted, all open file descriptors +// pointing to it become invalid. +// +// Some TODOs (definitely not a comprehensive list): +// - support basic attributes (mode, timestamps) +// - support other types: hard links +// - add ref counting (and handle open/release) +// - sparse files: don't store empty blocks +// - sparse files 2: keep track of holes + +package main + +import ( + "database/sql" + "flag" + "fmt" + "os" + "os/signal" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils/log" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + _ "bazil.org/fuse/fs/fstestutil" +) + +var usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, " %s -config -dsn -mount \n\n", os.Args[0]) + flag.PrintDefaults() +} + +func main() { + var config, dsn, mountPoint, password string + + flag.StringVar(&config, "config", "./conf/config.yaml", "config file path") + flag.StringVar(&mountPoint, "mount", "./", "dir to mount") + flag.StringVar(&dsn, "dsn", "", "database url") + flag.StringVar(&password, "password", "", "master key password for covenantsql") + flag.Usage = usage + flag.Parse() + + log.SetLevel(log.DebugLevel) + + err := client.Init(config, []byte(password)) + if err != nil { + log.Fatal(err) + } + + if err != nil { + log.Fatal(err) + } + + db, err := sql.Open("covenantsql", dsn) + if err != nil { + log.Fatal(err) + } + + defer func() { _ = db.Close() }() + + if err := initSchema(db); err != nil { + log.Fatal(err) + } + + cfs := CFS{db} + // Mount filesystem. + c, err := fuse.Mount( + mountPoint, + fuse.FSName("CovenantFS"), + fuse.Subtype("CovenantFS"), + fuse.LocalVolume(), + fuse.VolumeName(""), + ) + if err != nil { + log.Fatal(err) + } + defer func() { + _ = c.Close() + }() + + go func() { + sig := make(chan os.Signal, 1) + signal.Notify(sig, os.Interrupt) + for range sig { + if err := fuse.Unmount(mountPoint); err != nil { + log.Printf("Signal received, but could not unmount: %s", err) + } else { + break + } + } + }() + + // Serve root. + err = fs.Serve(c, cfs) + if err != nil { + log.Fatal(err) + } + + // check if the mount process has an error to report + <-c.Ready + if err := c.MountError; err != nil { + log.Fatal(err) + } +} diff --git a/cmd/cql-fuse/node.go b/cmd/cql-fuse/node.go new file mode 100644 index 000000000..5ae46a38a --- /dev/null +++ b/cmd/cql-fuse/node.go @@ -0,0 +1,421 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. +// +// Author: Marc Berhault (marc@cockroachlabs.com) + +package main + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "log" + "math" + "os" + "sync" + "syscall" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + + "github.com/CovenantSQL/CovenantSQL/client" +) + +var _ fs.Node = &Node{} // Attr +var _ fs.NodeSetattrer = &Node{} // Setattr +var _ fs.NodeStringLookuper = &Node{} // Lookup +var _ fs.HandleReadDirAller = &Node{} // HandleReadDirAller +var _ fs.NodeMkdirer = &Node{} // Mkdir +var _ fs.NodeCreater = &Node{} // Create +var _ fs.NodeRemover = &Node{} // Remove +var _ fs.HandleWriter = &Node{} // Write +var _ fs.HandleReader = &Node{} // Read +var _ fs.NodeFsyncer = &Node{} // Fsync +var _ fs.NodeRenamer = &Node{} // Rename +var _ fs.NodeSymlinker = &Node{} // Symlink +var _ fs.NodeReadlinker = &Node{} // Readlink + +// Default permissions: we don't have any right now. +const defaultPerms = 0755 + +// All permissions. +const allPerms = 0777 + +// Maximum file size. +const maxSize = math.MaxUint64 + +// Maximum length of a symlink target. +const maxSymlinkTargetLength = 4096 + +// Node implements the Node interface. +// ID, Mode, and SymlinkTarget are currently immutable after node creation. +// Size (for files only) is protected by mu. +type Node struct { + cfs CFS + // ID is a unique ID allocated at node creation time. + ID uint64 + // Used for type only, permissions are ignored. + Mode os.FileMode + // SymlinkTarget is the path a symlink points to. + SymlinkTarget string + + // Other fields to add: + // nLinks: number of hard links + // openFDs: number of open file descriptors + // timestamps (probably just ctime and mtime) + + // Implicit fields: + // numBlocks: number of 512b blocks + // blocksize: preferred block size + // mode bits: permissions + + // For regular files only. + // Data blocks are addressed by inode number and offset. + // Any op accessing Size and blocks must lock 'mu'. + mu sync.RWMutex + Size uint64 +} + +// convenience functions to query the mode. +func (n *Node) isDir() bool { + return n.Mode.IsDir() +} + +func (n *Node) isRegular() bool { + return n.Mode.IsRegular() +} + +func (n *Node) isSymlink() bool { + return n.Mode&os.ModeSymlink != 0 +} + +// toJSON returns the json-encoded string for this node. +func (n *Node) toJSON() string { + ret, err := json.Marshal(n) + if err != nil { + panic(err) + } + return string(ret) +} + +// Attr fills attr with the standard metadata for the node. +func (n *Node) Attr(_ context.Context, a *fuse.Attr) error { + a.Inode = n.ID + a.Mode = n.Mode + // Does preferred block size make sense on things other + // than regular files? + a.BlockSize = BlockSize + + if n.isRegular() { + n.mu.RLock() + defer n.mu.RUnlock() + a.Size = n.Size + + // Blocks is the number of 512 byte blocks, regardless of + // filesystem blocksize. + a.Blocks = (n.Size + 511) / 512 + } else if n.isSymlink() { + // Symlink: use target name length. + a.Size = uint64(len(n.SymlinkTarget)) + } + return nil +} + +// Setattr modifies node metadata. 
This includes changing the size. +func (n *Node) Setattr( + ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse, +) error { + if !req.Valid.Size() { + // We can exit early since only setting the size is implemented. + return nil + } + + if !n.isRegular() { + // Setting the size is only available on regular files. + return fuse.Errno(syscall.EINVAL) + } + + if req.Size > maxSize { + // Too big. + return fuse.Errno(syscall.EFBIG) + } + + n.mu.Lock() + defer n.mu.Unlock() + + if req.Size == n.Size { + // Nothing to do. + return nil + } + + // Store the current size in case we need to rollback. + originalSize := n.Size + + // Wrap everything inside a transaction. + err := client.ExecuteTx(ctx, n.cfs.db, nil /* txopts */, func(tx *sql.Tx) error { + // Resize blocks as needed. + if err := resizeBlocks(tx, n.ID, n.Size, req.Size); err != nil { + return err + } + + n.Size = req.Size + return updateNode(tx, n) + }) + + if err != nil { + // Reset our size. + log.Print(err) + n.Size = originalSize + return err + } + return nil +} + +// Lookup looks up a specific entry in the receiver, +// which must be a directory. Lookup should return a Node +// corresponding to the entry. If the name does not exist in +// the directory, Lookup should return ENOENT. +// +// Lookup need not to handle the names "." and "..". +func (n *Node) Lookup(_ context.Context, name string) (fs.Node, error) { + if !n.isDir() { + return nil, fuse.Errno(syscall.ENOTDIR) + } + node, err := n.cfs.lookup(n.ID, name) + if err != nil { + if err == sql.ErrNoRows { + return nil, fuse.ENOENT + } + return nil, err + } + node.cfs = n.cfs + return node, nil +} + +// ReadDirAll returns the list of child inodes. +func (n *Node) ReadDirAll(_ context.Context) ([]fuse.Dirent, error) { + if !n.isDir() { + return nil, fuse.Errno(syscall.ENOTDIR) + } + return n.cfs.list(n.ID) +} + +// Mkdir creates a directory in 'n'. +// We let the sql query fail if the directory already exists. +// TODO(marc): better handling of errors. +func (n *Node) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) { + if !n.isDir() { + return nil, fuse.Errno(syscall.ENOTDIR) + } + if !req.Mode.IsDir() { + return nil, fuse.Errno(syscall.ENOTDIR) + } + + node := n.cfs.newDirNode() + err := n.cfs.create(ctx, n.ID, req.Name, node) + if err != nil { + return nil, err + } + return node, nil +} + +// Create creates a new file in the receiver directory. +func (n *Node) Create( + ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse, +) (fs.Node, fs.Handle, error) { + if !n.isDir() { + return nil, nil, fuse.Errno(syscall.ENOTDIR) + } + if req.Mode.IsDir() { + return nil, nil, fuse.Errno(syscall.EISDIR) + } else if !req.Mode.IsRegular() { + return nil, nil, fuse.Errno(syscall.EINVAL) + } + + node := n.cfs.newFileNode() + err := n.cfs.create(ctx, n.ID, req.Name, node) + if err != nil { + return nil, nil, err + } + return node, node, nil +} + +// Remove may be unlink or rmdir. +func (n *Node) Remove(ctx context.Context, req *fuse.RemoveRequest) error { + if !n.isDir() { + return fuse.Errno(syscall.ENOTDIR) + } + + if req.Dir { + // Rmdir. + return n.cfs.remove(ctx, n.ID, req.Name, true /* checkChildren */) + } + // Unlink file/symlink. + return n.cfs.remove(ctx, n.ID, req.Name, false /* !checkChildren */) +} + +// Write writes data to 'n'. It may overwrite existing data, or grow it. 
+func (n *Node) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error { + if !n.isRegular() { + return fuse.Errno(syscall.EINVAL) + } + if req.Offset < 0 { + return fuse.Errno(syscall.EINVAL) + } + if len(req.Data) == 0 { + return nil + } + + n.mu.Lock() + defer n.mu.Unlock() + + newSize := uint64(req.Offset) + uint64(len(req.Data)) + if newSize > maxSize { + return fuse.Errno(syscall.EFBIG) + } + + // Store the current size in case we need to rollback. + originalSize := n.Size + + // Wrap everything inside a transaction. + err := client.ExecuteTx(ctx, n.cfs.db, nil /* txopts */, func(tx *sql.Tx) error { + + // Update blocks. They will be added as needed. + if err := write(tx, n.ID, n.Size, uint64(req.Offset), req.Data); err != nil { + return err + } + + if newSize > originalSize { + // This was an append, commit the size change. + n.Size = newSize + if err := updateNode(tx, n); err != nil { + return err + } + } + return nil + }) + + if err != nil { + // Reset our size. + log.Print(err) + n.Size = originalSize + return err + } + + // We always write everything. + resp.Size = len(req.Data) + return nil +} + +// Read reads data from 'n'. +func (n *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { + if !n.isRegular() { + return fuse.Errno(syscall.EINVAL) + } + if req.Offset < 0 { + // Before beginning of file. + return fuse.Errno(syscall.EINVAL) + } + if req.Size == 0 { + // No bytes requested. + return nil + } + offset := uint64(req.Offset) + + n.mu.RLock() + defer n.mu.RUnlock() + if offset >= n.Size { + // Beyond end of file. + return nil + } + + to := min(n.Size, offset+uint64(req.Size)) + if offset == to { + return nil + } + + data, err := read(n.cfs.db, n.ID, offset, to) + if err != nil { + return err + } + resp.Data = data + return nil +} + +// Fsync is a noop for us, we always push writes to the DB. We do need to implement it though. +func (n *Node) Fsync(_ context.Context, _ *fuse.FsyncRequest) error { + return nil +} + +// Rename renames 'req.OldName' to 'req.NewName', optionally moving it to 'newDir'. +// If req.NewName exists, it is deleted. It is assumed that it cannot be a directory. +// NOTE: we do not keep track of opens, so we delete existing destinations right away. +// This means that anyone holding an open file descriptor on the destination will fail +// when trying to use it. +// To properly handle this, we need to count references (including inode -> inode refs, +// and open handles) and delete the inode only when it reaches zero. +func (n *Node) Rename(ctx context.Context, req *fuse.RenameRequest, newDir fs.Node) error { + newNode, ok := newDir.(*Node) + if !ok { + return fmt.Errorf("newDir is not a Node: %v", newDir) + } + if !n.isDir() || !newNode.isDir() { + return fuse.Errno(syscall.ENOTDIR) + } + return n.cfs.rename(ctx, n.ID, newNode.ID, req.OldName, req.NewName) +} + +// Symlink creates a new symbolic link in the receiver node, which must +// be a directory. +func (n *Node) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) { + if !n.isDir() { + return nil, fuse.Errno(syscall.ENOTDIR) + } + if len(req.Target) > maxSymlinkTargetLength { + return nil, fuse.Errno(syscall.ENAMETOOLONG) + } + node := n.cfs.newSymlinkNode() + node.SymlinkTarget = req.Target + err := n.cfs.create(ctx, n.ID, req.NewName, node) + if err != nil { + return nil, err + } + return node, nil +} + +// Readlink reads a symbolic link. 
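As a small illustration of what ends up in fs_inode for the symlink created above, the sketch below (not part of the diff) marshals a struct that mirrors Node's exported fields; the ID, target and sample output are assumptions. The diff then continues with the Readlink handler.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// node mirrors the exported fields of the Node type above; the unexported cfs
// and mu fields are skipped by encoding/json in the real type as well.
type node struct {
	ID            uint64
	Mode          os.FileMode
	SymlinkTarget string
	Size          uint64
}

func main() {
	n := node{ID: 42, Mode: os.ModeSymlink | 0777, SymlinkTarget: "/etc/hosts"}
	out, _ := json.Marshal(n)
	fmt.Println(string(out))
	// Prints something like:
	// {"ID":42,"Mode":134218239,"SymlinkTarget":"/etc/hosts","Size":0}
}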
+func (n *Node) Readlink(_ context.Context, req *fuse.ReadlinkRequest) (string, error) { + if !n.isSymlink() { + return "", fuse.Errno(syscall.EINVAL) + } + return n.SymlinkTarget, nil +} diff --git a/cmd/cql-fuse/randbytes.go b/cmd/cql-fuse/randbytes.go new file mode 100644 index 000000000..e2532e875 --- /dev/null +++ b/cmd/cql-fuse/randbytes.go @@ -0,0 +1,62 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. +// +// Author: Marc Berhault (marc@cockroachlabs.com) +package main + +import ( + "math/rand" + "time" +) + +var randLetters = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +// NewPseudoRand returns an instance of math/rand.Rand seeded from crypto/rand +// and its seed so we can easily and cheaply generate unique streams of +// numbers. The created object is not safe for concurrent access. +func NewPseudoRand() (*rand.Rand, int64) { + seed := time.Now().UnixNano() + return rand.New(rand.NewSource(seed)), seed +} + +// RandBytes returns a byte slice of the given length with random +// data. +func RandBytes(r *rand.Rand, size int) []byte { + if size <= 0 { + return nil + } + + arr := make([]byte, size) + for i := 0; i < len(arr); i++ { + arr[i] = randLetters[r.Intn(len(randLetters))] + } + return arr +} diff --git a/cmd/cql-fuse/sql.go b/cmd/cql-fuse/sql.go new file mode 100644 index 000000000..6009e78fd --- /dev/null +++ b/cmd/cql-fuse/sql.go @@ -0,0 +1,153 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2015 The Cockroach Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +// implied. See the License for the specific language governing +// permissions and limitations under the License. See the AUTHORS file +// for names of contributors. +// +// Author: Marc Berhault (marc@cockroachlabs.com) + +package main + +import ( + "database/sql" + "encoding/json" + "syscall" + + "bazil.org/fuse" +) + +// sqlExecutor is an interface needed for basic queries. +// It is implemented by both sql.DB and sql.Txn. +type sqlExecutor interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row +} + +// getInode looks up an inode given its name and its parent ID. +// If not found, error will be sql.ErrNoRows. +func getInode(e sqlExecutor, parentID uint64, name string) (*Node, error) { + var raw string + const sql = `SELECT inode FROM fs_inode WHERE id = +(SELECT id FROM fs_namespace WHERE (parentID, name) = (?, ?))` + if err := e.QueryRow(sql, parentID, name).Scan(&raw); err != nil { + return nil, err + } + + node := &Node{} + err := json.Unmarshal([]byte(raw), node) + return node, err +} + +// checkIsEmpty returns nil if 'id' has no children. +func checkIsEmpty(e sqlExecutor, id uint64) error { + var count uint64 + const countSQL = ` +SELECT COUNT(parentID) FROM fs_namespace WHERE parentID = ?` + if err := e.QueryRow(countSQL, id).Scan(&count); err != nil { + return err + } + if count != 0 { + return fuse.Errno(syscall.ENOTEMPTY) + } + return nil +} + +// updateNode updates an existing node descriptor. +func updateNode(e sqlExecutor, node *Node) error { + inode := node.toJSON() + const sql = ` +UPDATE fs_inode SET inode = ? WHERE id = ?; +` + if _, err := e.Exec(sql, inode, node.ID); err != nil { + return err + } + return nil +} + +// getBlockData returns the block data for a single block. +func getBlockData(e sqlExecutor, inodeID uint64, block int) ([]byte, error) { + var data []byte + const sql = `SELECT data FROM fs_block WHERE id = ? AND block = ?` + if err := e.QueryRow(sql, inodeID, block).Scan(&data); err != nil { + return nil, err + } + return data, nil +} + +// updateBlockData overwrites the data for a single block. +func updateBlockData(e sqlExecutor, inodeID uint64, block int, data []byte) error { + const sql = `UPDATE fs_block SET data = ? WHERE (id, block) = (?, ?)` + if _, err := e.Exec(sql, data, inodeID, block); err != nil { + return err + } + return nil +} + +type blockInfo struct { + block int + data []byte +} + +// getBlocks fetches all the blocks for a given inode and returns +// a list of blockInfo objects. +func getBlocks(e sqlExecutor, inodeID uint64) ([]blockInfo, error) { + stmt := `SELECT block, data FROM fs_block WHERE id = ?` + rows, err := e.Query(stmt, inodeID) + if err != nil { + return nil, err + } + return buildBlockInfos(rows) +} + +// getBlocksBetween fetches blocks with IDs [start, end] for a given inode +// and returns a list of blockInfo objects. +func getBlocksBetween(e sqlExecutor, inodeID uint64, start, end int) ([]blockInfo, error) { + stmt := `SELECT block, data FROM fs_block WHERE id = ? AND block >= ? 
AND block <= ?` + rows, err := e.Query(stmt, inodeID, start, end) + if err != nil { + return nil, err + } + return buildBlockInfos(rows) +} + +func buildBlockInfos(rows *sql.Rows) ([]blockInfo, error) { + var results []blockInfo + for rows.Next() { + b := blockInfo{} + if err := rows.Scan(&b.block, &b.data); err != nil { + return nil, err + } + results = append(results, b) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return results, nil +} diff --git a/cmd/cql-minerd/benchGNTE.sh b/cmd/cql-minerd/benchGNTE.sh index b26b2d207..fd6da72bb 100755 --- a/cmd/cql-minerd/benchGNTE.sh +++ b/cmd/cql-minerd/benchGNTE.sh @@ -6,3 +6,21 @@ go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ + +go test -cpu=4 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ && \ +go test -cpu=4 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ && \ +go test -cpu=4 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ && \ +go test -cpu=4 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ && \ +go test -cpu=4 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ + +go test -cpu=2 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ && \ +go test -cpu=2 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ && \ +go test -cpu=2 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ && \ +go test -cpu=2 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ && \ +go test -cpu=2 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ + +go test -cpu=1 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ && \ +go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ && \ +go test -cpu=1 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ && \ +go test -cpu=1 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ && \ +go test -cpu=1 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ diff --git a/cmd/cql-minerd/dbms.go b/cmd/cql-minerd/dbms.go index d117aec44..d033f16de 100644 --- a/cmd/cql-minerd/dbms.go +++ b/cmd/cql-minerd/dbms.go @@ -30,10 +30,9 @@ import ( "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/worker" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/pkg/errors" ) @@ -88,14 +87,14 @@ func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { } // load genesis block - var block *ct.Block + var block *types.Block if block, err = loadGenesisBlock(testFixture); err != nil { err = errors.Wrap(err, "load genesis block failed") return } // add to dbms - instance := &wt.ServiceInstance{ + instance := &types.ServiceInstance{ DatabaseID: testFixture.DatabaseID, Peers: dbPeers, GenesisBlock: block, @@ -110,7 +109,7 @@ func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { return } -func loadGenesisBlock(fixture *conf.MinerDatabaseFixture) (block *ct.Block, err error) { +func loadGenesisBlock(fixture *conf.MinerDatabaseFixture) (block *types.Block, err error) { if fixture.GenesisBlockFile == "" { err = os.ErrNotExist return @@ -148,7 +147,7 @@ func loadGenesisBlock(fixture *conf.MinerDatabaseFixture) (block *ct.Block, err } // copied from sqlchain.xxx_test. 
-func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) { +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) { // Generate key pair priv, pub, err := asymmetric.GenSecp256k1KeyPair() @@ -159,9 +158,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: rootHash, @@ -169,12 +168,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error Timestamp: time.Now().UTC(), }, }, - Queries: make([]*hash.Hash, rand.Intn(10)+10), - } - - for i := range b.Queries { - b.Queries[i] = new(hash.Hash) - rand.Read(b.Queries[i][:]) } if isGenesis { diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index eedc70c67..bd6237d67 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -21,11 +21,13 @@ package main import ( "context" "database/sql" + "fmt" "io/ioutil" "math/rand" "os" "os/exec" "path/filepath" + "runtime" "sync" "sync/atomic" "syscall" @@ -36,6 +38,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/go-sqlite3-encrypt" . "github.com/smartystreets/goconvey/convey" ) @@ -326,7 +329,7 @@ func TestFullProcess(t *testing.T) { So(err, ShouldBeNil) // create - dsn, err := client.Create(client.ResourceMeta{Node: 1}) + dsn, err := client.Create(client.ResourceMeta{Node: 2}) So(err, ShouldBeNil) log.Infof("the created database dsn is %v", dsn) @@ -389,17 +392,22 @@ func TestFullProcess(t *testing.T) { }) } +const ROWSTART = 1000000 +const TABLENAME = "insert_table0" + func prepareBenchTable(db *sql.DB) { - _, err := db.Exec("DROP TABLE IF EXISTS test;") + _, err := db.Exec("DROP TABLE IF EXISTS " + TABLENAME + ";") So(err, ShouldBeNil) - _, err = db.Exec("CREATE TABLE test ( indexedColumn, nonIndexedColumn );") + _, err = db.Exec(`CREATE TABLE ` + TABLENAME + ` ("k" INT, "v1" TEXT, PRIMARY KEY("k"))`) So(err, ShouldBeNil) - _, err = db.Exec("CREATE INDEX testIndexedColumn ON test ( indexedColumn );") + _, err = db.Exec("REPLACE INTO "+TABLENAME+" VALUES(?, ?)", ROWSTART-1, "test") So(err, ShouldBeNil) +} - _, err = db.Exec("INSERT INTO test VALUES(?, ?)", 4, 4) +func cleanBenchTable(db *sql.DB) { + _, err := db.Exec("DELETE FROM "+TABLENAME+" WHERE k >= ?", ROWSTART) So(err, ShouldBeNil) } @@ -409,38 +417,26 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { prepareBenchTable(db) } - var i int32 - var insertedCount int - - rand.Seed(time.Now().UnixNano()) - start := (rand.Int31() % 100) * 10000 - - //b.Run("benchmark Single INSERT", func(b *testing.B) { - // b.ResetTimer() - // insertedCount = b.N - // for i := 0; i < b.N; i++ { - // _, err = db.Exec("INSERT INTO test ( indexedColumn, nonIndexedColumn ) VALUES"+ - // "(?, ?)", int(start)+i, i, - // ) - // if err != nil { - // b.Fatal(err) - // } - // } - //}) - // - //if createDB { - // prepareBenchTable(db) - //} - // - b.Run("benchmark Multi INSERT", func(b *testing.B) { + cleanBenchTable(db) + + var i int64 + i = -1 + + b.Run("benchmark INSERT", func(b *testing.B) { b.ResetTimer() - insertedCount = b.N b.RunParallel(func(pb *testing.PB) { for pb.Next() { - ii := atomic.AddInt32(&i, 
1) - _, err = db.Exec("INSERT INTO test ( indexedColumn, nonIndexedColumn ) VALUES"+ - "(?, ?)", start+ii, ii, + ii := atomic.AddInt64(&i, 1) + _, err = db.Exec("INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ + "(?, ?)", ROWSTART+ii, ii, ) + for err != nil && err.Error() == sqlite3.ErrBusy.Error() { + // retry forever + log.Warnf("ROWSTART+ii = %d retried", ROWSTART+ii) + _, err = db.Exec("INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ + "(?, ?)", ROWSTART+ii, ii, + ) + } if err != nil { b.Fatal(err) } @@ -448,37 +444,56 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { }) }) - rowCount := db.QueryRow("SELECT COUNT(1) FROM test") - var count int + routineCount := runtime.NumGoroutine() + if routineCount > 100 { + b.Errorf("go routine count: %d", routineCount) + } else { + log.Infof("go routine count: %d", routineCount) + } + + rowCount := db.QueryRow("SELECT COUNT(1) FROM " + TABLENAME) + var count int64 err = rowCount.Scan(&count) if err != nil { b.Fatal(err) } - log.Warnf("Row Count: %d", count) + log.Warnf("Row Count: %v", count) b.Run("benchmark SELECT", func(b *testing.B) { b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { - i := atomic.AddInt32(&i, 1) - index := int(i)%insertedCount + int(start) + 1 - row := db.QueryRow("SELECT nonIndexedColumn FROM test WHERE indexedColumn = ? LIMIT 1", index) - var result int + var index int64 + if createDB { //only data by insert + index = rand.Int63n(count-1) + ROWSTART + } else { //has data before ROWSTART + index = rand.Int63n(count - 1) + } + //log.Debugf("index = %d", index) + row := db.QueryRow("SELECT v1 FROM "+TABLENAME+" WHERE k = ? LIMIT 1", index) + var result []byte err = row.Scan(&result) - if err != nil || result < 0 { - log.Errorf("i = %d", i) + if err != nil || (len(result) == 0) { + log.Errorf("index = %d", index) b.Fatal(err) } } }) }) - row := db.QueryRow("SELECT nonIndexedColumn FROM test LIMIT 1") + routineCount = runtime.NumGoroutine() + if routineCount > 100 { + b.Errorf("go routine count: %d", routineCount) + } else { + log.Infof("go routine count: %d", routineCount) + } - var result int - err = row.Scan(&result) - So(err, ShouldBeNil) - So(result, ShouldEqual, 4) + //row := db.QueryRow("SELECT nonIndexedColumn FROM test LIMIT 1") + + //var result int + //err = row.Scan(&result) + //So(err, ShouldBeNil) + //So(result, ShouldEqual, 4) err = db.Close() So(err, ShouldBeNil) @@ -487,18 +502,18 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { log.Warnf("Benchmark for %d Miners, BypassSignature: %v", minerCount, bypassSign) asymmetric.BypassSignature = bypassSign - //if minerCount > 0 { - // startNodesProfile(bypassSign) - // utils.WaitToConnect(context.Background(), "127.0.0.1", []int{ - // 2144, - // 2145, - // 2146, - // 3122, - // 3121, - // 3120, - // }, 2*time.Second) - // time.Sleep(time.Second) - //} + if minerCount > 0 { + startNodesProfile(bypassSign) + utils.WaitToConnect(context.Background(), "127.0.0.1", []int{ + 2144, + 2145, + 2146, + 3122, + 3121, + 3120, + }, 2*time.Second) + time.Sleep(time.Second) + } // Create temp directory testDataDir, err := ioutil.TempDir(testWorkingDir, "covenantsql") @@ -506,9 +521,9 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { panic(err) } defer os.RemoveAll(testDataDir) - clientConf := FJ(testWorkingDir, "./service/node_c/config.yaml") + clientConf := FJ(testWorkingDir, "./integration/node_c/config.yaml") tempConf := FJ(testDataDir, "config.yaml") - clientKey 
:= FJ(testWorkingDir, "./service/node_c/private.key") + clientKey := FJ(testWorkingDir, "./integration/node_c/private.key") tempKey := FJ(testDataDir, "private.key") utils.CopyFile(clientConf, tempConf) utils.CopyFile(clientKey, tempKey) @@ -536,7 +551,7 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { db, err := sql.Open("covenantsql", dsn) So(err, ShouldBeNil) - benchDB(b, db, true) + benchDB(b, db, minerCount > 0) err = client.Drop(dsn) So(err, ShouldBeNil) @@ -545,17 +560,33 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { } func BenchmarkSQLite(b *testing.B) { - os.Remove("./foo.db") - defer os.Remove("./foo.db") - - db, err := sql.Open("sqlite3", "./foo.db?_journal_mode=WAL&_synchronous=NORMAL&cache=shared") - if err != nil { - log.Fatal(err) + var db *sql.DB + var createDB bool + millionFile := fmt.Sprintf("/data/sqlite_bigdata/insert_multi_sqlitedb0_1_%v", ROWSTART) + f, err := os.Open(millionFile) + if err != nil && os.IsNotExist(err) { + os.Remove("./foo.db") + defer os.Remove("./foo.db") + + db, err = sql.Open("sqlite3", "./foo.db?_journal_mode=WAL&_synchronous=NORMAL&cache=shared") + if err != nil { + log.Fatal(err) + } + createDB = true + defer db.Close() + } else { + f.Close() + db, err = sql.Open("sqlite3", millionFile+"?_journal_mode=WAL&_synchronous=NORMAL&cache=shared") + log.Infof("Testing sqlite3 million data exist file %v", millionFile) + if err != nil { + log.Fatal(err) + } + createDB = false + defer db.Close() } - defer db.Close() Convey("bench SQLite", b, func() { - benchDB(b, db, true) + benchDB(b, db, createDB) }) } diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index 716bd8fee..1dfbd5b34 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -26,7 +26,7 @@ import ( "os" "os/signal" "runtime" - "runtime/trace" + //"runtime/trace" "syscall" "time" @@ -254,22 +254,22 @@ func main() { go graphite.Graphite(metrics.DefaultRegistry, 5*time.Second, minerName, addr) } - if traceFile != "" { - f, err := os.Create(traceFile) - if err != nil { - log.WithError(err).Fatal("failed to create trace output file") - } - defer func() { - if err := f.Close(); err != nil { - log.WithError(err).Fatal("failed to close trace file") - } - }() - - if err := trace.Start(f); err != nil { - log.WithError(err).Fatal("failed to start trace") - } - defer trace.Stop() - } + //if traceFile != "" { + // f, err := os.Create(traceFile) + // if err != nil { + // log.WithError(err).Fatal("failed to create trace output file") + // } + // defer func() { + // if err := f.Close(); err != nil { + // log.WithError(err).Fatal("failed to close trace file") + // } + // }() + + // if err := trace.Start(f); err != nil { + // log.WithError(err).Fatal("failed to start trace") + // } + // defer trace.Stop() + //} <-signalCh diff --git a/cmd/cql-mysql-adapter/cursor.go b/cmd/cql-mysql-adapter/cursor.go index 1622f5537..7583c1996 100644 --- a/cmd/cql-mysql-adapter/cursor.go +++ b/cmd/cql-mysql-adapter/cursor.go @@ -290,7 +290,7 @@ func (c *Cursor) UseDB(dbName string) (err error) { } // HandleQuery handle COM_QUERY comamnd, like SELECT, INSERT, UPDATE, etc... -// if Result has a Resultset (SELECT, SHOW, etc...), we will send this as the repsonse, otherwise, we will send Result. +// if Result has a Resultset (SELECT, SHOW, etc...), we will send this as the response, otherwise, we will send Result. 
func (c *Cursor) HandleQuery(query string) (r *my.Result, err error) { var processed bool diff --git a/cmd/cql-observer/api.go b/cmd/cql-observer/api.go index 61b82aee0..8e4c0c483 100644 --- a/cmd/cql-observer/api.go +++ b/cmd/cql-observer/api.go @@ -27,9 +27,8 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/gorilla/mux" ) @@ -50,6 +49,10 @@ func sendResponse(code int, success bool, msg interface{}, data interface{}, rw }) } +func notSupported(rw http.ResponseWriter, _ *http.Request) { + sendResponse(500, false, fmt.Sprintf("not supported in %v", version), nil, rw) +} + type explorerAPI struct { service *Service } @@ -76,26 +79,7 @@ func (a *explorerAPI) GetAck(rw http.ResponseWriter, r *http.Request) { } // format ack to json response - sendResponse(200, true, "", map[string]interface{}{ - "ack": map[string]interface{}{ - "request": map[string]interface{}{ - "hash": ack.Response.Request.Hash.String(), - "timestamp": a.formatTime(ack.Response.Request.Timestamp), - "node": ack.Response.Request.NodeID, - "type": ack.Response.Request.QueryType.String(), - "count": ack.Response.Request.BatchCount, - }, - "response": map[string]interface{}{ - "hash": ack.Response.Hash.String(), - "timestamp": a.formatTime(ack.Response.Timestamp), - "node": ack.Response.NodeID, - "log_position": ack.Response.LogOffset, - }, - "hash": ack.Hash.String(), - "timestamp": a.formatTime(ack.AckHeader.Timestamp), - "node": ack.AckHeader.NodeID, - }, - }, rw) + sendResponse(200, true, "", a.formatAck(ack), rw) } func (a *explorerAPI) GetRequest(rw http.ResponseWriter, r *http.Request) { @@ -122,7 +106,7 @@ func (a *explorerAPI) GetRequest(rw http.ResponseWriter, r *http.Request) { sendResponse(200, true, "", a.formatRequest(req), rw) } -func (a *explorerAPI) GetRequestByOffset(rw http.ResponseWriter, r *http.Request) { +func (a *explorerAPI) GetResponse(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) dbID, err := a.getDBID(vars) @@ -131,28 +115,46 @@ func (a *explorerAPI) GetRequestByOffset(rw http.ResponseWriter, r *http.Request return } - offsetStr := vars["offset"] - if offsetStr == "" { - sendResponse(400, false, "", nil, rw) + h, err := a.getHash(vars) + if err != nil { + sendResponse(400, false, err, nil, rw) return } - offset, err := strconv.ParseUint(offsetStr, 10, 64) + resp, err := a.service.getResponseHeader(dbID, h) + if err != nil { + sendResponse(500, false, err, nil, rw) + return + } + + sendResponse(200, true, "", a.formatResponseHeader(resp), rw) +} + +func (a *explorerAPI) GetBlock(rw http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + dbID, err := a.getDBID(vars) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + h, err := a.getHash(vars) if err != nil { sendResponse(400, false, err, nil, rw) return } - req, err := a.service.getRequestByOffset(dbID, offset) + _, height, block, err := a.service.getBlock(dbID, h) if err != nil { sendResponse(500, false, err, nil, rw) return } - sendResponse(200, true, "", a.formatRequest(req), rw) + sendResponse(200, true, "", a.formatBlock(height, block), rw) } -func (a *explorerAPI) GetBlock(rw http.ResponseWriter, r *http.Request) { +func (a *explorerAPI) GetBlockV3(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) dbID, err := 
a.getDBID(vars) @@ -167,13 +169,13 @@ func (a *explorerAPI) GetBlock(rw http.ResponseWriter, r *http.Request) { return } - height, block, err := a.service.getBlock(dbID, h) + count, height, block, err := a.service.getBlock(dbID, h) if err != nil { sendResponse(500, false, err, nil, rw) return } - sendResponse(200, true, "", a.formatBlock(height, block), rw) + sendResponse(200, true, "", a.formatBlockV3(count, height, block), rw) } func (a *explorerAPI) GetBlockByCount(rw http.ResponseWriter, r *http.Request) { @@ -187,7 +189,7 @@ func (a *explorerAPI) GetBlockByCount(rw http.ResponseWriter, r *http.Request) { countStr := vars["count"] if countStr == "" { - sendResponse(400, false, "", nil, rw) + sendResponse(400, false, "empty count", nil, rw) return } @@ -208,6 +210,38 @@ func (a *explorerAPI) GetBlockByCount(rw http.ResponseWriter, r *http.Request) { sendResponse(200, true, "", a.formatBlockV2(count, height, block), rw) } +func (a *explorerAPI) GetBlockByCountV3(rw http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + dbID, err := a.getDBID(vars) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + countStr := vars["count"] + if countStr == "" { + sendResponse(400, false, "empty count", nil, rw) + return + } + + countNumber, err := strconv.ParseInt(countStr, 10, 32) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + count := int32(countNumber) + + height, block, err := a.service.getBlockByCount(dbID, count) + if err != nil { + sendResponse(500, false, err, nil, rw) + return + } + + sendResponse(200, true, "", a.formatBlockV3(count, height, block), rw) +} + func (a *explorerAPI) GetBlockByHeight(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) @@ -219,7 +253,7 @@ func (a *explorerAPI) GetBlockByHeight(rw http.ResponseWriter, r *http.Request) heightStr := vars["height"] if heightStr == "" { - sendResponse(400, false, "", nil, rw) + sendResponse(400, false, "empty height", nil, rw) return } @@ -231,7 +265,7 @@ func (a *explorerAPI) GetBlockByHeight(rw http.ResponseWriter, r *http.Request) height := int32(heightNumber) - block, err := a.service.getBlockByHeight(dbID, height) + _, block, err := a.service.getBlockByHeight(dbID, height) if err != nil { sendResponse(500, false, err, nil, rw) return @@ -240,7 +274,39 @@ func (a *explorerAPI) GetBlockByHeight(rw http.ResponseWriter, r *http.Request) sendResponse(200, true, "", a.formatBlock(height, block), rw) } -func (a *explorerAPI) getHighestBlock(rw http.ResponseWriter, r *http.Request) { +func (a *explorerAPI) GetBlockByHeightV3(rw http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + + dbID, err := a.getDBID(vars) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + heightStr := vars["height"] + if heightStr == "" { + sendResponse(400, false, "empty height", nil, rw) + return + } + + heightNumber, err := strconv.ParseInt(heightStr, 10, 32) + if err != nil { + sendResponse(400, false, err, nil, rw) + return + } + + height := int32(heightNumber) + + count, block, err := a.service.getBlockByHeight(dbID, height) + if err != nil { + sendResponse(500, false, err, nil, rw) + return + } + + sendResponse(200, true, "", a.formatBlockV3(count, height, block), rw) +} + +func (a *explorerAPI) GetHighestBlock(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) dbID, err := a.getDBID(vars) @@ -271,7 +337,7 @@ func (a *explorerAPI) getHighestBlock(rw http.ResponseWriter, r *http.Request) { sendResponse(200, true, "", a.formatBlock(height, 
block), rw) } -func (a *explorerAPI) getHighestBlockV2(rw http.ResponseWriter, r *http.Request) { +func (a *explorerAPI) GetHighestBlockV2(rw http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) dbID, err := a.getDBID(vars) @@ -302,36 +368,46 @@ func (a *explorerAPI) getHighestBlockV2(rw http.ResponseWriter, r *http.Request) sendResponse(200, true, "", a.formatBlockV2(count, height, block), rw) } -func (a *explorerAPI) formatBlock(height int32, b *ct.Block) map[string]interface{} { - queries := make([]string, 0, len(b.Queries)) +func (a *explorerAPI) GetHighestBlockV3(rw http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) - for _, q := range b.Queries { - queries = append(queries, q.String()) + dbID, err := a.getDBID(vars) + if err != nil { + sendResponse(400, false, err, nil, rw) + return } - return map[string]interface{}{ - "block": map[string]interface{}{ - "height": height, - "hash": b.BlockHash().String(), - "genesis_hash": b.GenesisHash().String(), - "timestamp": a.formatTime(b.Timestamp()), - "version": b.SignedHeader.Version, - "producer": b.Producer(), - "queries": queries, - }, + count, height, block, err := a.service.getHighestBlockV2(dbID) + if err == ErrNotFound { + // try to add subscription + err = a.service.subscribe(dbID, "oldest") + if err == nil { + count, height, block, err = a.service.getHighestBlockV2(dbID) + if err != nil { + sendResponse(500, false, err, nil, rw) + return + } + } else { + sendResponse(400, false, err, nil, rw) + return + } + } else if err != nil { + sendResponse(500, false, err, nil, rw) + return } + + sendResponse(200, true, "", a.formatBlockV3(count, height, block), rw) } -func (a *explorerAPI) formatBlockV2(count, height int32, b *ct.Block) map[string]interface{} { - queries := make([]string, 0, len(b.Queries)) +func (a *explorerAPI) formatBlock(height int32, b *types.Block) (res map[string]interface{}) { + queries := make([]string, 0, len(b.Acks)) - for _, q := range b.Queries { - queries = append(queries, q.String()) + for _, q := range b.Acks { + queries = append(queries, q.Hash().String()) } return map[string]interface{}{ "block": map[string]interface{}{ - "count": count, "height": height, "hash": b.BlockHash().String(), "genesis_hash": b.GenesisHash().String(), @@ -343,7 +419,40 @@ func (a *explorerAPI) formatBlockV2(count, height int32, b *ct.Block) map[string } } -func (a *explorerAPI) formatRequest(req *wt.Request) map[string]interface{} { +func (a *explorerAPI) formatBlockV2(count, height int32, b *types.Block) (res map[string]interface{}) { + res = a.formatBlock(height, b) + res["block"].(map[string]interface{})["count"] = count + return +} + +func (a *explorerAPI) formatBlockV3(count, height int32, b *types.Block) (res map[string]interface{}) { + res = a.formatBlockV2(count, height, b) + blockRes := res["block"].(map[string]interface{}) + blockRes["acks"] = func() (acks []interface{}) { + acks = make([]interface{}, 0, len(b.Acks)) + + for _, ack := range b.Acks { + acks = append(acks, a.formatAck(ack)["ack"]) + } + + return + }() + blockRes["queries"] = func() (tracks []interface{}) { + tracks = make([]interface{}, 0, len(b.QueryTxs)) + + for _, tx := range b.QueryTxs { + t := a.formatRequest(tx.Request) + t["response"] = a.formatResponseHeader(tx.Response)["response"] + tracks = append(tracks, t) + } + + return + }() + + return +} + +func (a *explorerAPI) formatRequest(req *types.Request) map[string]interface{} { // get queries queries := make([]map[string]interface{}, 0, req.Header.BatchCount) @@ -365,7 +474,7 @@ 
func (a *explorerAPI) formatRequest(req *wt.Request) map[string]interface{} { return map[string]interface{}{ "request": map[string]interface{}{ - "hash": req.Header.Hash.String(), + "hash": req.Header.Hash().String(), "timestamp": a.formatTime(req.Header.Timestamp), "node": req.Header.NodeID, "type": req.Header.QueryType.String(), @@ -375,6 +484,52 @@ func (a *explorerAPI) formatRequest(req *wt.Request) map[string]interface{} { } } +func (a *explorerAPI) formatResponseHeader(resp *types.SignedResponseHeader) map[string]interface{} { + return map[string]interface{}{ + "response": map[string]interface{}{ + "hash": resp.Hash().String(), + "timestamp": a.formatTime(resp.Timestamp), + "node": resp.NodeID, + "row_count": resp.RowCount, + "log_id": resp.LogOffset, + "last_insert_id": resp.LastInsertID, + "affected_rows": resp.AffectedRows, + }, + "request": map[string]interface{}{ + "hash": resp.Request.Hash().String(), + "timestamp": a.formatTime(resp.Request.Timestamp), + "node": resp.Request.NodeID, + "type": resp.Request.QueryType.String(), + "count": resp.Request.BatchCount, + }, + } +} + +func (a *explorerAPI) formatAck(ack *types.SignedAckHeader) map[string]interface{} { + return map[string]interface{}{ + "ack": map[string]interface{}{ + "request": map[string]interface{}{ + "hash": ack.Response.Request.Hash().String(), + "timestamp": a.formatTime(ack.Response.Request.Timestamp), + "node": ack.Response.Request.NodeID, + "type": ack.Response.Request.QueryType.String(), + "count": ack.Response.Request.BatchCount, + }, + "response": map[string]interface{}{ + "hash": ack.Response.Hash().String(), + "timestamp": a.formatTime(ack.Response.Timestamp), + "node": ack.Response.NodeID, + "log_id": ack.Response.LogOffset, // savepoint id in eventual consistency mode + "last_insert_id": ack.Response.LastInsertID, + "affected_rows": ack.Response.AffectedRows, + }, + "hash": ack.Hash().String(), + "timestamp": a.formatTime(ack.Timestamp), + "node": ack.NodeID, + }, + } +} + func (a *explorerAPI) formatTime(t time.Time) float64 { return float64(t.UnixNano()) / 1e6 } @@ -398,7 +553,9 @@ func (a *explorerAPI) getHash(vars map[string]string) (h *hash.Hash, err error) func startAPI(service *Service, listenAddr string) (server *http.Server, err error) { router := mux.NewRouter() router.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { - sendResponse(http.StatusOK, true, nil, nil, rw) + sendResponse(http.StatusOK, true, nil, map[string]interface{}{ + "version": version, + }, rw) }).Methods("GET") api := &explorerAPI{ @@ -406,14 +563,20 @@ func startAPI(service *Service, listenAddr string) (server *http.Server, err err } v1Router := router.PathPrefix("/v1").Subrouter() v1Router.HandleFunc("/ack/{db}/{hash}", api.GetAck).Methods("GET") - v1Router.HandleFunc("/offset/{db}/{offset:[0-9]+}", api.GetRequestByOffset).Methods("GET") + v1Router.HandleFunc("/offset/{db}/{offset:[0-9]+}", notSupported).Methods("GET") v1Router.HandleFunc("/request/{db}/{hash}", api.GetRequest).Methods("GET") v1Router.HandleFunc("/block/{db}/{hash}", api.GetBlock).Methods("GET") v1Router.HandleFunc("/count/{db}/{count:[0-9]+}", api.GetBlockByCount).Methods("GET") v1Router.HandleFunc("/height/{db}/{height:[0-9]+}", api.GetBlockByHeight).Methods("GET") - v1Router.HandleFunc("/head/{db}", api.getHighestBlock).Methods("GET") + v1Router.HandleFunc("/head/{db}", api.GetHighestBlock).Methods("GET") v2Router := router.PathPrefix("/v2").Subrouter() - v2Router.HandleFunc("/head/{db}", api.getHighestBlockV2).Methods("GET") + 
v2Router.HandleFunc("/head/{db}", api.GetHighestBlockV2).Methods("GET") + v3Router := router.PathPrefix("/v3").Subrouter() + v3Router.HandleFunc("/response/{db}/{hash}", api.GetResponse).Methods("GET") + v3Router.HandleFunc("/block/{db}/{hash}", api.GetBlockV3).Methods("GET") + v3Router.HandleFunc("/count/{db}/{count:[0-9]+}", api.GetBlockByCountV3).Methods("GET") + v3Router.HandleFunc("/height/{db}/{height:[0-9]+}", api.GetBlockByHeightV3).Methods("GET") + v3Router.HandleFunc("/head/{db}", api.GetHighestBlockV3).Methods("GET") server = &http.Server{ Addr: listenAddr, diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index 14457edb7..76a1b57c1 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -35,9 +35,9 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/jmoiron/jsonq" . "github.com/smartystreets/goconvey/convey" ) @@ -187,7 +187,7 @@ func stopNodes() { } func getJSON(pattern string, args ...interface{}) (result *jsonq.JsonQuery, err error) { - url := "http://localhost:4663/v1/" + fmt.Sprintf(pattern, args...) + url := "http://localhost:4663/" + fmt.Sprintf(pattern, args...) resp, err := http.Get(url) if err != nil { return @@ -195,6 +195,15 @@ func getJSON(pattern string, args ...interface{}) (result *jsonq.JsonQuery, err var res map[string]interface{} err = json.NewDecoder(resp.Body).Decode(&res) + if err != nil { + return + } + log.WithFields(log.Fields{ + "pattern": pattern, + "args": args, + "response": res, + "code": resp.StatusCode, + }).Debug("send test request") result = jsonq.NewQuery(res) success, err := result.Bool("success") if err != nil { @@ -353,38 +362,41 @@ func TestFullProcess(t *testing.T) { time.Sleep(blockProducePeriod * 2) // test get genesis block by height - res, err := getJSON("height/%v/0", dbID) + res, err := getJSON("v1/height/%v/0", dbID) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("block")), ShouldNotBeNil) So(ensureSuccess(res.Int("block", "height")), ShouldEqual, 0) genesisHash := ensureSuccess(res.String("block", "hash")).(string) // test get first containable block - res, err = getJSON("height/%v/1", dbID) + res, err = getJSON("v3/height/%v/1", dbID) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("block")), ShouldNotBeNil) So(ensureSuccess(res.Int("block", "height")), ShouldEqual, 1) So(ensureSuccess(res.String("block", "hash")), ShouldNotBeEmpty) So(ensureSuccess(res.String("block", "genesis_hash")), ShouldEqual, genesisHash) - So(ensureSuccess(res.ArrayOfStrings("block", "queries")), ShouldNotBeEmpty) + So(ensureSuccess(res.ArrayOfObjects("block", "queries")), ShouldNotBeEmpty) blockHash := ensureSuccess(res.String("block", "hash")).(string) byHeightBlockResult := ensureSuccess(res.Interface()) // test get block by hash - res, err = getJSON("block/%v/%v", dbID, blockHash) + res, err = getJSON("v3/block/%v/%v", dbID, blockHash) So(err, ShouldBeNil) So(ensureSuccess(res.Interface()), ShouldResemble, byHeightBlockResult) + // test get block by hash using v1 version, returns ack hashes as queries + res, err = getJSON("v1/block/%v/%v", dbID, blockHash) + So(err, ShouldBeNil) + ackHashes, err := res.ArrayOfStrings("block", "queries") So(err, ShouldBeNil) So(ackHashes, ShouldNotBeEmpty) // test get acked query in block - var logOffset int var reqHash 
string for _, ackHash := range ackHashes { - res, err = getJSON("ack/%v/%v", dbID, ackHash) + res, err = getJSON("v1/ack/%v/%v", dbID, ackHash) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("ack")), ShouldNotBeNil) So(ensureSuccess(res.String("ack", "hash")), ShouldNotBeEmpty) @@ -393,12 +405,9 @@ func TestFullProcess(t *testing.T) { queryType, err := res.String("ack", "request", "type") So(err, ShouldBeNil) - So(queryType, ShouldBeIn, []string{wt.WriteQuery.String(), wt.ReadQuery.String()}) + So(queryType, ShouldBeIn, []string{types.WriteQuery.String(), types.ReadQuery.String()}) - if queryType == wt.WriteQuery.String() { - logOffset, err = res.Int("ack", "response", "log_position") - So(err, ShouldBeNil) - So(logOffset, ShouldBeGreaterThanOrEqualTo, 0) + if queryType == types.WriteQuery.String() { reqHash, err = res.String("ack", "request", "hash") So(err, ShouldBeNil) So(reqHash, ShouldNotBeEmpty) @@ -407,36 +416,24 @@ func TestFullProcess(t *testing.T) { // must contains a write query So(reqHash, ShouldNotBeEmpty) - So(logOffset, ShouldBeGreaterThanOrEqualTo, 0) // test get request entity by request hash - res, err = getJSON("request/%v/%v", dbID, reqHash) + res, err = getJSON("v1/request/%v/%v", dbID, reqHash) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("request")), ShouldNotBeNil) So(ensureSuccess(res.String("request", "hash")), ShouldNotBeEmpty) - So(ensureSuccess(res.String("request", "type")), ShouldEqual, wt.WriteQuery.String()) + So(ensureSuccess(res.String("request", "type")), ShouldEqual, types.WriteQuery.String()) So(ensureSuccess(res.Int("request", "count")), ShouldEqual, 1) // no transaction batch is used So(ensureSuccess(res.ArrayOfObjects("request", "queries")), ShouldNotBeEmpty) So(ensureSuccess(res.String("request", "queries", "0", "pattern")), ShouldNotBeEmpty) - byHashRequestResult := ensureSuccess(res.Interface()) - - // test get request entity by log offset - res, err = getJSON("offset/%v/%v", dbID, logOffset) - So(err, ShouldBeNil) - So(ensureSuccess(res.Interface()), ShouldResemble, byHashRequestResult) - - // test get first log offset, should be a create table statement - res, err = getJSON("offset/%v/0", dbID) - So(err, ShouldBeNil) - So(ensureSuccess(res.String("request", "queries", "0", "pattern")), ShouldContainSubstring, "CREATE TABLE") // test get genesis block by height - res, err = getJSON("height/%v/0", dbID2) + res, err = getJSON("v3/height/%v/0", dbID2) So(err, ShouldNotBeNil) log.Info(err, res) // test get genesis block by height - res, err = getJSON("head/%v", dbID2) + res, err = getJSON("v3/head/%v", dbID2) So(err, ShouldBeNil) So(ensureSuccess(res.Interface("block")), ShouldNotBeNil) So(ensureSuccess(res.Int("block", "height")), ShouldEqual, 0) diff --git a/cmd/cql-observer/service.go b/cmd/cql-observer/service.go index addabbbee..c9fbe25a9 100644 --- a/cmd/cql-observer/service.go +++ b/cmd/cql-observer/service.go @@ -25,7 +25,6 @@ import ( "sync/atomic" "time" - bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -33,10 +32,9 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/sqlchain" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt 
"github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/coreos/bbolt" ) @@ -57,20 +55,20 @@ const ( | | \--> [count] => height | | | [block]-->[`dbID`] - | | |---> [height+hash] => block - | | \--> [height+hash] => block + | | |---> [height+hash+count] => block + | | \--> [height+hash+count] => block | | | [ack]-->[`dbID`] - | | |---> [hash] => ack - | | \--> [hash] => ack + | | |---> [hash] => height+offset + | | \--> [hash] => height+offset | | | [request]-->[`dbID`] - | | |---> [offset+hash] => request - | | \--> [offset+hash] => request + | | |---> [hash] => height+offset + | | \--> [hash] => height+offset | | - | [offset]-->[`dbID`] - | |---> [hash] => offset - | \--> [hash] => offset + | [response]-->[`dbID`] + | |---> [hash] => height+offset + | \--> [hash] => height+offset | \-> [subscription] \---> [`dbID`] => height @@ -81,16 +79,18 @@ var ( ErrStopped = errors.New("observer service has stopped") // ErrNotFound defines error on fail to found specified resource ErrNotFound = errors.New("resource not found") + // ErrInconsistentData represents corrupted observation data. + ErrInconsistentData = errors.New("inconsistent data") // bolt db buckets blockBucket = []byte("block") blockCount2HeightBucket = []byte("block-count-to-height") ackBucket = []byte("ack") requestBucket = []byte("request") + responseBucket = []byte("response") subscriptionBucket = []byte("subscription") blockHeightBucket = []byte("height") - logOffsetBucket = []byte("offset") // blockProducePeriod defines the block producing interval blockProducePeriod = 60 * time.Second @@ -142,7 +142,7 @@ func NewService() (service *Service, err error) { if _, err = tx.CreateBucketIfNotExists(blockHeightBucket); err != nil { return } - _, err = tx.CreateBucketIfNotExists(logOffsetBucket) + _, err = tx.CreateBucketIfNotExists(responseBucket) return }); err != nil { return @@ -170,16 +170,6 @@ func NewService() (service *Service, err error) { return } -func offsetToBytes(offset uint64) (data []byte) { - data = make([]byte, 8) - binary.BigEndian.PutUint64(data, offset) - return -} - -func bytesToOffset(data []byte) uint64 { - return uint64(binary.BigEndian.Uint64(data)) -} - func int32ToBytes(h int32) (data []byte) { data = make([]byte, 4) binary.BigEndian.PutUint32(data, uint32(h)) @@ -204,11 +194,11 @@ func (s *Service) subscribe(dbID proto.DatabaseID, resetSubscribePosition string switch resetSubscribePosition { case "newest": - fromPos = ct.ReplicateFromNewest + fromPos = types.ReplicateFromNewest case "oldest": - fromPos = ct.ReplicateFromBeginning + fromPos = types.ReplicateFromBeginning default: - fromPos = ct.ReplicateFromNewest + fromPos = types.ReplicateFromNewest } s.subscription[dbID] = fromPos @@ -219,7 +209,7 @@ func (s *Service) subscribe(dbID proto.DatabaseID, resetSubscribePosition string } else { // not resetting if _, exists := s.subscription[dbID]; !exists { - s.subscription[dbID] = ct.ReplicateFromNewest + s.subscription[dbID] = types.ReplicateFromNewest shouldStartSubscribe = true } } @@ -253,21 +243,6 @@ func (s *Service) AdviseNewBlock(req *sqlchain.MuxAdviseNewBlockReq, resp *sqlch return s.addBlock(req.DatabaseID, req.Count, req.Block) } -// AdviseAckedQuery handles acked query replication request from the remote database chain service. 
-func (s *Service) AdviseAckedQuery(req *sqlchain.MuxAdviseAckedQueryReq, resp *sqlchain.MuxAdviseAckedQueryResp) (err error) { - if atomic.LoadInt32(&s.stopped) == 1 { - // stopped - return ErrStopped - } - - if req.Query == nil { - log.WithField("node", req.GetNodeID().String()).Info("received empty acked query") - return - } - - return s.addAckedQuery(req.DatabaseID, req.Query) -} - func (s *Service) start() (err error) { if atomic.LoadInt32(&s.stopped) == 1 { // stopped @@ -322,11 +297,12 @@ func (s *Service) startSubscribe(dbID proto.DatabaseID) (err error) { return } -func (s *Service) addAckedQuery(dbID proto.DatabaseID, ack *wt.SignedAckHeader) (err error) { +func (s *Service) addAck(dbID proto.DatabaseID, height int32, offset int32, ack *types.SignedAckHeader) (err error) { log.WithFields(log.Fields{ - "ack": ack.Hash.String(), - "db": dbID, - }).Debug("add ack query") + "height": height, + "ack": ack.Hash().String(), + "db": dbID, + }).Debug("add ack") if atomic.LoadInt32(&s.stopped) == 1 { // stopped @@ -340,71 +316,62 @@ func (s *Service) addAckedQuery(dbID proto.DatabaseID, ack *wt.SignedAckHeader) return } - // fetch original query - if ack.Response.Request.QueryType == wt.WriteQuery { - req := &wt.GetRequestReq{} - resp := &wt.GetRequestResp{} - - req.DatabaseID = dbID - req.LogOffset = ack.Response.LogOffset - - if err = s.minerRequest(dbID, route.DBSGetRequest.String(), req, resp); err != nil { + // store ack + return s.db.Update(func(tx *bolt.Tx) (err error) { + ab, err := tx.Bucket(ackBucket).CreateBucketIfNotExists([]byte(dbID)) + if err != nil { return } + err = ab.Put(ack.Hash().AsBytes(), utils.ConcatAll(int32ToBytes(height), int32ToBytes(offset))) + return + }) +} - key := offsetToBytes(req.LogOffset) - key = append(key, resp.Request.Header.Hash.CloneBytes()...) 
+func (s *Service) addQueryTracker(dbID proto.DatabaseID, height int32, offset int32, qt *types.QueryAsTx) (err error) { + log.WithFields(log.Fields{ + "req": qt.Request.Header.Hash(), + "resp": qt.Response.Hash(), + }).Debug("add query tracker") - log.WithFields(log.Fields{ - "offset": req.LogOffset, - "reqHash": resp.Request.Header.Hash.String(), - "reqQueries": resp.Request.Payload.Queries, - }).Debug("add write request") + if atomic.LoadInt32(&s.stopped) == 1 { + // stopped + return ErrStopped + } - var reqBytes *bytes.Buffer - if reqBytes, err = utils.EncodeMsgPack(resp.Request); err != nil { - return - } + s.lock.Lock() + defer s.lock.Unlock() - if err = s.db.Update(func(tx *bolt.Tx) (err error) { - qb, err := tx.Bucket(requestBucket).CreateBucketIfNotExists([]byte(dbID)) - if err != nil { - return - } - if err = qb.Put(key, reqBytes.Bytes()); err != nil { - return - } - ob, err := tx.Bucket(logOffsetBucket).CreateBucketIfNotExists([]byte(dbID)) - if err != nil { - return - } - err = ob.Put(resp.Request.Header.Hash.CloneBytes(), offsetToBytes(req.LogOffset)) - return - }); err != nil { - return - } + if err = qt.Request.Verify(); err != nil { + return + } + if err = qt.Response.Verify(); err != nil { + return } - // store ack + dataBytes := utils.ConcatAll(int32ToBytes(height), int32ToBytes(offset)) + + // store request and response return s.db.Update(func(tx *bolt.Tx) (err error) { - ab, err := tx.Bucket(ackBucket).CreateBucketIfNotExists([]byte(dbID)) + reqb, err := tx.Bucket(requestBucket).CreateBucketIfNotExists([]byte(dbID)) if err != nil { return } - ackBytes, err := utils.EncodeMsgPack(ack) + resb, err := tx.Bucket(responseBucket).CreateBucketIfNotExists([]byte(dbID)) if err != nil { return } - err = ab.Put(ack.Hash.CloneBytes(), ackBytes.Bytes()) + if err = reqb.Put(qt.Request.Header.Hash().AsBytes(), dataBytes); err != nil { + return + } + err = resb.Put(qt.Response.Hash().AsBytes(), dataBytes) return }) } -func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *ct.Block) (err error) { +func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *types.Block) (err error) { instance, err := s.getUpstream(dbID) h := int32(b.Timestamp().Sub(instance.GenesisBlock.Timestamp()) / blockProducePeriod) - key := int32ToBytes(h) - key = append(key, b.BlockHash().CloneBytes()...) 
+ key := utils.ConcatAll(int32ToBytes(h), b.BlockHash().AsBytes(), int32ToBytes(count)) // It's actually `countToBytes` ckey := int32ToBytes(count) blockBytes, err := utils.EncodeMsgPack(b) @@ -416,9 +383,10 @@ func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *ct.Block) (err "count": count, "height": h, "producer": b.Producer(), + "block": b, }).Debugf("Add new block %v -> %v", b.BlockHash(), b.ParentHash()) - return s.db.Update(func(tx *bolt.Tx) (err error) { + if err = s.db.Update(func(tx *bolt.Tx) (err error) { bb, err := tx.Bucket(blockBucket).CreateBucketIfNotExists([]byte(dbID)) if err != nil { return @@ -431,7 +399,7 @@ func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *ct.Block) (err return } if count >= 0 { - if err = cb.Put(ckey, key); err != nil { + if err = cb.Put(ckey, int32ToBytes(h)); err != nil { return } } @@ -441,7 +409,25 @@ func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *ct.Block) (err } err = hb.Put(b.BlockHash()[:], int32ToBytes(h)) return - }) + }); err != nil { + return + } + + // save ack + for i, q := range b.Acks { + if err = s.addAck(dbID, h, int32(i), q); err != nil { + return + } + } + + // save queries + for i, q := range b.QueryTxs { + if err = s.addQueryTracker(dbID, h, int32(i), q); err != nil { + return + } + } + + return } func (s *Service) stop() (err error) { @@ -483,11 +469,11 @@ func (s *Service) minerRequest(dbID proto.DatabaseID, method string, request int return s.caller.CallNode(instance.Peers.Leader, method, request, response) } -func (s *Service) getUpstream(dbID proto.DatabaseID) (instance *wt.ServiceInstance, err error) { +func (s *Service) getUpstream(dbID proto.DatabaseID) (instance *types.ServiceInstance, err error) { log.WithField("db", dbID).Info("get peers info for database") if iInstance, exists := s.upstreamServers.Load(dbID); exists { - instance = iInstance.(*wt.ServiceInstance) + instance = iInstance.(*types.ServiceInstance) return } @@ -501,12 +487,12 @@ func (s *Service) getUpstream(dbID proto.DatabaseID) (instance *wt.ServiceInstan return } - req := &bp.GetDatabaseRequest{} + req := &types.GetDatabaseRequest{} req.Header.DatabaseID = dbID if err = req.Sign(privateKey); err != nil { return } - resp := &bp.GetDatabaseResponse{} + resp := &types.GetDatabaseResponse{} // get peers list from block producer if err = s.caller.CallNode(curBP, route.BPDBGetDatabase.String(), req, resp); err != nil { return @@ -521,81 +507,167 @@ func (s *Service) getUpstream(dbID proto.DatabaseID) (instance *wt.ServiceInstan return } -func (s *Service) getAck(dbID proto.DatabaseID, h *hash.Hash) (ack *wt.SignedAckHeader, err error) { - err = s.db.View(func(tx *bolt.Tx) error { +func (s *Service) getAck(dbID proto.DatabaseID, h *hash.Hash) (ack *types.SignedAckHeader, err error) { + var ( + blockHeight int32 + dataOffset int32 + ) + + if err = s.db.View(func(tx *bolt.Tx) (err error) { bucket := tx.Bucket(ackBucket).Bucket([]byte(dbID)) if bucket == nil { return ErrNotFound } - ackBytes := bucket.Get(h.CloneBytes()) + ackBytes := bucket.Get(h.AsBytes()) if ackBytes == nil { return ErrNotFound } - return utils.DecodeMsgPack(ackBytes, &ack) - }) + // get block height and object offset in block + if len(ackBytes) != 8 { + // invalid data payload + return ErrInconsistentData + } + + blockHeight = bytesToInt32(ackBytes[:4]) + dataOffset = bytesToInt32(ackBytes[4:]) + + return + }); err != nil { + return + } + + // get data from block + var b *types.Block + if _, b, err = s.getBlockByHeight(dbID, blockHeight); err 
!= nil { + return + } + + if dataOffset < 0 || int32(len(b.Acks)) <= dataOffset { + err = ErrInconsistentData + return + } + + ack = b.Acks[int(dataOffset)] + + // verify hash + ackHash := ack.Hash() + if !ackHash.IsEqual(h) { + err = ErrInconsistentData + } return } -func (s *Service) getRequest(dbID proto.DatabaseID, h *hash.Hash) (request *wt.Request, err error) { - err = s.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(logOffsetBucket).Bucket([]byte(dbID)) +func (s *Service) getRequest(dbID proto.DatabaseID, h *hash.Hash) (request *types.Request, err error) { + var ( + blockHeight int32 + dataOffset int32 + ) + if err = s.db.View(func(tx *bolt.Tx) (err error) { + bucket := tx.Bucket(requestBucket).Bucket([]byte(dbID)) if bucket == nil { return ErrNotFound } - reqKey := bucket.Get(h.CloneBytes()) - if reqKey == nil { + reqBytes := bucket.Get(h.AsBytes()) + if reqBytes == nil { return ErrNotFound } - reqKey = append([]byte{}, reqKey...) - reqKey = append(reqKey, h.CloneBytes()...) - - bucket = tx.Bucket(requestBucket).Bucket([]byte(dbID)) - if bucket == nil { - return ErrNotFound + // get block height and object offset in block + if len(reqBytes) != 8 { + // invalid data payload + return ErrInconsistentData } - reqBytes := bucket.Get(reqKey) - if reqBytes == nil { - return ErrNotFound - } + blockHeight = bytesToInt32(reqBytes[:4]) + dataOffset = bytesToInt32(reqBytes[4:]) - return utils.DecodeMsgPack(reqBytes, &request) - }) + return + }); err != nil { + return + } + + // get data from block + var b *types.Block + if _, b, err = s.getBlockByHeight(dbID, blockHeight); err != nil { + return + } + + if dataOffset < 0 || int32(len(b.QueryTxs)) <= dataOffset { + err = ErrInconsistentData + return + } + + request = b.QueryTxs[int(dataOffset)].Request + + // verify hash + reqHash := request.Header.Hash() + if !reqHash.IsEqual(h) { + err = ErrInconsistentData + } return } -func (s *Service) getRequestByOffset(dbID proto.DatabaseID, offset uint64) (request *wt.Request, err error) { - err = s.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(requestBucket).Bucket([]byte(dbID)) +func (s *Service) getResponseHeader(dbID proto.DatabaseID, h *hash.Hash) (response *types.SignedResponseHeader, err error) { + var ( + blockHeight int32 + dataOffset int32 + ) + if err = s.db.View(func(tx *bolt.Tx) (err error) { + bucket := tx.Bucket(requestBucket).Bucket([]byte(dbID)) if bucket == nil { return ErrNotFound } - keyPrefix := offsetToBytes(offset) - cur := bucket.Cursor() + respBytes := bucket.Get(h.AsBytes()) + if respBytes == nil { + return ErrNotFound + } - for k, v := cur.Seek(keyPrefix); k != nil && bytes.HasPrefix(k, keyPrefix); k, v = cur.Next() { - if v != nil { - return utils.DecodeMsgPack(v, &request) - } + // get block height and object offset in block + if len(respBytes) != 8 { + // invalid data payload + return ErrInconsistentData } - return ErrNotFound - }) + blockHeight = bytesToInt32(respBytes[:4]) + dataOffset = bytesToInt32(respBytes[4:]) + + return + }); err != nil { + return + } + + // get data from block + var b *types.Block + if _, b, err = s.getBlockByHeight(dbID, blockHeight); err != nil { + return + } + + if dataOffset < 0 || int32(len(b.QueryTxs)) <= dataOffset { + err = ErrInconsistentData + return + } + + response = b.QueryTxs[int(dataOffset)].Response + + // verify hash + respHash := response.Hash() + if !respHash.IsEqual(h) { + err = ErrInconsistentData + } return } -func (s *Service) getHighestBlock(dbID proto.DatabaseID) (height int32, b *ct.Block, err error) { 
+func (s *Service) getHighestBlock(dbID proto.DatabaseID) (height int32, b *types.Block, err error) { err = s.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket(blockBucket).Bucket([]byte(dbID)) @@ -617,7 +689,7 @@ func (s *Service) getHighestBlock(dbID proto.DatabaseID) (height int32, b *ct.Bl } func (s *Service) getHighestBlockV2( - dbID proto.DatabaseID) (count, height int32, b *ct.Block, err error, + dbID proto.DatabaseID) (count, height int32, b *types.Block, err error, ) { err = s.db.View(func(tx *bolt.Tx) (err error) { var ( @@ -651,7 +723,7 @@ func (s *Service) getHighestBlockV2( return } -func (s *Service) getBlockByHeight(dbID proto.DatabaseID, height int32) (b *ct.Block, err error) { +func (s *Service) getBlockByHeight(dbID proto.DatabaseID, height int32) (count int32, b *types.Block, err error) { err = s.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket(blockBucket).Bucket([]byte(dbID)) @@ -664,6 +736,10 @@ func (s *Service) getBlockByHeight(dbID proto.DatabaseID, height int32) (b *ct.B cur := bucket.Cursor() for k, v := cur.Seek(keyPrefix); k != nil && bytes.HasPrefix(k, keyPrefix); k, v = cur.Next() { if v != nil { + if len(k) < 4+hash.HashSize+4 { + return ErrInconsistentData + } + count = bytesToInt32(k[4+hash.HashSize:]) return utils.DecodeMsgPack(v, &b) } } @@ -675,7 +751,7 @@ func (s *Service) getBlockByHeight(dbID proto.DatabaseID, height int32) (b *ct.B } func (s *Service) getBlockByCount( - dbID proto.DatabaseID, count int32) (height int32, b *ct.Block, err error, + dbID proto.DatabaseID, count int32) (height int32, b *types.Block, err error, ) { err = s.db.View(func(tx *bolt.Tx) (err error) { var ( @@ -709,7 +785,7 @@ func (s *Service) getBlockByCount( return } -func (s *Service) getBlock(dbID proto.DatabaseID, h *hash.Hash) (height int32, b *ct.Block, err error) { +func (s *Service) getBlock(dbID proto.DatabaseID, h *hash.Hash) (count int32, height int32, b *types.Block, err error) { err = s.db.View(func(tx *bolt.Tx) error { bucket := tx.Bucket(blockHeightBucket).Bucket([]byte(dbID)) @@ -717,37 +793,45 @@ func (s *Service) getBlock(dbID proto.DatabaseID, h *hash.Hash) (height int32, b return ErrNotFound } - blockKey := bucket.Get(h.CloneBytes()) - if blockKey == nil { + blockKeyPrefix := bucket.Get(h.AsBytes()) + if blockKeyPrefix == nil { return ErrNotFound } - blockKey = append([]byte{}, blockKey...) - blockKey = append(blockKey, h.CloneBytes()...) + blockKeyPrefix = append([]byte{}, blockKeyPrefix...) + blockKeyPrefix = append(blockKeyPrefix, h.AsBytes()...) 
bucket = tx.Bucket(blockBucket).Bucket([]byte(dbID)) if bucket == nil { return ErrNotFound } - blockBytes := bucket.Get(blockKey) + var ( + blockKey []byte + blockBytes []byte + ) + + cur := bucket.Cursor() + for blockKey, blockBytes = cur.Seek(blockKeyPrefix); blockKey != nil && bytes.HasPrefix(blockKey, blockKeyPrefix); blockKey, blockBytes = cur.Next() { + if blockBytes != nil { + break + } + } + if blockBytes == nil { return ErrNotFound } - return utils.DecodeMsgPack(blockBytes, &b) - }) - - if err == nil { - // compute height - var instance *wt.ServiceInstance - instance, err = s.getUpstream(dbID) - if err != nil { - return + // decode count from block key + if len(blockKey) < 4+hash.HashSize+4 { + return ErrInconsistentData } - height = int32(b.Timestamp().Sub(instance.GenesisBlock.Timestamp()) / blockProducePeriod) - } + height = bytesToInt32(blockKey[:4]) + count = bytesToInt32(blockKey[4+hash.HashSize:]) + + return utils.DecodeMsgPack(blockBytes, &b) + }) return } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 5ec683a4c..91791193e 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -29,9 +29,6 @@ import ( "strconv" "strings" - "github.com/CovenantSQL/CovenantSQL/client" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/go-sqlite3-encrypt" "github.com/xo/dburl" "github.com/xo/usql/drivers" @@ -39,6 +36,10 @@ import ( "github.com/xo/usql/handler" "github.com/xo/usql/rline" "github.com/xo/usql/text" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils/log" ) const name = "cql" @@ -220,8 +221,8 @@ func main() { return } - log.Infof("stable coin balance is: %#v", stableCoinBalance) - log.Infof("covenant coin balance is: %#v", covenantCoinBalance) + log.Infof("stable coin balance is: %d", stableCoinBalance) + log.Infof("covenant coin balance is: %d", covenantCoinBalance) return } diff --git a/cmd/cqld/adapter.go b/cmd/cqld/adapter.go index 41427b8b9..eec701a13 100644 --- a/cmd/cqld/adapter.go +++ b/cmd/cqld/adapter.go @@ -29,10 +29,10 @@ import ( kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" + "github.com/CovenantSQL/CovenantSQL/storage" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/pkg/errors" ) @@ -182,7 +182,7 @@ func (s *LocalStorage) compileLog(payload *KayakPayload) (result *compiledLog, e nodeToSet: &nodeToSet, } case CmdSetDatabase: - var instance wt.ServiceInstance + var instance types.ServiceInstance if err = utils.DecodeMsgPack(payload.Data, &instance); err != nil { log.WithError(err).Error("compileLog: unmarshal instance meta failed") return @@ -201,7 +201,7 @@ func (s *LocalStorage) compileLog(payload *KayakPayload) (result *compiledLog, e }, } case CmdDeleteDatabase: - var instance wt.ServiceInstance + var instance types.ServiceInstance if err = utils.DecodeMsgPack(payload.Data, &instance); err != nil { log.WithError(err).Error("compileLog: unmarshal instance id failed") return @@ -294,7 +294,7 @@ func (s *KayakKVServer) Reset() (err error) { } // GetDatabase implements blockproducer.DBMetaPersistence. 
-func (s *KayakKVServer) GetDatabase(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { +func (s *KayakKVServer) GetDatabase(dbID proto.DatabaseID) (instance types.ServiceInstance, err error) { var result [][]interface{} query := "SELECT `meta` FROM `databases` WHERE `id` = ? LIMIT 1" _, _, result, err = s.KVStorage.Query(context.Background(), []storage.Query{ @@ -327,7 +327,7 @@ func (s *KayakKVServer) GetDatabase(dbID proto.DatabaseID) (instance wt.ServiceI } // SetDatabase implements blockproducer.DBMetaPersistence. -func (s *KayakKVServer) SetDatabase(meta wt.ServiceInstance) (err error) { +func (s *KayakKVServer) SetDatabase(meta types.ServiceInstance) (err error) { var metaBuf *bytes.Buffer if metaBuf, err = utils.EncodeMsgPack(meta); err != nil { return @@ -348,7 +348,7 @@ func (s *KayakKVServer) SetDatabase(meta wt.ServiceInstance) (err error) { // DeleteDatabase implements blockproducer.DBMetaPersistence. func (s *KayakKVServer) DeleteDatabase(dbID proto.DatabaseID) (err error) { - meta := wt.ServiceInstance{ + meta := types.ServiceInstance{ DatabaseID: dbID, } @@ -370,7 +370,7 @@ func (s *KayakKVServer) DeleteDatabase(dbID proto.DatabaseID) (err error) { } // GetAllDatabases implements blockproducer.DBMetaPersistence. -func (s *KayakKVServer) GetAllDatabases() (instances []wt.ServiceInstance, err error) { +func (s *KayakKVServer) GetAllDatabases() (instances []types.ServiceInstance, err error) { var result [][]interface{} query := "SELECT `meta` FROM `databases`" _, _, result, err = s.KVStorage.Query(context.Background(), []storage.Query{ @@ -383,14 +383,14 @@ func (s *KayakKVServer) GetAllDatabases() (instances []wt.ServiceInstance, err e return } - instances = make([]wt.ServiceInstance, 0, len(result)) + instances = make([]types.ServiceInstance, 0, len(result)) for _, row := range result { if len(row) <= 0 { continue } - var instance wt.ServiceInstance + var instance types.ServiceInstance var rawInstanceMeta []byte var ok bool if rawInstanceMeta, ok = row[0].([]byte); !ok { diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index b2c052c3d..ef0ed00c5 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -154,8 +154,8 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { server, peers, nodeID, - 2*time.Second, - 900*time.Millisecond, + time.Minute, + 20*time.Second, ) chain, err := bp.NewChain(chainConfig) if err != nil { diff --git a/cmd/hotfix/hash-upgrade/main.go b/cmd/hotfix/hash-upgrade/main.go index 1c534a00d..7e4e89a93 100644 --- a/cmd/hotfix/hash-upgrade/main.go +++ b/cmd/hotfix/hash-upgrade/main.go @@ -31,11 +31,11 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + ct "github.com/CovenantSQL/CovenantSQL/sqlchain/otypes" + "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + wt "github.com/CovenantSQL/CovenantSQL/worker/otypes" ) var ( diff --git a/cmd/hotfix/observer-upgrade/main.go b/cmd/hotfix/observer-upgrade/main.go index 137b8aaf6..c650f306d 100644 --- a/cmd/hotfix/observer-upgrade/main.go +++ b/cmd/hotfix/observer-upgrade/main.go @@ -23,7 +23,7 @@ import ( "os" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - ct 
"github.com/CovenantSQL/CovenantSQL/sqlchain/types" + ct "github.com/CovenantSQL/CovenantSQL/sqlchain/otypes" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/coreos/bbolt" diff --git a/crypto/asymmetric/signature.go b/crypto/asymmetric/signature.go index 77ac14580..ec7e59804 100644 --- a/crypto/asymmetric/signature.go +++ b/crypto/asymmetric/signature.go @@ -96,6 +96,9 @@ func (s *Signature) Verify(hash []byte, signee *PublicKey) bool { if BypassSignature { return true } + if signee == nil || s == nil { + return false + } signature := make([]byte, 64) copy(signature, utils.PaddedBigBytes(s.R, 32)) diff --git a/crypto/hash/hash.go b/crypto/hash/hash.go index 8108e9f3e..74b9e49e4 100644 --- a/crypto/hash/hash.go +++ b/crypto/hash/hash.go @@ -48,6 +48,11 @@ func (h Hash) String() string { return hex.EncodeToString(h[:]) } +// AsBytes returns internal bytes of hash. +func (h Hash) AsBytes() []byte { + return h[:] +} + // CloneBytes returns a copy of the bytes which represent the hash as a byte // slice. // diff --git a/crypto/verifier/common.go b/crypto/verifier/common.go new file mode 100644 index 000000000..41fa8ecb4 --- /dev/null +++ b/crypto/verifier/common.go @@ -0,0 +1,83 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package verifier + +import ( + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/pkg/errors" +) + +//go:generate hsp + +// MarshalHasher is the interface implemented by an object that can be stably marshalling hashed. +type MarshalHasher interface { + MarshalHash() ([]byte, error) +} + +// HashSignVerifier is the interface implemented by an object that contains a hash value of an +// MarshalHasher, can be signed by a private key and verified later. +type HashSignVerifier interface { + Hash() hash.Hash + Sign(MarshalHasher, *ca.PrivateKey) error + Verify(MarshalHasher) error +} + +// DefaultHashSignVerifierImpl defines a default implementation of HashSignVerifier. +type DefaultHashSignVerifierImpl struct { + DataHash hash.Hash + Signee *ca.PublicKey + Signature *ca.Signature +} + +// Hash implements HashSignVerifier.Hash. +func (i *DefaultHashSignVerifierImpl) Hash() hash.Hash { + return i.DataHash +} + +// Sign implements HashSignVerifier.Sign. +func (i *DefaultHashSignVerifierImpl) Sign(mh MarshalHasher, signer *ca.PrivateKey) (err error) { + var enc []byte + if enc, err = mh.MarshalHash(); err != nil { + return + } + var h = hash.THashH(enc) + if i.Signature, err = signer.Sign(h[:]); err != nil { + return + } + i.DataHash = h + i.Signee = signer.PubKey() + return +} + +// Verify implements HashSignVerifier.Verify. 
+func (i *DefaultHashSignVerifierImpl) Verify(mh MarshalHasher) (err error) { + var enc []byte + if enc, err = mh.MarshalHash(); err != nil { + return + } + var h = hash.THashH(enc) + if !i.DataHash.IsEqual(&h) { + err = errors.WithStack(ErrHashValueNotMatch) + return + } + if !i.Signature.Verify(h[:], i.Signee) { + err = errors.WithStack(ErrSignatureNotMatch) + return + } + return +} diff --git a/crypto/verifier/common_gen.go b/crypto/verifier/common_gen.go new file mode 100644 index 000000000..943ac0eda --- /dev/null +++ b/crypto/verifier/common_gen.go @@ -0,0 +1,59 @@ +package verifier + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *DefaultHashSignVerifierImpl) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + if z.Signee == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signee.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x83) + if z.Signature == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Signature.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x83) + if oTemp, err := z.DataHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DefaultHashSignVerifierImpl) Msgsize() (s int) { + s = 1 + 7 + if z.Signee == nil { + s += hsp.NilSize + } else { + s += z.Signee.Msgsize() + } + s += 10 + if z.Signature == nil { + s += hsp.NilSize + } else { + s += z.Signature.Msgsize() + } + s += 9 + z.DataHash.Msgsize() + return +} diff --git a/chain/xxx_gen_test_test.go b/crypto/verifier/common_gen_test.go similarity index 66% rename from chain/xxx_gen_test_test.go rename to crypto/verifier/common_gen_test.go index 04c1be79d..4f73911c7 100644 --- a/chain/xxx_gen_test_test.go +++ b/crypto/verifier/common_gen_test.go @@ -1,4 +1,4 @@ -package chain +package verifier // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. @@ -9,8 +9,8 @@ import ( "testing" ) -func TestMarshalHashDemoHeader(t *testing.T) { - v := DemoHeader{} +func TestMarshalHashDefaultHashSignVerifierImpl(t *testing.T) { + v := DefaultHashSignVerifierImpl{} binary.Read(rand.Reader, binary.BigEndian, &v) bts1, err := v.MarshalHash() if err != nil { @@ -25,8 +25,8 @@ func TestMarshalHashDemoHeader(t *testing.T) { } } -func BenchmarkMarshalHashDemoHeader(b *testing.B) { - v := DemoHeader{} +func BenchmarkMarshalHashDefaultHashSignVerifierImpl(b *testing.B) { + v := DefaultHashSignVerifierImpl{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -34,8 +34,8 @@ func BenchmarkMarshalHashDemoHeader(b *testing.B) { } } -func BenchmarkAppendMsgDemoHeader(b *testing.B) { - v := DemoHeader{} +func BenchmarkAppendMsgDefaultHashSignVerifierImpl(b *testing.B) { + v := DefaultHashSignVerifierImpl{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalHash() b.SetBytes(int64(len(bts))) diff --git a/crypto/verifier/common_test.go b/crypto/verifier/common_test.go new file mode 100644 index 000000000..953ce61f8 --- /dev/null +++ b/crypto/verifier/common_test.go @@ -0,0 +1,101 @@ +/* + * Copyright 2018 The CovenantSQL Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package verifier + +import ( + "math/big" + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/pkg/errors" + . "github.com/smartystreets/goconvey/convey" +) + +var ( + MockHash = []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } +) + +type MockHeader struct{} + +func (*MockHeader) MarshalHash() ([]byte, error) { + return MockHash, nil +} + +type MockObject struct { + MockHeader + HSV DefaultHashSignVerifierImpl +} + +func (o *MockObject) Sign(signer *asymmetric.PrivateKey) error { + return o.HSV.Sign(&o.MockHeader, signer) +} + +func (o *MockObject) Verify() error { + return o.HSV.Verify(&o.MockHeader) +} + +func TestDefaultHashSignVerifierImpl(t *testing.T) { + Convey("Given a dummy object and a pair of keys", t, func() { + var ( + obj = &MockObject{} + priv, _, err = asymmetric.GenSecp256k1KeyPair() + ) + So(err, ShouldBeNil) + So(priv, ShouldNotBeNil) + Convey("When the object is signed by the key pair", func() { + err = obj.Sign(priv) + So(err, ShouldBeNil) + Convey("The object should be verifiable", func() { + err = obj.Verify() + So(err, ShouldBeNil) + }) + Convey("The object should have data hash", func() { + So(obj.HSV.Hash(), ShouldEqual, hash.THashH(MockHash)) + }) + Convey("When the hash is modified", func() { + obj.HSV.DataHash = hash.Hash{0x0, 0x0, 0x0, 0x1} + Convey("The verifier should return hash value not match error", func() { + err = errors.Cause(obj.Verify()) + So(err, ShouldEqual, ErrHashValueNotMatch) + }) + }) + Convey("When the signee is modified", func() { + var _, pub, err = asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + obj.HSV.Signee = pub + Convey("The verifier should return signature not match error", func() { + err = errors.Cause(obj.Verify()) + So(err, ShouldEqual, ErrSignatureNotMatch) + }) + }) + Convey("When the signature is modified", func() { + var val = obj.HSV.Signature.R + val.Add(val, big.NewInt(1)) + Convey("The verifier should return signature not match error", func() { + err = errors.Cause(obj.Verify()) + So(err, ShouldEqual, ErrSignatureNotMatch) + }) + }) + }) + }) +} diff --git a/proto/errors.go b/crypto/verifier/errors.go similarity index 66% rename from proto/errors.go rename to crypto/verifier/errors.go index cee9c752c..57da3f1f0 100644 --- a/proto/errors.go +++ b/crypto/verifier/errors.go @@ -14,14 +14,13 @@ * limitations under the License. */ -package proto +package verifier -import "github.com/pkg/errors" +import "errors" var ( - // ErrHashVerification indicates a failed hash verification. - ErrHashVerification = errors.New("hash verification failed") - - // ErrSignVerification indicates a failed signature verification. 
- ErrSignVerification = errors.New("signature verification failed") + // ErrHashValueNotMatch indicates the hash value not match error from verifier. + ErrHashValueNotMatch = errors.New("hash value not match") + // ErrSignatureNotMatch indicates the signature not match error from verifier. + ErrSignatureNotMatch = errors.New("signature not match") ) diff --git a/common/doc.go b/kayak/doc.go similarity index 86% rename from common/doc.go rename to kayak/doc.go index c42b8932e..ea76a4746 100644 --- a/common/doc.go +++ b/kayak/doc.go @@ -14,5 +14,5 @@ * limitations under the License. */ -// Package common defines some common types which are used by multiple modules. -package common +// Package kayak implements a configurable consistency consensus middleware. +package kayak diff --git a/kayak/runtime.go b/kayak/runtime.go index c160b1b16..8c763ceda 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -126,8 +126,18 @@ type commitResult struct { // NewRuntime creates new kayak Runtime. func NewRuntime(cfg *kt.RuntimeConfig) (rt *Runtime, err error) { + if cfg == nil { + err = errors.Wrap(kt.ErrInvalidConfig, "nil config") + return + } + peers := cfg.Peers + if peers == nil { + err = errors.Wrap(kt.ErrInvalidConfig, "nil peers") + return + } + // verify peers if err = peers.Verify(); err != nil { err = errors.Wrap(err, "verify peers during kayak init failed") @@ -235,9 +245,6 @@ func (r *Runtime) Shutdown() (err error) { // Apply defines entry for Leader node. func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{}, logIndex uint64, err error) { - r.peersLock.RLock() - defer r.peersLock.RUnlock() - var commitFuture <-chan *commitResult var tmStart, tmLeaderPrepare, tmFollowerPrepare, tmCommitEnqueue, tmLeaderRollback, @@ -280,9 +287,12 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ } else if !tmRollback.Before(tmStart) { fields["t"] = tmRollback.Sub(tmStart).Nanoseconds() } - log.WithFields(fields).Info("kayak leader apply") + log.WithFields(fields).WithError(err).Info("kayak leader apply") }() + r.peersLock.RLock() + defer r.peersLock.RUnlock() + if r.role != proto.Leader { // not leader err = kt.ErrNotLeader @@ -312,6 +322,10 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ return } + // Leader pending map handling. 
+ r.markPendingPrepare(prepareLog.Index) + defer r.markPrepareFinished(prepareLog.Index) + tmLeaderPrepare = time.Now() // send prepare to all nodes @@ -374,7 +388,8 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ ROLLBACK: // rollback local var rollbackLog *kt.Log - if rollbackLog, err = r.leaderLogRollback(prepareLog.Index); err != nil { + var logErr error + if rollbackLog, logErr = r.leaderLogRollback(prepareLog.Index); logErr != nil { // serve error, construct rollback log failed, internal error // TODO(): CHANGE LEADER return @@ -397,6 +412,16 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { return } + var tmStart, tmEnd time.Time + + defer func() { + log.WithFields(log.Fields{ + "t": l.Type.String(), + "i": l.Index, + "c": tmEnd.Sub(tmStart).Nanoseconds(), + }).WithError(err).Info("kayak follower apply") + }() + r.peersLock.RLock() defer r.peersLock.RUnlock() @@ -497,6 +522,8 @@ func (r *Runtime) followerRollback(l *kt.Log) (err error) { err = errors.Wrap(err, "write follower rollback log failed") } + r.markPrepareFinished(l.Index) + return } @@ -519,6 +546,8 @@ func (r *Runtime) followerCommit(l *kt.Log) (err error) { err = cResult.err } + r.markPrepareFinished(l.Index) + return } @@ -659,10 +688,8 @@ func (r *Runtime) leaderDoCommit(req *commitReq) (dbCost time.Duration, tracker result, err = r.sh.Commit(req.data) dbCost = time.Now().Sub(tmStartDB) - if err == nil { - // mark last commit - atomic.StoreUint64(&r.lastCommit, l.Index) - } + // mark last commit + atomic.StoreUint64(&r.lastCommit, l.Index) // send commit tracker = r.rpc(l, r.minCommitFollowers) @@ -699,9 +726,8 @@ func (r *Runtime) followerDoCommit(req *commitReq) (err error) { // do commit, not wrapping underlying handler commit error _, err = r.sh.Commit(req.data) - if err == nil { - atomic.StoreUint64(&r.lastCommit, req.log.Index) - } + // mark last commit + atomic.StoreUint64(&r.lastCommit, req.log.Index) req.result <- &commitResult{err: err} @@ -757,7 +783,7 @@ func (r *Runtime) readLogs() (err error) { for { if l, err = r.wal.Read(); err != nil && err != io.EOF { err = errors.Wrap(err, "load previous logs in wal failed") - break + return } else if err == io.EOF { err = nil break @@ -773,16 +799,16 @@ func (r *Runtime) readLogs() (err error) { var prepareLog *kt.Log if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") - break + return } if lastCommit != r.lastCommit { err = errors.Wrapf(err, "last commit record in wal mismatched (expected: %v, actual: %v)", r.lastCommit, lastCommit) - break + return } if !r.pendingPrepares[prepareLog.Index] { err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") - break + return } r.lastCommit = l.Index // resolve previous prepared @@ -790,18 +816,20 @@ func (r *Runtime) readLogs() (err error) { case kt.LogRollback: var prepareLog *kt.Log if _, prepareLog, err = r.getPrepareLog(l); err != nil { - err = errors.Wrap(err, "previous prepare doe snot exists, node need full recovery") + err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") return } if !r.pendingPrepares[prepareLog.Index] { err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") - break + return } // resolve previous prepared delete(r.pendingPrepares, prepareLog.Index) + case kt.LogBarrier: + case kt.LogNoop: default: err = errors.Wrapf(kt.ErrInvalidLog, "invalid log type: %v", l.Type) - break + 
return } // record nextIndex @@ -842,11 +870,11 @@ func (r *Runtime) markPrepareFinished(index uint64) { } func (r *Runtime) errorSummary(errs map[proto.NodeID]error) error { - failNodes := make([]proto.NodeID, 0, len(errs)) + failNodes := make(map[proto.NodeID]error) for s, err := range errs { if err != nil { - failNodes = append(failNodes, s) + failNodes[s] = err } } @@ -880,16 +908,6 @@ func (r *Runtime) getCaller(id proto.NodeID) Caller { return rawCaller.(Caller) } -// SetCaller injects caller for test purpose. -func (r *Runtime) SetCaller(id proto.NodeID, c Caller) { - r.callerMap.Store(id, c) -} - -// RemoveCaller removes cached caller. -func (r *Runtime) RemoveCaller(id proto.NodeID) { - r.callerMap.Delete(id) -} - func (r *Runtime) goFunc(f func()) { r.wg.Add(1) go func() { diff --git a/kayak/runtime_inject_test.go b/kayak/runtime_inject_test.go new file mode 100644 index 000000000..1f729770e --- /dev/null +++ b/kayak/runtime_inject_test.go @@ -0,0 +1,24 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import "github.com/CovenantSQL/CovenantSQL/proto" + +// SetCaller injects caller for test purpose. +func (r *Runtime) SetCaller(id proto.NodeID, c Caller) { + r.callerMap.Store(id, c) +} diff --git a/kayak/test/runtime_test.go b/kayak/runtime_test.go similarity index 50% rename from kayak/test/runtime_test.go rename to kayak/runtime_test.go index 5d86b43ee..101883bab 100644 --- a/kayak/test/runtime_test.go +++ b/kayak/runtime_test.go @@ -14,12 +14,13 @@ * limitations under the License. 
*/ -package test +package kayak_test import ( "bytes" "context" "database/sql" + "encoding/binary" "fmt" "math/rand" "net" @@ -34,7 +35,7 @@ import ( kt "github.com/CovenantSQL/CovenantSQL/kayak/types" kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" + "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/jordwest/mock-conn" @@ -190,7 +191,313 @@ func (c *fakeCaller) Call(method string, req interface{}, resp interface{}) (err return client.Call(method, req, resp) } -func BenchmarkNewRuntime(b *testing.B) { +func TestRuntime(t *testing.T) { + Convey("runtime test", t, func(c C) { + lvl := log.GetLevel() + log.SetLevel(log.FatalLevel) + defer log.SetLevel(lvl) + db1, err := newSQLiteStorage("test1.db") + So(err, ShouldBeNil) + defer func() { + db1.Close() + os.Remove("test1.db") + }() + db2, err := newSQLiteStorage("test2.db") + So(err, ShouldBeNil) + defer func() { + db2.Close() + os.Remove("test2.db") + }() + + node1 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + node2 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") + + peers := &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Leader: node1, + Servers: []proto.NodeID{ + node1, + node2, + }, + }, + } + + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + err = peers.Sign(privKey) + So(err, ShouldBeNil) + + wal1 := kl.NewMemWal() + defer wal1.Close() + cfg1 := &kt.RuntimeConfig{ + Handler: db1, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: wal1, + NodeID: node1, + ServiceName: "Test", + MethodName: "Call", + } + rt1, err := kayak.NewRuntime(cfg1) + So(err, ShouldBeNil) + + wal2 := kl.NewMemWal() + defer wal2.Close() + cfg2 := &kt.RuntimeConfig{ + Handler: db2, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: wal2, + NodeID: node2, + ServiceName: "Test", + MethodName: "Call", + } + rt2, err := kayak.NewRuntime(cfg2) + So(err, ShouldBeNil) + + m := newFakeMux() + fs1 := newFakeService(rt1) + m.register(node1, fs1) + fs2 := newFakeService(rt2) + m.register(node2, fs2) + + rt1.SetCaller(node2, newFakeCaller(m, node2)) + rt2.SetCaller(node1, newFakeCaller(m, node1)) + + err = rt1.Start() + So(err, ShouldBeNil) + defer rt1.Shutdown() + + err = rt2.Start() + So(err, ShouldBeNil) + defer rt2.Shutdown() + + q1 := &queryStructure{ + Queries: []storage.Query{ + {Pattern: "CREATE TABLE IF NOT EXISTS test (t1 text, t2 text, t3 text)"}, + }, + } + So(err, ShouldBeNil) + + r1 := RandStringRunes(333) + r2 := RandStringRunes(333) + r3 := RandStringRunes(333) + + q2 := &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INSERT INTO test (t1, t2, t3) VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, + }, + }, + } + + rt1.Apply(context.Background(), q1) + rt2.Apply(context.Background(), q2) + rt1.Apply(context.Background(), q2) + db1.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT * FROM test"}, + }) + + var count uint64 + atomic.StoreUint64(&count, 1) + + for i := 0; i != 1000; i++ { + atomic.AddUint64(&count, 1) + q := &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INSERT INTO test (t1, t2, t3) 
VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, + }, + }, + } + + _, _, err = rt1.Apply(context.Background(), q) + So(err, ShouldBeNil) + } + + // test rollback + q := &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INVALID QUERY", + }, + }, + } + _, _, err = rt1.Apply(context.Background(), q) + So(err, ShouldNotBeNil) + + // test timeout + q = &queryStructure{ + Queries: []storage.Query{ + { + Pattern: "INSERT INTO test (t1, t2, t3) VALUES(?, ?, ?)", + Args: []sql.NamedArg{ + sql.Named("", r1), + sql.Named("", r2), + sql.Named("", r3), + }, + }, + }, + } + cancelCtx, cancelCtxFunc := context.WithCancel(context.Background()) + cancelCtxFunc() + _, _, err = rt1.Apply(cancelCtx, q) + So(err, ShouldNotBeNil) + + total := atomic.LoadUint64(&count) + _, _, d1, _ := db1.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT COUNT(1) FROM test"}, + }) + So(d1, ShouldHaveLength, 1) + So(d1[0], ShouldHaveLength, 1) + So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) + + _, _, d2, _ := db2.Query(context.Background(), []storage.Query{ + {Pattern: "SELECT COUNT(1) FROM test"}, + }) + So(d2, ShouldHaveLength, 1) + So(d2[0], ShouldHaveLength, 1) + So(fmt.Sprint(d2[0][0]), ShouldResemble, fmt.Sprint(total)) + }) + Convey("trivial cases", t, func() { + node1 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + node2 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") + node3 := proto.NodeID("000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8") + + peers := &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Leader: node1, + Servers: []proto.NodeID{ + node1, + node2, + }, + }, + } + + _, err := kayak.NewRuntime(nil) + So(err, ShouldNotBeNil) + _, err = kayak.NewRuntime(&kt.RuntimeConfig{}) + So(err, ShouldNotBeNil) + _, err = kayak.NewRuntime(&kt.RuntimeConfig{ + Peers: peers, + }) + So(err, ShouldNotBeNil) + + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + err = peers.Sign(privKey) + So(err, ShouldBeNil) + + _, err = kayak.NewRuntime(&kt.RuntimeConfig{ + Peers: peers, + NodeID: node3, + }) + So(err, ShouldNotBeNil) + }) + Convey("test log loading", t, func() { + w, err := kl.NewLevelDBWal("testLoad.db") + defer os.RemoveAll("testLoad.db") + So(err, ShouldBeNil) + err = w.Write(&kt.Log{ + LogHeader: kt.LogHeader{ + Index: 0, + Type: kt.LogPrepare, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), + }, + Data: []byte("happy1"), + }) + So(err, ShouldBeNil) + err = w.Write(&kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Type: kt.LogPrepare, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), + }, + Data: []byte("happy1"), + }) + So(err, ShouldBeNil) + data := make([]byte, 16) + binary.BigEndian.PutUint64(data, 0) // prepare log index + binary.BigEndian.PutUint64(data, 0) // last commit index + err = w.Write(&kt.Log{ + LogHeader: kt.LogHeader{ + Index: 2, + Type: kt.LogCommit, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), + }, + Data: data, + }) + So(err, ShouldBeNil) + data = make([]byte, 8) + binary.BigEndian.PutUint64(data, 1) // prepare log index + err = w.Write(&kt.Log{ + LogHeader: kt.LogHeader{ + Index: 3, + Type: kt.LogRollback, + Producer: proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000"), + }, + Data: data, + }) + So(err, ShouldBeNil) + 
w.Close() + + w, err = kl.NewLevelDBWal("testLoad.db") + So(err, ShouldBeNil) + defer w.Close() + + node1 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + peers := &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Leader: node1, + Servers: []proto.NodeID{node1}, + }, + } + + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + err = peers.Sign(privKey) + So(err, ShouldBeNil) + + cfg := &kt.RuntimeConfig{ + Handler: nil, + PrepareThreshold: 1.0, + CommitThreshold: 1.0, + PrepareTimeout: time.Second, + CommitTimeout: 10 * time.Second, + Peers: peers, + Wal: w, + NodeID: node1, + ServiceName: "Test", + MethodName: "Call", + } + rt, err := kayak.NewRuntime(cfg) + So(err, ShouldBeNil) + + So(rt.Start(), ShouldBeNil) + So(func() { rt.Start() }, ShouldNotPanic) + + So(rt.Shutdown(), ShouldBeNil) + So(func() { rt.Shutdown() }, ShouldNotPanic) + }) +} + +func BenchmarkRuntime(b *testing.B) { Convey("runtime test", b, func(c C) { log.SetLevel(log.DebugLevel) f, err := os.OpenFile("test.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) @@ -230,6 +537,7 @@ func BenchmarkNewRuntime(b *testing.B) { So(err, ShouldBeNil) wal1 := kl.NewMemWal() + defer wal1.Close() cfg1 := &kt.RuntimeConfig{ Handler: db1, PrepareThreshold: 1.0, @@ -246,6 +554,7 @@ func BenchmarkNewRuntime(b *testing.B) { So(err, ShouldBeNil) wal2 := kl.NewMemWal() + defer wal2.Close() cfg2 := &kt.RuntimeConfig{ Handler: db2, PrepareThreshold: 1.0, diff --git a/kayak/tracker.go b/kayak/tracker.go index 7eef7ed6a..986a9198b 100644 --- a/kayak/tracker.go +++ b/kayak/tracker.go @@ -90,6 +90,7 @@ func (t *rpcTracker) send() { func (t *rpcTracker) callSingle(idx int) { err := t.r.getCaller(t.nodes[idx]).Call(t.method, t.req, nil) + defer t.wg.Done() t.errLock.Lock() defer t.errLock.Unlock() t.errors[t.nodes[idx]] = err diff --git a/kayak/tracker_test.go b/kayak/tracker_test.go new file mode 100644 index 000000000..6ffccce6b --- /dev/null +++ b/kayak/tracker_test.go @@ -0,0 +1,103 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "context" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/pkg/errors" + . 
"github.com/smartystreets/goconvey/convey" +) + +type fakeTrackerCaller struct { + c C +} + +func (c *fakeTrackerCaller) Call(method string, req interface{}, resp interface{}) (err error) { + time.Sleep(time.Millisecond * 500) + c.c.So(method, ShouldEqual, "test") + if req != 1 { + err = errors.New("invalid result") + } + return +} + +func TestTracker(t *testing.T) { + Convey("test tracker", t, func(c C) { + nodeID1 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") + nodeID2 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") + r := &Runtime{ + rpcMethod: "test", + followers: []proto.NodeID{ + nodeID1, + nodeID2, + }, + } + r.SetCaller(nodeID1, &fakeTrackerCaller{c: c}) + r.SetCaller(nodeID2, &fakeTrackerCaller{c: c}) + t1 := newTracker(r, 1, 0) + t1.send() + _, meets, _ := t1.get(context.Background()) + So(meets, ShouldBeTrue) + + t2 := newTracker(r, 1, 1) + t2.send() + r2, meets, _ := t2.get(context.Background()) + So(r2, ShouldNotBeEmpty) + So(meets, ShouldBeTrue) + + t3 := newTracker(r, 1, 1) + t3.send() + ctx1, cancelCtx1 := context.WithTimeout(context.Background(), time.Millisecond*1) + defer cancelCtx1() + r3, meets, finished := t3.get(ctx1) + So(r3, ShouldBeEmpty) + So(meets, ShouldBeFalse) + So(finished, ShouldBeFalse) + + r3, meets, finished = t3.get(context.Background()) + So(r3, ShouldNotBeEmpty) + So(meets, ShouldBeTrue) + + t4 := newTracker(r, 1, 2) + t4.send() + r4, meets, finished := t4.get(context.Background()) + So(r4, ShouldHaveLength, 2) + So(meets, ShouldBeTrue) + So(finished, ShouldBeTrue) + + t5 := newTracker(r, 2, 2) + t5.send() + ctx2, cancelCtx2 := context.WithTimeout(context.Background(), time.Millisecond*1) + defer cancelCtx2() + r5, meets, finished := t5.get(ctx2) + So(r5, ShouldBeEmpty) + So(meets, ShouldBeFalse) + So(finished, ShouldBeFalse) + + r5, meets, finished = t5.get(context.Background()) + So(r5, ShouldHaveLength, 2) + So(meets, ShouldBeTrue) + So(finished, ShouldBeTrue) + + t5.close() + So(t5.closed, ShouldEqual, 1) + }) +} diff --git a/kayak/types/doc.go b/kayak/types/doc.go index 28c02e239..ffdd029c8 100644 --- a/kayak/types/doc.go +++ b/kayak/types/doc.go @@ -14,4 +14,5 @@ * limitations under the License. */ +// Package types defines required types of kayak. package types diff --git a/kayak/types/errors.go b/kayak/types/errors.go index baa5b824b..6ceab9c9d 100644 --- a/kayak/types/errors.go +++ b/kayak/types/errors.go @@ -33,4 +33,6 @@ var ( ErrNotInPeer = errors.New("node not in peer") // ErrNeedRecovery represents current follower node needs recovery, back-off is required by leader. ErrNeedRecovery = errors.New("need recovery") + // ErrInvalidConfig represents invalid kayak runtime config. + ErrInvalidConfig = errors.New("invalid runtime config") ) diff --git a/kayak/types/log.go b/kayak/types/log.go index a69ce2ef5..38238f72f 100644 --- a/kayak/types/log.go +++ b/kayak/types/log.go @@ -30,6 +30,8 @@ const ( LogRollback // LogCommit defines the commit phase of a commit. LogCommit + // LogCheckpoint defines the checkpoint log (created/virtually created by block production or log truncation). + LogCheckpoint // LogBarrier defines barrier log, all open windows should be waiting this operations to complete. LogBarrier // LogNoop defines noop log. 
@@ -44,12 +46,14 @@ func (t LogType) String() (s string) { return "LogRollback" case LogCommit: return "LogCommit" + case LogCheckpoint: + return "LogCheckpoint" case LogBarrier: return "LogBarrier" case LogNoop: return "LogNoop" default: - return + return "Unknown" } } diff --git a/kayak/types/log_test.go b/kayak/types/log_test.go new file mode 100644 index 000000000..c737e9b91 --- /dev/null +++ b/kayak/types/log_test.go @@ -0,0 +1,31 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestLogType_String(t *testing.T) { + Convey("test log string function", t, func() { + for i := LogPrepare; i <= LogNoop+1; i++ { + So(i.String(), ShouldNotBeEmpty) + } + }) +} diff --git a/kayak/wal/doc.go b/kayak/wal/doc.go index 953b5f875..31e6ee6b4 100644 --- a/kayak/wal/doc.go +++ b/kayak/wal/doc.go @@ -14,4 +14,5 @@ * limitations under the License. */ +// Package wal defines toy implementations of kayak wal. package wal diff --git a/kayak/wal/leveldb_wal.go b/kayak/wal/leveldb_wal.go index 813b8c02d..8382744de 100644 --- a/kayak/wal/leveldb_wal.go +++ b/kayak/wal/leveldb_wal.go @@ -20,7 +20,6 @@ import ( "bytes" "encoding/binary" "io" - "sort" "sync" "sync/atomic" @@ -37,20 +36,15 @@ var ( logHeaderKeyPrefix = []byte{'L', 'H'} // logDataKeyPrefix defines the leveldb data key prefix. logDataKeyPrefix = []byte{'L', 'D'} - // baseIndexKey defines the base index key. - baseIndexKey = []byte{'B', 'I'} ) // LevelDBWal defines a toy wal using leveldb as storage. type LevelDBWal struct { - db *leveldb.DB - it iterator.Iterator - base uint64 - closed uint32 - readLock sync.Mutex - read uint32 - pending []uint64 - pendingLock sync.Mutex + db *leveldb.DB + it iterator.Iterator + closed uint32 + readLock sync.Mutex + read uint32 } // NewLevelDBWal returns new leveldb wal instance. @@ -61,15 +55,6 @@ func NewLevelDBWal(filename string) (p *LevelDBWal, err error) { return } - // load current base - var baseValue []byte - if baseValue, err = p.db.Get(baseIndexKey, nil); err == nil { - // decode base - p.base = p.bytesToUint64(baseValue) - } else { - err = nil - } - return } @@ -80,17 +65,14 @@ func (p *LevelDBWal) Write(l *kt.Log) (err error) { return } + // mark wal as already read + atomic.CompareAndSwapUint32(&p.read, 0, 1) + if l == nil { err = ErrInvalidLog return } - if l.Index < p.base { - // already exists - err = ErrAlreadyExists - return - } - // build header headerKey headerKey := append(append([]byte(nil), logHeaderKeyPrefix...), p.uint64ToBytes(l.Index)...) @@ -130,13 +112,16 @@ func (p *LevelDBWal) Write(l *kt.Log) (err error) { return } - p.updatePending(l.Index) - return } // Read implements Wal.Read. 
func (p *LevelDBWal) Read() (l *kt.Log, err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return + } + if atomic.LoadUint32(&p.read) == 1 { err = io.EOF return @@ -154,10 +139,6 @@ func (p *LevelDBWal) Read() (l *kt.Log, err error) { if p.it.Next() { // load l, err = p.load(p.it.Value()) - // update base and pending - if err == nil { - p.updatePending(l.Index) - } return } @@ -209,32 +190,15 @@ func (p *LevelDBWal) Close() { } } -func (p *LevelDBWal) updatePending(index uint64) { - p.pendingLock.Lock() - defer p.pendingLock.Unlock() - - if atomic.CompareAndSwapUint64(&p.base, index, index+1) { - // process pending - for len(p.pending) > 0 { - if !atomic.CompareAndSwapUint64(&p.base, p.pending[0], p.pending[0]+1) { - break - } - p.pending = p.pending[1:] - } - - // commit base index to database - _ = p.db.Put(baseIndexKey, p.uint64ToBytes(atomic.LoadUint64(&p.base)), nil) - } else { - i := sort.Search(len(p.pending), func(i int) bool { - return p.pending[i] >= index - }) - - if len(p.pending) == i || p.pending[i] != index { - p.pending = append(p.pending, 0) - copy(p.pending[i+1:], p.pending[i:]) - p.pending[i] = index - } +// GetDB returns the leveldb for storage extensions. +func (p *LevelDBWal) GetDB() (d *leveldb.DB, err error) { + if atomic.LoadUint32(&p.closed) == 1 { + err = ErrWalClosed + return } + + d = p.db + return } func (p *LevelDBWal) load(logHeader []byte) (l *kt.Log, err error) { @@ -266,7 +230,3 @@ func (p *LevelDBWal) uint64ToBytes(o uint64) (res []byte) { binary.BigEndian.PutUint64(res, o) return } - -func (p *LevelDBWal) bytesToUint64(b []byte) uint64 { - return binary.BigEndian.Uint64(b) -} diff --git a/kayak/wal/leveldb_wal_test.go b/kayak/wal/leveldb_wal_test.go index db21e5aca..25b005be5 100644 --- a/kayak/wal/leveldb_wal_test.go +++ b/kayak/wal/leveldb_wal_test.go @@ -17,6 +17,7 @@ package wal import ( + "io" "os" "testing" @@ -26,15 +27,17 @@ import ( ) func TestLevelDBWal_Write(t *testing.T) { - Convey("test leveldb wal write", t, func() { + Convey("wal write/get/close", t, func() { + dbFile := "testWrite.ldb" + var p *LevelDBWal var err error - p, err = NewLevelDBWal("testWrite.ldb") + p, err = NewLevelDBWal(dbFile) So(err, ShouldBeNil) - defer func() { - p.Close() - os.RemoveAll("testWrite.ldb") - }() + defer os.RemoveAll(dbFile) + + err = p.Write(nil) + So(err, ShouldNotBeNil) l1 := &kt.Log{ LogHeader: kt.LogHeader{ @@ -56,6 +59,9 @@ func TestLevelDBWal_Write(t *testing.T) { So(err, ShouldBeNil) So(l, ShouldResemble, l1) + _, err = p.Get(10000) + So(err, ShouldNotBeNil) + // test consecutive writes l2 := &kt.Log{ LogHeader: kt.LogHeader{ @@ -87,5 +93,54 @@ func TestLevelDBWal_Write(t *testing.T) { } err = p.Write(l3) So(err, ShouldBeNil) + + _, err = p.Read() + So(err, ShouldEqual, io.EOF) + + p.Close() + + _, err = p.Read() + So(err, ShouldEqual, ErrWalClosed) + + err = p.Write(l1) + So(err, ShouldEqual, ErrWalClosed) + + _, err = p.Get(l1.Index) + So(err, ShouldEqual, ErrWalClosed) + + // load again + p, err = NewLevelDBWal(dbFile) + So(err, ShouldBeNil) + + for i := 0; i != 4; i++ { + l, err = p.Read() + So(err, ShouldBeNil) + So(l.Index, ShouldEqual, i) + } + + _, err = p.Read() + So(err, ShouldEqual, io.EOF) + + p.Close() + + // load again + p, err = NewLevelDBWal(dbFile) + So(err, ShouldBeNil) + + // not complete read + for i := 0; i != 3; i++ { + l, err = p.Read() + So(err, ShouldBeNil) + So(l.Index, ShouldEqual, i) + } + + p.Close() + + // close multiple times + So(p.Close, ShouldNotPanic) + }) + Convey("open failed test", t, 
func() { + _, err := NewLevelDBWal("") + So(err, ShouldNotBeNil) }) } diff --git a/kayak/wal/mem_wal.go b/kayak/wal/mem_wal.go index 0b2e7d2ed..a1e63d6aa 100644 --- a/kayak/wal/mem_wal.go +++ b/kayak/wal/mem_wal.go @@ -79,23 +79,7 @@ func (p *MemWal) Read() (l *kt.Log, err error) { return } - p.RLock() - defer p.RUnlock() - - if atomic.LoadUint64(&p.offset) >= uint64(len(p.logs)) { - err = io.EOF - return - } - - index := atomic.AddUint64(&p.offset, 1) - 1 - if index >= uint64(len(p.logs)) { - // error - err = io.EOF - return - } - - l = p.logs[index] - + err = io.EOF return } diff --git a/kayak/wal/mem_wal_test.go b/kayak/wal/mem_wal_test.go index 1a539ccb0..c79db6959 100644 --- a/kayak/wal/mem_wal_test.go +++ b/kayak/wal/mem_wal_test.go @@ -17,6 +17,7 @@ package wal import ( + "io" "sync" "testing" @@ -53,6 +54,9 @@ func TestMemWal_Write(t *testing.T) { So(err, ShouldBeNil) So(l, ShouldResemble, l1) + _, err = p.Get(10000) + So(err, ShouldNotBeNil) + // test consecutive writes l2 := &kt.Log{ LogHeader: kt.LogHeader{ @@ -93,6 +97,19 @@ func TestMemWal_Write(t *testing.T) { So(p.revIndex, ShouldHaveLength, 4) So(p.revIndex[l3.Index], ShouldEqual, 3) So(p.offset, ShouldEqual, 4) + + _, err = p.Read() + So(err, ShouldEqual, io.EOF) + + p.Close() + _, err = p.Read() + So(err, ShouldEqual, ErrWalClosed) + + _, err = p.Get(1) + So(err, ShouldEqual, ErrWalClosed) + + err = p.Write(l1) + So(err, ShouldEqual, ErrWalClosed) }) } diff --git a/metric/rpc.go b/metric/rpc.go index 232854826..57ee7eb2e 100644 --- a/metric/rpc.go +++ b/metric/rpc.go @@ -147,6 +147,6 @@ func (cc *CollectClient) UploadMetrics(BPNodeID proto.NodeID) (err error) { if err != nil { log.Errorf("calling RPC %s failed: %s", reqType, err) } - log.Infof("resp %s: %v", reqType, resp) + log.Debugf("resp %s: %v", reqType, resp) return } diff --git a/proto/servers.go b/proto/servers.go index 27d935fee..fc61c2cd4 100644 --- a/proto/servers.go +++ b/proto/servers.go @@ -18,7 +18,7 @@ package proto import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" ) //go:generate hsp @@ -34,57 +34,27 @@ type PeersHeader struct { // Peers defines the peers configuration. type Peers struct { PeersHeader - Hash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Clone makes a deep copy of Peers. func (p *Peers) Clone() (copy Peers) { copy.Version = p.Version + copy.Term = p.Term copy.Leader = p.Leader copy.Servers = append(copy.Servers, p.Servers...) - copy.Signee = p.Signee - copy.Signature = p.Signature + copy.DefaultHashSignVerifierImpl = p.DefaultHashSignVerifierImpl return } // Sign generates signature. func (p *Peers) Sign(signer *asymmetric.PrivateKey) (err error) { - var enc []byte - if enc, err = p.PeersHeader.MarshalHash(); err != nil { - return - } - - var h = hash.THashH(enc) - if p.Signature, err = signer.Sign(h[:]); err != nil { - return - } - - p.Hash = h - p.Signee = signer.PubKey() - return + return p.DefaultHashSignVerifierImpl.Sign(&p.PeersHeader, signer) } // Verify verify signature. 
func (p *Peers) Verify() (err error) { - var enc []byte - if enc, err = p.PeersHeader.MarshalHash(); err != nil { - return - } - - var h = hash.THashH(enc) - if !p.Hash.IsEqual(&h) { - err = ErrHashVerification - return - } - - if !p.Signature.Verify(h[:], p.Signee) { - err = ErrSignVerification - return - } - - return + return p.DefaultHashSignVerifierImpl.Verify(&p.PeersHeader) } // Find finds the index of the server with the specified key in the server list. diff --git a/proto/servers_gen.go b/proto/servers_gen.go index 7357a4dfd..8041564e7 100644 --- a/proto/servers_gen.go +++ b/proto/servers_gen.go @@ -10,35 +10,15 @@ import ( func (z *Peers) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) + // map header, size 2 + o = append(o, 0x82, 0x82) if oTemp, err := z.PeersHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.Hash.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -48,19 +28,7 @@ func (z *Peers) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Peers) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 12 + z.PeersHeader.Msgsize() + 5 + z.Hash.Msgsize() + s = 1 + 12 + z.PeersHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/proto/servers_test.go b/proto/servers_test.go new file mode 100644 index 000000000..1260b6227 --- /dev/null +++ b/proto/servers_test.go @@ -0,0 +1,77 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package proto + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestPeers(t *testing.T) { + Convey("test peers", t, func() { + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + p := &Peers{ + PeersHeader: PeersHeader{ + Term: 1, + Leader: NodeID("00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9"), + Servers: []NodeID{ + NodeID("00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9"), + NodeID("00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35"), + }, + }, + } + err = p.Sign(privKey) + So(err, ShouldBeNil) + err = p.Verify() + So(err, ShouldBeNil) + + // after encode/decode + buf, err := utils.EncodeMsgPack(p) + var peers *Peers + err = utils.DecodeMsgPack(buf.Bytes(), &peers) + So(err, ShouldBeNil) + err = peers.Verify() + So(err, ShouldBeNil) + + peers2 := peers.Clone() + err = peers2.Verify() + So(err, ShouldBeNil) + + i, found := peers.Find(NodeID("00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35")) + So(i, ShouldEqual, 1) + So(found, ShouldBeTrue) + + i, found = peers.Find(NodeID("0000000000000000000000000000000000000000000000000000000000000001")) + So(found, ShouldBeFalse) + + // verify hash failed + peers.Term = 2 + err = peers.Verify() + So(err, ShouldNotBeNil) + err = peers.Sign(privKey) + So(err, ShouldBeNil) + + // verify failed + p.Signature = peers.Signature + err = p.Verify() + So(err, ShouldNotBeNil) + }) +} diff --git a/route/acl.go b/route/acl.go index 25fdf7494..9664f7e3c 100644 --- a/route/acl.go +++ b/route/acl.go @@ -77,8 +77,6 @@ const ( DBSAck // DBSDeploy is used by BP to create/drop/update database DBSDeploy - // DBSGetRequest is used by observer to view original request - DBSGetRequest // DBCCall is used by Miner for data consistency DBCCall // BPDBCreateDatabase is used by client to create database @@ -93,14 +91,10 @@ const ( SQLCAdviseNewBlock // SQLCAdviseBinLog is usd by sqlchain to advise binlog between adjacent node SQLCAdviseBinLog - // SQLCAdviseResponsedQuery is used by sqlchain to advice response query between adjacent node - SQLCAdviseResponsedQuery - // SQLCAdviseAckedQuery is used by sqlchain to advise response ack between adjacent node + // SQLCAdviseAckedQuery is used by sqlchain to advice response query between adjacent node SQLCAdviseAckedQuery // SQLCFetchBlock is used by sqlchain to fetch block from adjacent nodes SQLCFetchBlock - // SQLCFetchAckedQuery is used by sqlchain to fetch response ack from adjacent nodes - SQLCFetchAckedQuery // SQLCSignBilling is used by sqlchain to response billing signature for periodic billing request SQLCSignBilling // SQLCLaunchBilling is used by blockproducer to trigger the billing process in sqlchain @@ -109,8 +103,6 @@ const ( SQLCSubscribeTransactions // SQLCCancelSubscription is used by sqlchain to handle observer subscription cancellation request SQLCCancelSubscription - // OBSAdviseAckedQuery is used by sqlchain to push acked query to observers - OBSAdviseAckedQuery // OBSAdviseNewBlock is used by sqlchain to push new block to observers OBSAdviseNewBlock // MCCAdviseNewBlock is used by block producer to push block to adjacent nodes @@ -167,8 +159,6 @@ func (s RemoteFunc) String() string { return "DBS.Ack" case DBSDeploy: return "DBS.Deploy" - case DBSGetRequest: - return "DBS.GetRequest" case DBCCall: return "DBC.Call" case BPDBCreateDatabase: @@ -183,14 +173,10 @@ func (s RemoteFunc) String() string { return "SQLC.AdviseNewBlock" case SQLCAdviseBinLog: return "SQLC.AdviseBinLog" - case SQLCAdviseResponsedQuery: - return 
"SQLC.AdviseResponsedQuery" case SQLCAdviseAckedQuery: return "SQLC.AdviseAckedQuery" case SQLCFetchBlock: return "SQLC.FetchBlock" - case SQLCFetchAckedQuery: - return "SQLC.FetchAckedQuery" case SQLCSignBilling: return "SQLC.SignBilling" case SQLCLaunchBilling: @@ -199,8 +185,6 @@ func (s RemoteFunc) String() string { return "SQLC.SubscribeTransactions" case SQLCCancelSubscription: return "SQLC.CancelSubscription" - case OBSAdviseAckedQuery: - return "OBS.AdviseAckedQuery" case OBSAdviseNewBlock: return "OBS.AdviseNewBlock" case MCCAdviseNewBlock: diff --git a/route/service.go b/route/service.go index ad7d34da7..b02725380 100644 --- a/route/service.go +++ b/route/service.go @@ -50,7 +50,7 @@ func NewDHTService(DHTStorePath string, persistImpl consistent.Persistence, init } // Nil RPC does nothing just for probe -func (DHT *DHTService) Nil(req *proto.PingReq, resp *proto.PingResp) (err error) { +func (DHT *DHTService) Nil(req *interface{}, resp *interface{}) (err error) { return } diff --git a/rpc/leak_test.go b/rpc/leak_test.go index d90f4fe83..91b96159c 100644 --- a/rpc/leak_test.go +++ b/rpc/leak_test.go @@ -43,7 +43,8 @@ func TestSessionPool_SessionBroken(t *testing.T) { os.Remove(FJ(testWorkingDir, "./leak/leader/dht.db")) os.Remove(FJ(testWorkingDir, "./leak/leader/dht.db-shm")) os.Remove(FJ(testWorkingDir, "./leak/leader/dht.db-wal")) - os.Remove(FJ(testWorkingDir, "./leak/leader/kayak.db")) + os.Remove(FJ(testWorkingDir, "./leak/kayak.db")) + os.RemoveAll(FJ(testWorkingDir, "./leak/kayak.ldb")) leader, err := utils.RunCommandNB( FJ(baseDir, "./bin/cqld"), diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index cafd60d43..d39b12c86 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -61,12 +61,12 @@ func NewPersistentCaller(target proto.NodeID) *PersistentCaller { } } -func (c *PersistentCaller) initClient(method string) (err error) { +func (c *PersistentCaller) initClient(isAnonymous bool) (err error) { c.Lock() defer c.Unlock() if c.client == nil { var conn net.Conn - conn, err = DialToNode(c.TargetID, c.pool, method == route.DHTPing.String()) + conn, err = DialToNode(c.TargetID, c.pool, isAnonymous) if err != nil { log.WithField("target", c.TargetID).WithError(err).Error("dial to node failed") return @@ -83,28 +83,21 @@ func (c *PersistentCaller) initClient(method string) (err error) { // Call invokes the named function, waits for it to complete, and returns its error status. 
func (c *PersistentCaller) Call(method string, args interface{}, reply interface{}) (err error) { - err = c.initClient(method) + err = c.initClient(method == route.DHTPing.String()) if err != nil { log.WithError(err).Error("init PersistentCaller client failed") return } err = c.client.Call(method, args, reply) if err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { + if err == io.EOF || + err == io.ErrUnexpectedEOF || + err == io.ErrClosedPipe || + err == rpc.ErrShutdown { // if got EOF, retry once - c.Lock() - c.Close() - c.client = nil - c.Unlock() - err = c.initClient(method) + err = c.Reconnect(method) if err != nil { - log.WithField("rpc", method).WithError(err).Error("second init client for RPC failed") - return - } - err = c.client.Call(method, args, reply) - if err != nil { - log.WithField("rpc", method).WithError(err).Error("second time call RPC failed") - return + log.WithField("rpc", method).WithError(err).Error("reconnect failed") } } log.WithField("rpc", method).WithError(err).Error("call RPC failed") @@ -112,6 +105,20 @@ func (c *PersistentCaller) Call(method string, args interface{}, reply interface return } +// Reconnect tries to rebuild RPC client +func (c *PersistentCaller) Reconnect(method string) (err error) { + c.Lock() + c.Close() + c.client = nil + c.Unlock() + err = c.initClient(method == route.DHTPing.String()) + if err != nil { + log.WithField("rpc", method).WithError(err).Error("second init client for RPC failed") + return + } + return +} + // CloseStream closes the stream and RPC client func (c *PersistentCaller) CloseStream() { if c.client != nil { diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index c3442336c..13c786a28 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -342,20 +342,28 @@ func BenchmarkPersistentCaller_Call(b *testing.B) { client = NewPersistentCaller(conf.GConf.BP.NodeID) b.Run("benchmark Persistent Call Nil", func(b *testing.B) { - var ( - req proto.PingReq - resp proto.PingResp - ) - b.ResetTimer() for i := 0; i < b.N; i++ { - err = client.Call("DHT.Nil", &req, &resp) + err = client.Call("DHT.Nil", nil, nil) if err != nil { b.Error(err) } } }) + b.Run("benchmark Persistent Call parallel Nil", func(b *testing.B) { + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := client.Call("DHT.Nil", nil, nil) + if err != nil { + b.Error(err) + } + } + }) + }) + req := &proto.FindNeighborReq{ ID: "1234567812345678123456781234567812345678123456781234567812345678", Count: 10, diff --git a/sqlchain/ackindex.go b/sqlchain/ackindex.go new file mode 100644 index 000000000..8e84d8bf5 --- /dev/null +++ b/sqlchain/ackindex.go @@ -0,0 +1,239 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package sqlchain + +import ( + "sync" + "sync/atomic" + + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" +) + +var ( + // Global atomic counters for stats + multiIndexCount int32 + responseCount int32 + ackTrackerCount int32 +) + +type ackTracker struct { + resp *types.SignedResponseHeader + ack *types.SignedAckHeader +} + +type multiAckIndex struct { + sync.RWMutex + ri map[types.QueryKey]*types.SignedResponseHeader // ri is the index of queries without acks + qi map[types.QueryKey]*ackTracker // qi is the index of query trackers +} + +func (i *multiAckIndex) addResponse(resp *types.SignedResponseHeader) (err error) { + var key = resp.ResponseHeader.Request.GetQueryKey() + log.Debugf("Adding key %s <-- resp %s", &key, resp.Hash()) + i.Lock() + defer i.Unlock() + if oresp, ok := i.ri[key]; ok { + if oresp.Hash() != resp.Hash() { + err = errors.Wrapf(ErrResponseSeqNotMatch, "add key %s <-- resp %s", &key, resp.Hash()) + return + } + return + } + i.ri[key] = resp + atomic.AddInt32(&responseCount, 1) + return +} + +func (i *multiAckIndex) register(ack *types.SignedAckHeader) (err error) { + var ( + resp *types.SignedResponseHeader + ok bool + key = ack.SignedRequestHeader().GetQueryKey() + ) + log.Debugf("Registering key %s <-- ack %s", &key, ack.Hash()) + + i.Lock() + defer i.Unlock() + if resp, ok = i.ri[key]; !ok { + err = errors.Wrapf(ErrQueryNotFound, "register key %s <-- ack %s", &key, ack.Hash()) + return + } + delete(i.ri, key) + i.qi[key] = &ackTracker{ + resp: resp, + ack: ack, + } + atomic.AddInt32(&responseCount, -1) + atomic.AddInt32(&ackTrackerCount, 1) + return +} + +func (i *multiAckIndex) remove(ack *types.SignedAckHeader) (err error) { + var key = ack.SignedRequestHeader().GetQueryKey() + log.Debugf("Removing key %s -x- ack %s", &key, ack.Hash()) + i.Lock() + defer i.Unlock() + if _, ok := i.ri[key]; ok { + delete(i.ri, key) + atomic.AddInt32(&responseCount, -1) + return + } + if oack, ok := i.qi[key]; ok { + if oack.ack.Hash() != ack.Hash() { + err = errors.Wrapf( + ErrMultipleAckOfSeqNo, "remove key %s -x- ack %s", &key, ack.Hash()) + return + } + delete(i.qi, key) + atomic.AddInt32(&ackTrackerCount, -1) + return + } + err = errors.Wrapf(ErrQueryNotFound, "remove key %s -x- ack %s", &key, ack.Hash()) + return +} + +func (i *multiAckIndex) acks() (ret []*types.SignedAckHeader) { + i.RLock() + defer i.RUnlock() + for _, v := range i.qi { + ret = append(ret, v.ack) + } + return +} + +func (i *multiAckIndex) expire() { + i.RLock() + defer i.RUnlock() + // TODO(leventeliu): need further processing. 
+ for _, v := range i.ri { + log.WithFields(log.Fields{ + "request_hash": v.Request.Hash(), + "request_time": v.Request.Timestamp, + "request_type": v.Request.QueryType, + "request_node": v.Request.NodeID, + "response_hash": v.Hash(), + "response_node": v.NodeID, + "response_time": v.Timestamp, + }).Warn("Query expires without acknowledgement") + } + for _, v := range i.qi { + log.WithFields(log.Fields{ + "request_hash": v.resp.Request.Hash(), + "request_time": v.resp.Request.Timestamp, + "request_type": v.resp.Request.QueryType, + "request_node": v.resp.Request.NodeID, + "response_hash": v.ack.Response.Hash(), + "response_node": v.ack.Response.NodeID, + "response_time": v.ack.Response.Timestamp, + "ack_hash": v.ack.Hash(), + "ack_node": v.ack.NodeID, + "ack_time": v.ack.Timestamp, + }).Warn("Query expires without block producing") + } +} + +type ackIndex struct { + hi map[int32]*multiAckIndex + + sync.RWMutex + barrier int32 +} + +func newAckIndex() *ackIndex { + return &ackIndex{ + hi: make(map[int32]*multiAckIndex), + } +} + +func (i *ackIndex) load(h int32) (mi *multiAckIndex, err error) { + var ok bool + i.Lock() + defer i.Unlock() + if h < i.barrier { + err = errors.Wrapf(ErrQueryExpired, "loading index at height %d barrier %d", h, i.barrier) + return + } + if mi, ok = i.hi[h]; !ok { + mi = &multiAckIndex{ + ri: make(map[types.QueryKey]*types.SignedResponseHeader), + qi: make(map[types.QueryKey]*ackTracker), + } + i.hi[h] = mi + atomic.AddInt32(&multiIndexCount, 1) + } + return +} + +func (i *ackIndex) advance(h int32) { + var dl []*multiAckIndex + i.Lock() + for x := i.barrier; x < h; x++ { + if mi, ok := i.hi[x]; ok { + dl = append(dl, mi) + } + delete(i.hi, x) + } + i.barrier = h + i.Unlock() + // Record expired and not acknowledged queries + for _, v := range dl { + v.expire() + atomic.AddInt32(&responseCount, int32(-len(v.ri))) + atomic.AddInt32(&ackTrackerCount, int32(-len(v.qi))) + } + atomic.AddInt32(&multiIndexCount, int32(-len(dl))) +} + +func (i *ackIndex) addResponse(h int32, resp *types.SignedResponseHeader) (err error) { + var mi *multiAckIndex + if mi, err = i.load(h); err != nil { + return + } + return mi.addResponse(resp) +} + +func (i *ackIndex) register(h int32, ack *types.SignedAckHeader) (err error) { + var mi *multiAckIndex + if mi, err = i.load(h); err != nil { + return + } + return mi.register(ack) +} + +func (i *ackIndex) remove(h int32, ack *types.SignedAckHeader) (err error) { + var mi *multiAckIndex + if mi, err = i.load(h); err != nil { + return + } + return mi.remove(ack) +} + +func (i *ackIndex) acks(h int32) (ret []*types.SignedAckHeader) { + var b = func() int32 { + i.RLock() + defer i.RUnlock() + return i.barrier + }() + for x := b; x <= h; x++ { + if mi, err := i.load(x); err == nil { + ret = append(ret, mi.acks()...) + } + } + return +} diff --git a/sqlchain/ackindex_test.go b/sqlchain/ackindex_test.go new file mode 100644 index 000000000..77ccd4d18 --- /dev/null +++ b/sqlchain/ackindex_test.go @@ -0,0 +1,60 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sqlchain + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" + . "github.com/smartystreets/goconvey/convey" +) + +func TestAckIndex(t *testing.T) { + Convey("Given a ackIndex instance", t, func() { + var ( + err error + + ai = newAckIndex() + resp = &types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + NodeID: proto.NodeID( + "0000000000000000000000000000000000000000000000000000000000000000"), + ConnectionID: 0, + SeqNo: 0, + }, + }, + }, + } + ack = &types.SignedAckHeader{ + AckHeader: types.AckHeader{ + Response: *resp, + }, + } + ) + Convey("Add response and register ack should return no error", func() { + err = ai.addResponse(0, resp) + So(err, ShouldBeNil) + err = ai.register(0, ack) + So(err, ShouldBeNil) + err = ai.remove(0, ack) + So(err, ShouldBeNil) + }) + }) +} diff --git a/sqlchain/blockindex.go b/sqlchain/blockindex.go index c2333a1e1..04bf7d97f 100644 --- a/sqlchain/blockindex.go +++ b/sqlchain/blockindex.go @@ -21,18 +21,18 @@ import ( "sync" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" ) type blockNode struct { parent *blockNode - block *ct.Block // TODO(leventeliu): cleanup history blocks to release memory. + block *types.Block hash hash.Hash height int32 // height is the chain height of the head count int32 // count counts the blocks (except genesis) at this head } -func newBlockNode(height int32, block *ct.Block, parent *blockNode) *blockNode { +func newBlockNode(height int32, block *types.Block, parent *blockNode) *blockNode { return &blockNode{ hash: *block.BlockHash(), parent: parent, @@ -48,7 +48,7 @@ func newBlockNode(height int32, block *ct.Block, parent *blockNode) *blockNode { } } -func (n *blockNode) initBlockNode(height int32, block *ct.Block, parent *blockNode) { +func (n *blockNode) initBlockNode(height int32, block *types.Block, parent *blockNode) { n.block = block n.hash = *block.BlockHash() n.parent = nil @@ -85,19 +85,14 @@ func (n *blockNode) indexKey() (key []byte) { } type blockIndex struct { - cfg *Config - mu sync.RWMutex index map[hash.Hash]*blockNode } -func newBlockIndex(cfg *Config) (index *blockIndex) { - index = &blockIndex{ - cfg: cfg, +func newBlockIndex() (index *blockIndex) { + return &blockIndex{ index: make(map[hash.Hash]*blockNode), } - - return index } func (i *blockIndex) addBlock(newBlock *blockNode) { diff --git a/sqlchain/blockindex_test.go b/sqlchain/blockindex_test.go index df47a4cdd..e372980c8 100644 --- a/sqlchain/blockindex_test.go +++ b/sqlchain/blockindex_test.go @@ -20,16 +20,16 @@ import ( "testing" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" ) var ( - testBlocks []*ct.Block + testBlocks []*types.Block testBlockNumber = 50 ) func generateTestBlocks() (err error) { - testBlocks = make([]*ct.Block, 0, testBlockNumber) + testBlocks = make([]*types.Block, 0, testBlockNumber) for i, prev := 0, genesisHash; i < testBlockNumber; i++ { b, err := createRandomBlock(prev, false) @@ -108,8 +108,7 @@ func TestInitBlockNode(t *testing.T) { } func TestAncestor(t *testing.T) { - cfg := &Config{} - index := newBlockIndex(cfg) + index := newBlockIndex() 
 	parent := (*blockNode)(nil)

 	for h, b := range testBlocks {
@@ -142,8 +141,7 @@
 }

 func TestIndex(t *testing.T) {
-	cfg := &Config{}
-	index := newBlockIndex(cfg)
+	index := newBlockIndex()
 	parent := (*blockNode)(nil)

 	for h, b := range testBlocks {
diff --git a/sqlchain/chain.go b/sqlchain/chain.go
index 5fd0199ad..a95f81c45 100644
--- a/sqlchain/chain.go
+++ b/sqlchain/chain.go
@@ -21,27 +21,34 @@ import (
 	"encoding/binary"
 	"fmt"
 	"os"
+	rt "runtime"
 	"sync"
+	"sync/atomic"
 	"time"

 	pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types"
 	"github.com/CovenantSQL/CovenantSQL/crypto"
 	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
-	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
 	"github.com/CovenantSQL/CovenantSQL/crypto/kms"
 	"github.com/CovenantSQL/CovenantSQL/proto"
 	"github.com/CovenantSQL/CovenantSQL/route"
 	"github.com/CovenantSQL/CovenantSQL/rpc"
-	ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types"
+	"github.com/CovenantSQL/CovenantSQL/types"
 	"github.com/CovenantSQL/CovenantSQL/utils"
 	"github.com/CovenantSQL/CovenantSQL/utils/log"
-	wt "github.com/CovenantSQL/CovenantSQL/worker/types"
+	x "github.com/CovenantSQL/CovenantSQL/xenomint"
+	xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces"
+	xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite"
 	"github.com/pkg/errors"
 	"github.com/syndtr/goleveldb/leveldb"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	"github.com/syndtr/goleveldb/leveldb/util"
 )

+const (
+	minBlockCacheTTL = int32(30)
+)
+
 var (
 	metaState         = [4]byte{'S', 'T', 'A', 'T'}
 	metaBlockIndex    = [4]byte{'B', 'L', 'C', 'K'}
@@ -49,13 +56,21 @@ var (
 	metaResponseIndex = [4]byte{'R', 'E', 'S', 'P'}
 	metaAckIndex      = [4]byte{'Q', 'A', 'C', 'K'}
 	leveldbConf       = opt.Options{}
+
+	// Atomic counters for stats
+	cachedBlockCount int32
 )

 func init() {
 	leveldbConf.BlockSize = 4 * 1024 * 1024
 	leveldbConf.Compression = opt.SnappyCompression
-	leveldbConf.WriteBuffer = 64 * 1024 * 1024
-	leveldbConf.BlockCacheCapacity = 2 * leveldbConf.WriteBuffer
+}
+
+func statBlock(b *types.Block) {
+	atomic.AddInt32(&cachedBlockCount, 1)
+	rt.SetFinalizer(b, func(_ *types.Block) {
+		atomic.AddInt32(&cachedBlockCount, -1)
+	})
 }

 // heightToKey converts a height in int32 to a key in bytes.
@@ -93,15 +108,16 @@ type Chain struct {
 	// tdb stores ack/request/response
 	tdb *leveldb.DB
 	bi  *blockIndex
-	qi  *queryIndex
+	ai  *ackIndex
+	st  *x.State
 	cl  *rpc.Caller
 	rt  *runtime

 	stopCh    chan struct{}
-	blocks    chan *ct.Block
+	blocks    chan *types.Block
 	heights   chan int32
-	responses chan *wt.ResponseHeader
-	acks      chan *wt.AckHeader
+	responses chan *types.ResponseHeader
+	acks      chan *types.AckHeader

 	// DBAccount info
 	tokenType    pt.TokenType
@@ -118,6 +134,11 @@ type Chain struct {
 	replCh chan struct{}
 	// replWg defines the waitGroups for running replications.
 	replWg sync.WaitGroup
+
+	// Cached fields; some of these may need to be renewed later.
+	//
+	// pk is the private key of the local miner.
+	pk *asymmetric.PrivateKey
 }

 // NewChain creates a new sql-chain struct.
@@ -125,7 +146,7 @@ func NewChain(c *Config) (chain *Chain, err error) {
 	// TODO(leventeliu): this is a rough solution, you may also want to clean database file and
 	// force rebuilding.
var fi os.FileInfo - if fi, err = os.Stat(c.DataFile + "-block-state.ldb"); err == nil && fi.Mode().IsDir() { + if fi, err = os.Stat(c.ChainFilePrefix + "-block-state.ldb"); err == nil && fi.Mode().IsDir() { return LoadChain(c) } @@ -135,7 +156,7 @@ func NewChain(c *Config) (chain *Chain, err error) { } // Open LevelDB for block and state - bdbFile := c.DataFile + "-block-state.ldb" + bdbFile := c.ChainFilePrefix + "-block-state.ldb" bdb, err := leveldb.OpenFile(bdbFile, &leveldbConf) if err != nil { err = errors.Wrapf(err, "open leveldb %s", bdbFile) @@ -145,7 +166,7 @@ func NewChain(c *Config) (chain *Chain, err error) { log.Debugf("Create new chain bdb %s", bdbFile) // Open LevelDB for ack/request/response - tdbFile := c.DataFile + "-ack-req-resp.ldb" + tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" tdb, err := leveldb.OpenFile(tdbFile, &leveldbConf) if err != nil { err = errors.Wrapf(err, "open leveldb %s", tdbFile) @@ -154,19 +175,39 @@ func NewChain(c *Config) (chain *Chain, err error) { log.Debugf("Create new chain tdb %s", tdbFile) + // Open x.State + var ( + strg xi.Storage + state *x.State + ) + if strg, err = xs.NewSqlite(c.DataFile); err != nil { + return + } + if state, err = x.NewState(c.Server, strg); err != nil { + return + } + + // Cache local private key + var pk *asymmetric.PrivateKey + if pk, err = kms.GetLocalPrivateKey(); err != nil { + err = errors.Wrap(err, "failed to cache private key") + return + } + // Create chain state chain = &Chain{ bdb: bdb, tdb: tdb, - bi: newBlockIndex(c), - qi: newQueryIndex(), + bi: newBlockIndex(), + ai: newAckIndex(), + st: state, cl: rpc.NewCaller(), rt: newRunTime(c), stopCh: make(chan struct{}), - blocks: make(chan *ct.Block), + blocks: make(chan *types.Block), heights: make(chan int32, 1), - responses: make(chan *wt.ResponseHeader), - acks: make(chan *wt.AckHeader), + responses: make(chan *types.ResponseHeader), + acks: make(chan *types.AckHeader), tokenType: c.TokenType, gasPrice: c.GasPrice, updatePeriod: c.UpdatePeriod, @@ -175,6 +216,8 @@ func NewChain(c *Config) (chain *Chain, err error) { observers: make(map[proto.NodeID]int32), observerReplicators: make(map[proto.NodeID]*observerReplicator), replCh: make(chan struct{}), + + pk: pk, } if err = chain.pushBlock(c.Genesis); err != nil { @@ -187,7 +230,7 @@ func NewChain(c *Config) (chain *Chain, err error) { // LoadChain loads the chain state from the specified database and rebuilds a memory index. 
func LoadChain(c *Config) (chain *Chain, err error) { // Open LevelDB for block and state - bdbFile := c.DataFile + "-block-state.ldb" + bdbFile := c.ChainFilePrefix + "-block-state.ldb" bdb, err := leveldb.OpenFile(bdbFile, &leveldbConf) if err != nil { err = errors.Wrapf(err, "open leveldb %s", bdbFile) @@ -195,26 +238,46 @@ func LoadChain(c *Config) (chain *Chain, err error) { } // Open LevelDB for ack/request/response - tdbFile := c.DataFile + "-ack-req-resp.ldb" + tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" tdb, err := leveldb.OpenFile(tdbFile, &leveldbConf) if err != nil { err = errors.Wrapf(err, "open leveldb %s", tdbFile) return } + // Open x.State + var ( + strg xi.Storage + xstate *x.State + ) + if strg, err = xs.NewSqlite(c.DataFile); err != nil { + return + } + if xstate, err = x.NewState(c.Server, strg); err != nil { + return + } + + // Cache local private key + var pk *asymmetric.PrivateKey + if pk, err = kms.GetLocalPrivateKey(); err != nil { + err = errors.Wrap(err, "failed to cache private key") + return + } + // Create chain state chain = &Chain{ bdb: bdb, tdb: tdb, - bi: newBlockIndex(c), - qi: newQueryIndex(), + bi: newBlockIndex(), + ai: newAckIndex(), + st: xstate, cl: rpc.NewCaller(), rt: newRunTime(c), stopCh: make(chan struct{}), - blocks: make(chan *ct.Block), + blocks: make(chan *types.Block), heights: make(chan int32, 1), - responses: make(chan *wt.ResponseHeader), - acks: make(chan *wt.AckHeader), + responses: make(chan *types.ResponseHeader), + acks: make(chan *types.AckHeader), tokenType: c.TokenType, gasPrice: c.GasPrice, updatePeriod: c.UpdatePeriod, @@ -223,6 +286,8 @@ func LoadChain(c *Config) (chain *Chain, err error) { observers: make(map[proto.NodeID]int32), observerReplicators: make(map[proto.NodeID]*observerReplicator), replCh: make(chan struct{}), + + pk: pk, } // Read state struct @@ -242,6 +307,7 @@ func LoadChain(c *Config) (chain *Chain, err error) { // Read blocks and rebuild memory index var ( + id uint64 index int32 last *blockNode blockIter = chain.bdb.NewIterator(util.BytesPrefix(metaBlockIndex[:]), nil) @@ -251,7 +317,7 @@ func LoadChain(c *Config) (chain *Chain, err error) { var ( k = blockIter.Key() v = blockIter.Value() - block = &ct.Block{} + block = &types.Block{} current, parent *blockNode ) @@ -286,6 +352,11 @@ func LoadChain(c *Config) (chain *Chain, err error) { } } + // Update id + if nid, ok := block.CalcNextID(); ok && nid > id { + id = nid + } + current = &blockNode{} current.initBlockNode(chain.rt.getHeightFromTime(block.Timestamp()), block, parent) chain.bi.addBlock(current) @@ -299,6 +370,8 @@ func LoadChain(c *Config) (chain *Chain, err error) { // Set chain state st.node = last chain.rt.setHead(st) + chain.st.InitTx(id) + chain.pruneBlockCache() // Read queries and rebuild memory index respIter := chain.tdb.NewIterator(util.BytesPrefix(metaResponseIndex[:]), nil) @@ -307,20 +380,15 @@ func LoadChain(c *Config) (chain *Chain, err error) { k := respIter.Key() v := respIter.Value() h := keyWithSymbolToHeight(k) - var resp = &wt.SignedResponseHeader{} + var resp = &types.SignedResponseHeader{} if err = utils.DecodeMsgPack(v, resp); err != nil { err = errors.Wrapf(err, "load resp, height %d, index %s", h, string(k)) return } log.WithFields(log.Fields{ "height": h, - "header": resp.Hash.String(), + "header": resp.Hash().String(), }).Debug("Loaded new resp header") - err = chain.qi.addResponse(h, resp) - if err != nil { - err = errors.Wrapf(err, "load resp, height %d, hash %s", h, resp.Hash.String()) - return - } } if err 
= respIter.Error(); err != nil { err = errors.Wrap(err, "load resp") @@ -333,20 +401,15 @@ func LoadChain(c *Config) (chain *Chain, err error) { k := ackIter.Key() v := ackIter.Value() h := keyWithSymbolToHeight(k) - var ack = &wt.SignedAckHeader{} + var ack = &types.SignedAckHeader{} if err = utils.DecodeMsgPack(v, ack); err != nil { err = errors.Wrapf(err, "load ack, height %d, index %s", h, string(k)) return } log.WithFields(log.Fields{ "height": h, - "header": ack.Hash.String(), + "header": ack.Hash().String(), }).Debug("Loaded new ack header") - err = chain.qi.addAck(h, ack) - if err != nil { - err = errors.Wrapf(err, "load ack, height %d, hash %s", h, ack.Hash.String()) - return - } } if err = respIter.Error(); err != nil { err = errors.Wrap(err, "load ack") @@ -357,7 +420,7 @@ func LoadChain(c *Config) (chain *Chain, err error) { } // pushBlock pushes the signed block header to extend the current main chain. -func (c *Chain) pushBlock(b *ct.Block) (err error) { +func (c *Chain) pushBlock(b *types.Block) (err error) { // Prepare and encode h := c.rt.getHeightFromTime(b.Timestamp()) node := newBlockNode(h, b, c.rt.getHead().node) @@ -396,58 +459,55 @@ func (c *Chain) pushBlock(b *ct.Block) (err error) { } c.rt.setHead(st) c.bi.addBlock(node) - c.qi.setSignedBlock(h, b) + + // Keep track of the queries from the new block + var ierr error + for i, v := range b.QueryTxs { + if ierr = c.addResponse(v.Response); ierr != nil { + log.WithFields(log.Fields{ + "index": i, + "producer": b.Producer(), + "block_hash": b.BlockHash(), + }).WithError(ierr).Warn("Failed to add response to ackIndex") + } + } + for i, v := range b.Acks { + if ierr = c.remove(v); ierr != nil { + log.WithFields(log.Fields{ + "index": i, + "producer": b.Producer(), + "block_hash": b.BlockHash(), + }).WithError(ierr).Warn("Failed to remove Ack from ackIndex") + } + } if err == nil { log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString()[:14], - "time": c.rt.getChainTimeString(), - "block": b.BlockHash().String()[:8], - "producer": b.Producer()[:8], - "querycount": len(b.Queries), - "blocktime": b.Timestamp().Format(time.RFC3339Nano), - "blockheight": c.rt.getHeightFromTime(b.Timestamp()), - "headblock": fmt.Sprintf("%s <- %s", + "peer": c.rt.getPeerInfoString()[:14], + "time": c.rt.getChainTimeString(), + "block": b.BlockHash().String()[:8], + "producer": b.Producer()[:8], + "queryCount": len(b.QueryTxs), + "ackCount": len(b.Acks), + "blockTime": b.Timestamp().Format(time.RFC3339Nano), + "height": c.rt.getHeightFromTime(b.Timestamp()), + "head": fmt.Sprintf("%s <- %s", func() string { if st.node.parent != nil { return st.node.parent.hash.String()[:8] } return "|" }(), st.Head.String()[:8]), - "headheight": c.rt.getHead().Height, + "headHeight": c.rt.getHead().Height, }).Info("Pushed new block") } return } -// pushResponedQuery pushes a responsed, signed and verified query into the chain. 
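The per-hash queryIndex used by the removed code above is replaced in this patch by the height-keyed ackIndex from sqlchain/ackindex.go. A minimal sketch of the intended lifecycle, assuming a matching, verified response/ack pair like the one built in ackindex_test.go; the helper name exampleAckLifecycle and the literal height 10 are illustrative only and not part of the patch:

package sqlchain

import "github.com/CovenantSQL/CovenantSQL/types"

// exampleAckLifecycle walks one query through the new ackIndex. Every method
// used here is defined in sqlchain/ackindex.go in this patch.
func exampleAckLifecycle(resp *types.SignedResponseHeader, ack *types.SignedAckHeader) error {
	ai := newAckIndex()
	// 1. The query executes locally: the response header is indexed at the
	//    height derived from the request timestamp (see Chain.addResponse).
	if err := ai.addResponse(10, resp); err != nil {
		return err
	}
	// 2. The client acknowledges: the response is promoted to an ackTracker.
	if err := ai.register(10, ack); err != nil {
		return err
	}
	// 3. produceBlockV2 collects the pending acks for the next block.
	_ = ai.acks(10)
	// 4. pushBlock removes acks that were included in a block.
	if err := ai.remove(10, ack); err != nil {
		return err
	}
	// 5. advance raises the barrier; anything left below it is expired and logged.
	ai.advance(11)
	return nil
}

Responses that never receive an ack, and acks that never make it into a block, are reported by expire() once advance() moves the barrier past their height.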
-func (c *Chain) pushResponedQuery(resp *wt.SignedResponseHeader) (err error) { - h := c.rt.getHeightFromTime(resp.Request.Timestamp) - k := heightToKey(h) - var enc *bytes.Buffer - - if enc, err = utils.EncodeMsgPack(resp); err != nil { - return - } - - tdbKey := utils.ConcatAll(metaResponseIndex[:], k, resp.Hash[:]) - if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { - err = errors.Wrapf(err, "put response %d %s", h, resp.Hash.String()) - return - } - - if err = c.qi.addResponse(h, resp); err != nil { - err = errors.Wrapf(err, "add resp h %d hash %s", h, resp.Hash) - return err - } - - return -} - // pushAckedQuery pushes a acknowledged, signed and verified query into the chain. -func (c *Chain) pushAckedQuery(ack *wt.SignedAckHeader) (err error) { - log.Debugf("push ack %s", ack.Hash.String()) +func (c *Chain) pushAckedQuery(ack *types.SignedAckHeader) (err error) { + log.Debugf("push ack %s", ack.Hash().String()) h := c.rt.getHeightFromTime(ack.SignedResponseHeader().Timestamp) k := heightToKey(h) var enc *bytes.Buffer @@ -456,34 +516,33 @@ func (c *Chain) pushAckedQuery(ack *wt.SignedAckHeader) (err error) { return } - tdbKey := utils.ConcatAll(metaAckIndex[:], k, ack.Hash[:]) + tdbKey := utils.ConcatAll(metaAckIndex[:], k, ack.Hash().AsBytes()) if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { - err = errors.Wrapf(err, "put ack %d %s", h, ack.Hash.String()) + err = errors.Wrapf(err, "put ack %d %s", h, ack.Hash().String()) return } - if err = c.qi.addAck(h, ack); err != nil { - err = errors.Wrapf(err, "add ack h %d hash %s", h, ack.Hash) - return err + if err = c.register(ack); err != nil { + err = errors.Wrapf(err, "register ack %v at height %d", ack.Hash(), h) + return } return } -// produceBlock prepares, signs and advises the pending block to the orther peers. -func (c *Chain) produceBlock(now time.Time) (err error) { - // Retrieve local key pair - priv, err := kms.GetLocalPrivateKey() - - if err != nil { +// produceBlockV2 prepares, signs and advises the pending block to the other peers. +func (c *Chain) produceBlockV2(now time.Time) (err error) { + var ( + frs []*types.Request + qts []*x.QueryTracker + ) + if frs, qts, err = c.st.CommitEx(); err != nil { return } - - // Pack and sign block - block := &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + var block = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: c.rt.getServer(), GenesisHash: c.rt.genesisHash, @@ -491,15 +550,27 @@ func (c *Chain) produceBlock(now time.Time) (err error) { // MerkleRoot: will be set by Block.PackAndSignBlock(PrivateKey) Timestamp: now, }, - // BlockHash/Signee/Signature: will be set by Block.PackAndSignBlock(PrivateKey) }, - Queries: c.qi.markAndCollectUnsignedAcks(c.rt.getNextTurn()), + FailedReqs: frs, + QueryTxs: make([]*types.QueryAsTx, len(qts)), + Acks: c.ai.acks(c.rt.getHeightFromTime(now)), + } + statBlock(block) + for i, v := range qts { + // TODO(leventeliu): maybe block waiting at a ready channel instead? + for !v.Ready() { + time.Sleep(1 * time.Millisecond) + } + block.QueryTxs[i] = &types.QueryAsTx{ + // TODO(leventeliu): add acks for billing. 
+ Request: v.Req, + Response: &v.Resp.Header, + } } - - if err = block.PackAndSignBlock(priv); err != nil { + // Sign block + if err = block.PackAndSignBlock(c.pk); err != nil { return } - // Send to pending list c.blocks <- block log.WithFields(log.Fields{ @@ -509,29 +580,29 @@ func (c *Chain) produceBlock(now time.Time) (err error) { "using_timestamp": now.Format(time.RFC3339Nano), "block_hash": block.BlockHash().String(), }).Debug("Produced new block") - // Advise new block to the other peers - req := &MuxAdviseNewBlockReq{ - Envelope: proto.Envelope{ - // TODO(leventeliu): Add fields. - }, - DatabaseID: c.rt.databaseID, - AdviseNewBlockReq: AdviseNewBlockReq{ - Block: block, - Count: func() int32 { - if nd := c.bi.lookupNode(block.BlockHash()); nd != nil { - return nd.count - } - if pn := c.bi.lookupNode(block.ParentHash()); pn != nil { - return pn.count + 1 - } - return -1 - }(), - }, - } - peers := c.rt.getPeers() - wg := &sync.WaitGroup{} - + var ( + req = &MuxAdviseNewBlockReq{ + Envelope: proto.Envelope{ + // TODO(leventeliu): Add fields. + }, + DatabaseID: c.rt.databaseID, + AdviseNewBlockReq: AdviseNewBlockReq{ + Block: block, + Count: func() int32 { + if nd := c.bi.lookupNode(block.BlockHash()); nd != nil { + return nd.count + } + if pn := c.bi.lookupNode(block.ParentHash()); pn != nil { + return pn.count + 1 + } + return -1 + }(), + }, + } + peers = c.rt.getPeers() + wg = &sync.WaitGroup{} + ) for _, s := range peers.Servers { if s != c.rt.getServer() { wg.Add(1) @@ -552,12 +623,9 @@ func (c *Chain) produceBlock(now time.Time) (err error) { }(s) } } - wg.Wait() - // fire replication to observers c.startStopReplication() - return } @@ -593,6 +661,7 @@ func (c *Chain) syncHead() { }).WithError(err).Debug( "Failed to fetch block from peer") } else { + statBlock(resp.Block) c.blocks <- resp.Block log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), @@ -625,8 +694,10 @@ func (c *Chain) syncHead() { // runCurrentTurn does the check and runs block producing if its my turn. func (c *Chain) runCurrentTurn(now time.Time) { defer func() { + c.stat() + c.pruneBlockCache() c.rt.setNextTurn() - c.qi.advanceBarrier(c.rt.getMinValidHeight()) + c.ai.advance(c.rt.getMinValidHeight()) // Info the block processing goroutine that the chain height has grown, so please return // any stashed blocks for further check. 
c.heights <- c.rt.getHead().Height @@ -656,7 +727,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { return } - if err := c.produceBlock(now); err != nil { + if err := c.produceBlockV2(now); err != nil { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), @@ -727,7 +798,7 @@ func (c *Chain) sync() (err error) { func (c *Chain) processBlocks() { rsCh := make(chan struct{}) rsWG := &sync.WaitGroup{} - returnStash := func(stash []*ct.Block) { + returnStash := func(stash []*types.Block) { defer rsWG.Done() for _, block := range stash { select { @@ -744,7 +815,7 @@ func (c *Chain) processBlocks() { c.rt.wg.Done() }() - var stash []*ct.Block + var stash []*types.Block for { select { case h := <-c.heights: @@ -854,21 +925,34 @@ func (c *Chain) Stop() (err error) { "time": c.rt.getChainTimeString(), }).Debug("Chain service stopped") // Close LevelDB file - err = c.bdb.Close() + var ierr error + if ierr = c.bdb.Close(); ierr != nil && err == nil { + err = ierr + } + log.WithFields(log.Fields{ + "peer": c.rt.getPeerInfoString(), + "time": c.rt.getChainTimeString(), + }).WithError(ierr).Debug("Chain database closed") + if ierr = c.tdb.Close(); ierr != nil && err == nil { + err = ierr + } log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), - }).Debug("Chain database closed") - err = c.tdb.Close() + }).WithError(ierr).Debug("Chain database closed") + // Close state + if ierr = c.st.Close(false); ierr != nil && err == nil { + err = ierr + } log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), - }).Debug("Chain database closed") + }).WithError(ierr).Debug("Chain state storage closed") return } // FetchBlock fetches the block at specified height from local cache. -func (c *Chain) FetchBlock(height int32) (b *ct.Block, err error) { +func (c *Chain) FetchBlock(height int32) (b *types.Block, err error) { if n := c.rt.getHead().node.ancestor(height); n != nil { k := utils.ConcatAll(metaBlockIndex[:], n.indexKey()) var v []byte @@ -878,7 +962,8 @@ func (c *Chain) FetchBlock(height int32) (b *ct.Block, err error) { return } - b = &ct.Block{} + b = &types.Block{} + statBlock(b) err = utils.DecodeMsgPack(v, b) if err != nil { err = errors.Wrapf(err, "fetch block %s", string(k)) @@ -889,86 +974,8 @@ func (c *Chain) FetchBlock(height int32) (b *ct.Block, err error) { return } -// FetchAckedQuery fetches the acknowledged query from local cache. -func (c *Chain) FetchAckedQuery(height int32, header *hash.Hash) ( - ack *wt.SignedAckHeader, err error, -) { - if ack, err = c.qi.getAck(height, header); err != nil || ack == nil { - for h := height - c.rt.queryTTL - 1; h <= height; h++ { - k := heightToKey(h) - ackKey := utils.ConcatAll(metaAckIndex[:], k, header[:]) - var v []byte - if v, err = c.tdb.Get(ackKey, nil); err != nil { - // if err == leveldb.ErrNotFound, just loop for next h - if err != leveldb.ErrNotFound { - err = errors.Wrapf(err, "fetch ack in height %d hash %s", h, header.String()) - return - } - } else { - var dec = &wt.SignedAckHeader{} - if err = utils.DecodeMsgPack(v, dec); err != nil { - err = errors.Wrapf(err, "fetch ack in height %d hash %s", h, header.String()) - return - } - ack = dec - break - } - } - } - if ack == nil { - err = errors.Wrapf(ErrAckQueryNotFound, "fetch ack not found") - } - return -} - -// syncAckedQuery uses RPC call to synchronize an acknowledged query from a remote node. 
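For orientation, the housekeeping now done at the end of every producing turn in runCurrentTurn above can be summarized by the following sketch; the wrapper name exampleTurnMaintenance is illustrative only, the calls are the ones added by this patch:

package sqlchain

// exampleTurnMaintenance mirrors the per-turn bookkeeping in runCurrentTurn:
// report the atomic counters, release old in-memory block bodies, move to the
// next turn and expire unacknowledged queries below the minimum valid height.
func exampleTurnMaintenance(c *Chain) {
	c.stat()            // log multiIndex/response/ackTracker/cachedBlock counters
	c.pruneBlockCache() // drop block bodies behind blockCacheTTL; FetchBlock still reads the LevelDB copy
	c.rt.setNextTurn()
	c.ai.advance(c.rt.getMinValidHeight())
}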
-func (c *Chain) syncAckedQuery(height int32, header *hash.Hash, id proto.NodeID) ( - ack *wt.SignedAckHeader, err error, -) { - req := &MuxFetchAckedQueryReq{ - Envelope: proto.Envelope{ - // TODO(leventeliu): Add fields. - }, - DatabaseID: c.rt.databaseID, - FetchAckedQueryReq: FetchAckedQueryReq{ - Height: height, - SignedAckedHash: header, - }, - } - resp := &MuxFetchAckedQueryResp{} - - if err = c.cl.CallNode(id, route.SQLCFetchAckedQuery.String(), req, resp); err != nil { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - }).WithError(err).Error( - "Failed to fetch acked query") - return - } - - if err = c.VerifyAndPushAckedQuery(resp.Ack); err != nil { - return - } - - ack = resp.Ack - return -} - -// queryOrSyncAckedQuery tries to query an acknowledged query from local index, and also tries to -// synchronize it from a remote node if not found locally. -func (c *Chain) queryOrSyncAckedQuery(height int32, header *hash.Hash, id proto.NodeID) ( - ack *wt.SignedAckHeader, err error, -) { - if ack, err = c.FetchAckedQuery( - height, header, - ); (err == nil && ack != nil) || id == c.rt.getServer() { - return - } - return c.syncAckedQuery(height, header, id) -} - // CheckAndPushNewBlock implements ChainRPCServer.CheckAndPushNewBlock. -func (c *Chain) CheckAndPushNewBlock(block *ct.Block) (err error) { +func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { height := c.rt.getHeightFromTime(block.Timestamp()) head := c.rt.getHead() peers := c.rt.getPeers() @@ -1032,46 +1039,16 @@ func (c *Chain) CheckAndPushNewBlock(block *ct.Block) (err error) { // ... // } - // Check queries - for _, q := range block.Queries { - var ok bool - - if ok, err = c.qi.checkAckFromBlock(height, block.BlockHash(), q); err != nil { - return - } - - if !ok { - if _, err = c.syncAckedQuery(height, q, block.Producer()); err != nil { - return - } - - if _, err = c.qi.checkAckFromBlock(height, block.BlockHash(), q); err != nil { - return - } - } - } - - return c.pushBlock(block) -} - -// VerifyAndPushResponsedQuery verifies a responsed and signed query, and pushed it if valid. -func (c *Chain) VerifyAndPushResponsedQuery(resp *wt.SignedResponseHeader) (err error) { - // TODO(leventeliu): check resp. - if c.rt.queryTimeIsExpired(resp.Timestamp) { - err = errors.Wrapf(ErrQueryExpired, "Verify response query, min valid height %d, response height %d", c.rt.getMinValidHeight(), c.rt.getHeightFromTime(resp.Timestamp)) - return - } - - if err = resp.Verify(); err != nil { - err = errors.Wrapf(err, "") + // Replicate local state from the new block + if err = c.st.ReplayBlock(block); err != nil { return } - return c.pushResponedQuery(resp) + return c.pushBlock(block) } // VerifyAndPushAckedQuery verifies a acknowledged and signed query, and pushed it if valid. -func (c *Chain) VerifyAndPushAckedQuery(ack *wt.SignedAckHeader) (err error) { +func (c *Chain) VerifyAndPushAckedQuery(ack *types.SignedAckHeader) (err error) { // TODO(leventeliu): check ack. 
if c.rt.queryTimeIsExpired(ack.SignedResponseHeader().Timestamp) { err = errors.Wrapf(ErrQueryExpired, "Verify ack query, min valid height %d, ack height %d", c.rt.getMinValidHeight(), c.rt.getHeightFromTime(ack.Timestamp)) @@ -1101,8 +1078,7 @@ func (c *Chain) getBilling(low, high int32) (req *pt.BillingRequest, err error) var ( n *blockNode addr proto.AccountAddress - ack *wt.SignedAckHeader - lowBlock, highBlock *ct.Block + lowBlock, highBlock *types.Block billings = make(map[proto.AccountAddress]*proto.AddrAndGas) ) @@ -1114,6 +1090,11 @@ func (c *Chain) getBilling(low, high int32) (req *pt.BillingRequest, err error) } for ; n != nil && n.height >= low; n = n.parent { + // TODO(leventeliu): block maybe released, use persistence version in this case. + if n.block == nil { + continue + } + if lowBlock == nil { lowBlock = n.block } @@ -1135,22 +1116,18 @@ func (c *Chain) getBilling(low, high int32) (req *pt.BillingRequest, err error) } } - for _, v := range n.block.Queries { - if ack, err = c.queryOrSyncAckedQuery(n.height, v, n.block.Producer()); err != nil { - return - } - - if addr, err = crypto.PubKeyHash(ack.SignedResponseHeader().Signee); err != nil { + for _, v := range n.block.Acks { + if addr, err = crypto.PubKeyHash(v.SignedResponseHeader().Signee); err != nil { return } if billing, ok := billings[addr]; ok { - billing.GasAmount += c.rt.price[ack.SignedRequestHeader().QueryType] * - ack.SignedRequestHeader().BatchCount + billing.GasAmount += c.rt.price[v.SignedRequestHeader().QueryType] * + v.SignedRequestHeader().BatchCount } else { billings[addr] = &proto.AddrAndGas{ AccountAddress: addr, - RawNodeID: *ack.SignedResponseHeader().NodeID.ToRawNodeID(), + RawNodeID: *v.SignedResponseHeader().NodeID.ToRawNodeID(), GasAmount: c.rt.producingReward, } } @@ -1202,7 +1179,7 @@ func (c *Chain) collectBillingSignatures(billings *pt.BillingRequest) { go func() { defer proWG.Done() - bpReq := &ct.AdviseBillingReq{ + bpReq := &types.AdviseBillingReq{ Req: billings, } @@ -1314,24 +1291,13 @@ func (c *Chain) SignBilling(req *pt.BillingRequest) ( if err = req.VerifySignatures(); err != nil { return } - if loc, err = c.getBilling(req.Header.LowHeight, req.Header.HighHeight); err != nil { return } - if err = req.Compare(loc); err != nil { return } - - // Sign block with private key - priv, err := kms.GetLocalPrivateKey() - - if err != nil { - return - } - - pub, sig, err = req.SignRequestHeader(priv, false) - + pub, sig, err = req.SignRequestHeader(c.pk, false) return } @@ -1414,3 +1380,84 @@ func (c *Chain) replicationCycle() { } } } + +// Query queries req from local chain state and returns the query results in resp. +func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) { + var ref *x.QueryTracker + if ref, resp, err = c.st.Query(req); err != nil { + return + } + if err = resp.Sign(c.pk); err != nil { + return + } + if err = c.addResponse(&resp.Header); err != nil { + return + } + ref.UpdateResp(resp) + return +} + +// Replay replays a write log from other peer to replicate storage state. 
+func (c *Chain) Replay(req *types.Request, resp *types.Response) (err error) { + switch req.Header.QueryType { + case types.ReadQuery: + return + case types.WriteQuery: + return c.st.Replay(req, resp) + default: + err = ErrInvalidRequest + } + if err = c.addResponse(&resp.Header); err != nil { + return + } + return +} + +func (c *Chain) addResponse(resp *types.SignedResponseHeader) (err error) { + return c.ai.addResponse(c.rt.getHeightFromTime(resp.Request.Timestamp), resp) +} + +func (c *Chain) register(ack *types.SignedAckHeader) (err error) { + return c.ai.register(c.rt.getHeightFromTime(ack.SignedRequestHeader().Timestamp), ack) +} + +func (c *Chain) remove(ack *types.SignedAckHeader) (err error) { + return c.ai.remove(c.rt.getHeightFromTime(ack.SignedRequestHeader().Timestamp), ack) +} + +func (c *Chain) pruneBlockCache() { + var ( + head = c.rt.getHead().node + lastCnt int32 + ) + if head == nil { + return + } + lastCnt = head.count - c.rt.blockCacheTTL + // Move to last count position + for ; head != nil && head.count > lastCnt; head = head.parent { + } + // Prune block references + for ; head != nil && head.block != nil; head = head.parent { + head.block = nil + } +} + +func (c *Chain) stat() { + var ( + ic = atomic.LoadInt32(&multiIndexCount) + rc = atomic.LoadInt32(&responseCount) + tc = atomic.LoadInt32(&ackTrackerCount) + bc = atomic.LoadInt32(&cachedBlockCount) + ) + // Print chain stats + log.WithFields(log.Fields{ + "database_id": c.rt.databaseID, + "multiIndex_count": ic, + "response_header_count": rc, + "query_tracker_count": tc, + "cached_block_count": bc, + }).Info("Chain mem stats") + // Print xeno stats + c.st.Stat(c.rt.databaseID) +} diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go index 42a78df35..798854417 100644 --- a/sqlchain/chain_test.go +++ b/sqlchain/chain_test.go @@ -164,15 +164,16 @@ func TestMultiChain(t *testing.T) { // Create chain instance config := &Config{ - DatabaseID: testDatabaseID, - DataFile: dbfile, - Genesis: genesis, - Period: testPeriod, - Tick: testTick, - MuxService: mux, - Server: peers.Servers[i], - Peers: peers, - QueryTTL: testQueryTTL, + DatabaseID: testDatabaseID, + ChainFilePrefix: dbfile, + DataFile: dbfile, + Genesis: genesis, + Period: testPeriod, + Tick: testTick, + MuxService: mux, + Server: peers.Servers[i], + Peers: peers, + QueryTTL: testQueryTTL, } chain, err := NewChain(config) @@ -307,18 +308,9 @@ func TestMultiChain(t *testing.T) { i, c.rt.getPeerInfoString()) continue } - t.Logf("Checking block %v at height %d in peer %s", - node.block.BlockHash(), i, c.rt.getPeerInfoString()) - for _, v := range node.block.Queries { - if ack, err := c.queryOrSyncAckedQuery( - i, v, node.block.Producer(), - ); err != nil && ack == nil { - t.Errorf("Failed to fetch ack %v at height %d in peer %s: %v", - v, i, c.rt.getPeerInfoString(), err) - } else { - t.Logf("Successed to fetch ack %v at height %d in peer %s", - v, i, c.rt.getPeerInfoString()) - } + if node.block != nil { + t.Logf("Checking block %v at height %d in peer %s", + node.block.BlockHash(), i, c.rt.getPeerInfoString()) } } }(v.chain) @@ -355,7 +347,7 @@ func TestMultiChain(t *testing.T) { if err != nil { t.Errorf("Error occurred: %v", err) - } else if err = c.VerifyAndPushResponsedQuery(resp); err != nil { + } else if err = c.addResponse(resp); err != nil { t.Errorf("Error occurred: %v", err) } diff --git a/sqlchain/config.go b/sqlchain/config.go index 012470bc2..113e99718 100644 --- a/sqlchain/config.go +++ b/sqlchain/config.go @@ -19,18 +19,18 @@ package sqlchain 
import ( "time" - "github.com/CovenantSQL/CovenantSQL/blockproducer/types" + pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" ) // Config represents a sql-chain config. type Config struct { - DatabaseID proto.DatabaseID - DataFile string + DatabaseID proto.DatabaseID + ChainFilePrefix string + DataFile string - Genesis *ct.Block + Genesis *types.Block Period time.Duration Tick time.Duration @@ -39,15 +39,17 @@ type Config struct { Server proto.NodeID // Price sets query price in gases. - Price map[wt.QueryType]uint64 + Price map[types.QueryType]uint64 ProducingReward uint64 BillingPeriods int32 // QueryTTL sets the unacknowledged query TTL in block periods. QueryTTL int32 + BlockCacheTTL int32 + // DBAccount info - TokenType types.TokenType + TokenType pt.TokenType GasPrice uint64 UpdatePeriod uint64 } diff --git a/sqlchain/errors.go b/sqlchain/errors.go index 0cbf1c682..10fa9cde8 100644 --- a/sqlchain/errors.go +++ b/sqlchain/errors.go @@ -68,7 +68,7 @@ var ( // ErrCorruptedIndex indicates that a corrupted index item is detected. ErrCorruptedIndex = errors.New("corrupted index item") - // ErrUnknownMuxRequest indicates that the a multiplexing request endpoint is not found. + // ErrUnknownMuxRequest indicates that the multiplexing request endpoint is not found. ErrUnknownMuxRequest = errors.New("unknown multiplexing request") // ErrUnknownProducer indicates that the block has an unknown producer. @@ -77,8 +77,8 @@ var ( // ErrInvalidProducer indicates that the block has an invalid producer. ErrInvalidProducer = errors.New("invalid block producer") - // ErrUnavailableBillingRang indicates that the billing range is not abailable now. - ErrUnavailableBillingRang = errors.New("unabailable billing range") + // ErrUnavailableBillingRang indicates that the billing range is not available now. + ErrUnavailableBillingRang = errors.New("unavailable billing range") // ErrHashNotMatch indicates that a message hash value doesn't match the original hash value // given in its hash field. @@ -89,4 +89,14 @@ var ( // ErrAckQueryNotFound indicates that an acknowledged query record is not found. ErrAckQueryNotFound = errors.New("acknowledged query not found") + + // ErrQueryNotFound indicates that a query is not found in the index. + ErrQueryNotFound = errors.New("query not found") + + // ErrInvalidRequest indicates the query is invalid. + ErrInvalidRequest = errors.New("invalid request") + + // ErrResponseSeqNotMatch indicates that a response sequence id doesn't match the original one + // in the index. + ErrResponseSeqNotMatch = errors.New("response sequence id doesn't match") ) diff --git a/sqlchain/mux.go b/sqlchain/mux.go index 09a215d81..7dea6dfea 100644 --- a/sqlchain/mux.go +++ b/sqlchain/mux.go @@ -75,20 +75,6 @@ type MuxAdviseBinLogResp struct { AdviseBinLogResp } -// MuxAdviseResponsedQueryReq defines a request of the AdviseAckedQuery RPC method. -type MuxAdviseResponsedQueryReq struct { - proto.Envelope - proto.DatabaseID - AdviseResponsedQueryReq -} - -// MuxAdviseResponsedQueryResp defines a response of the AdviseAckedQuery RPC method. -type MuxAdviseResponsedQueryResp struct { - proto.Envelope - proto.DatabaseID - AdviseResponsedQueryResp -} - // MuxAdviseAckedQueryReq defines a request of the AdviseAckedQuery RPC method. 
type MuxAdviseAckedQueryReq struct { proto.Envelope @@ -117,20 +103,6 @@ type MuxFetchBlockResp struct { FetchBlockResp } -// MuxFetchAckedQueryReq defines a request of the FetchAckedQuery RPC method. -type MuxFetchAckedQueryReq struct { - proto.Envelope - proto.DatabaseID - FetchAckedQueryReq -} - -// MuxFetchAckedQueryResp defines a request of the FetchAckedQuery RPC method. -type MuxFetchAckedQueryResp struct { - proto.Envelope - proto.DatabaseID - FetchAckedQueryResp -} - // MuxSignBillingReq defines a request of the SignBilling RPC method. type MuxSignBillingReq struct { proto.Envelope @@ -209,19 +181,6 @@ func (s *MuxService) AdviseBinLog(req *MuxAdviseBinLogReq, resp *MuxAdviseBinLog return ErrUnknownMuxRequest } -// AdviseResponsedQuery is the RPC method to advise a new responsed query to the target server. -func (s *MuxService) AdviseResponsedQuery( - req *MuxAdviseResponsedQueryReq, resp *MuxAdviseResponsedQueryResp) error { - if v, ok := s.serviceMap.Load(req.DatabaseID); ok { - resp.Envelope = req.Envelope - resp.DatabaseID = req.DatabaseID - return v.(*ChainRPCService).AdviseResponsedQuery( - &req.AdviseResponsedQueryReq, &resp.AdviseResponsedQueryResp) - } - - return ErrUnknownMuxRequest -} - // AdviseAckedQuery is the RPC method to advise a new acknowledged query to the target server. func (s *MuxService) AdviseAckedQuery( req *MuxAdviseAckedQueryReq, resp *MuxAdviseAckedQueryResp) error { @@ -246,19 +205,6 @@ func (s *MuxService) FetchBlock(req *MuxFetchBlockReq, resp *MuxFetchBlockResp) return ErrUnknownMuxRequest } -// FetchAckedQuery is the RPC method to fetch a known block from the target server. -func (s *MuxService) FetchAckedQuery( - req *MuxFetchAckedQueryReq, resp *MuxFetchAckedQueryResp) (err error) { - if v, ok := s.serviceMap.Load(req.DatabaseID); ok { - resp.Envelope = req.Envelope - resp.DatabaseID = req.DatabaseID - return v.(*ChainRPCService).FetchAckedQuery( - &req.FetchAckedQueryReq, &resp.FetchAckedQueryResp) - } - - return ErrUnknownMuxRequest -} - // SignBilling is the RPC method to get signature for a billing request from the target server. func (s *MuxService) SignBilling(req *MuxSignBillingReq, resp *MuxSignBillingResp) (err error) { if v, ok := s.serviceMap.Load(req.DatabaseID); ok { diff --git a/sqlchain/observer.go b/sqlchain/observer.go index 8b928427c..e1fa19960 100644 --- a/sqlchain/observer.go +++ b/sqlchain/observer.go @@ -21,23 +21,17 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) /* -Observer implements interface like a sqlchain including AdviseNewBlock/AdviseAckedQuery. +Observer implements method AdviseNewBlock to receive blocks from sqlchain node. Request/Response entity from sqlchain api is re-used for simplicity. type Observer interface { AdviseNewBlock(*MuxAdviseNewBlockReq, *MuxAdviseNewBlockResp) error - AdviseAckedQuery(*MuxAdviseAckedQueryReq, *MuxAdviseAckedQueryResp) error } - -The observer could call DBS.GetRequest to fetch original request entity from the DBMS service. -The whole observation of block producing and write query execution would be as follows. -AdviseAckedQuery -> AdviseNewBlock -> GetRequest. */ // observerReplicator defines observer replication state. 
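The remaining observer.go hunks below keep only the block-advising path; per-ack advising is removed. For reference, a minimal sketch of how an observer's start offset is interpreted by replicate(), assuming the types package exports the same ReplicateFromBeginning/ReplicateFromNewest constants that sqlchain/otypes defines later in this patch; the helper observerStartHeight and its fromGenesis flag are illustrative only:

package sqlchain

import "github.com/CovenantSQL/CovenantSQL/types"

// observerStartHeight picks the height an observer subscription starts from.
func observerStartHeight(fromGenesis bool) int32 {
	if fromGenesis {
		// 0 (ReplicateFromBeginning): replay every block from genesis.
		return 0
	}
	// -1 (ReplicateFromNewest): replicate only blocks from the current head onward.
	return types.ReplicateFromNewest
}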
@@ -93,7 +87,7 @@ func (r *observerReplicator) replicate() { curHeight := r.c.rt.getHead().Height - if r.height == ct.ReplicateFromNewest { + if r.height == types.ReplicateFromNewest { log.WithFields(log.Fields{ "node": r.nodeID, "height": curHeight, @@ -121,7 +115,7 @@ func (r *observerReplicator) replicate() { }).Debug("try replicating block for observer") // replicate one record - var block *ct.Block + var block *types.Block if block, err = r.c.FetchBlock(r.height); err != nil { // fetch block failed log.WithField("height", r.height).WithError(err).Warning("fetch block with height failed") @@ -136,7 +130,7 @@ func (r *observerReplicator) replicate() { // find last available block log.Debug("start block height hole detection") - var lastBlock, nextBlock *ct.Block + var lastBlock, nextBlock *types.Block var lastHeight, nextHeight int32 for h := r.height - 1; h >= 0; h-- { @@ -205,36 +199,6 @@ func (r *observerReplicator) replicate() { }).Debug("finish block height hole detection, skipping") } - // fetch acks in block - for _, h := range block.Queries { - var ack *wt.SignedAckHeader - if ack, err = r.c.queryOrSyncAckedQuery(r.height, h, block.Producer()); err != nil || ack == nil { - log.WithFields(log.Fields{ - "ack": h.String(), - "height": r.height, - }).WithError(err).Warning("fetch ack of block height") - continue - } - - // send advise to this block - req := &MuxAdviseAckedQueryReq{ - Envelope: proto.Envelope{}, - DatabaseID: r.c.rt.databaseID, - AdviseAckedQueryReq: AdviseAckedQueryReq{ - Query: ack, - }, - } - resp := &MuxAdviseAckedQueryResp{} - err = r.c.cl.CallNode(r.nodeID, route.OBSAdviseAckedQuery.String(), req, resp) - if err != nil { - log.WithFields(log.Fields{ - "node": r.nodeID, - "height": r.height, - }).WithError(err).Warning("send ack advise to observer") - return - } - } - // send block req := &MuxAdviseNewBlockReq{ Envelope: proto.Envelope{}, diff --git a/sqlchain/otypes/billing_req.go b/sqlchain/otypes/billing_req.go new file mode 100644 index 000000000..59521a6d4 --- /dev/null +++ b/sqlchain/otypes/billing_req.go @@ -0,0 +1,34 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package otypes + +import ( + pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +// AdviseBillingReq defines a request of the AdviseBillingRequest RPC method. +type AdviseBillingReq struct { + proto.Envelope + Req *pt.BillingRequest +} + +// AdviseBillingResp defines a request of the AdviseBillingRequest RPC method. +type AdviseBillingResp struct { + proto.Envelope + Resp *pt.BillingRequest +} diff --git a/sqlchain/types/block.go b/sqlchain/otypes/block.go similarity index 81% rename from sqlchain/types/block.go rename to sqlchain/otypes/block.go index 22b10167d..9cf4b8ff0 100644 --- a/sqlchain/types/block.go +++ b/sqlchain/otypes/block.go @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -package types +package otypes import ( "reflect" @@ -40,24 +40,6 @@ type Header struct { Timestamp time.Time } -//// MarshalHash marshals for hash -//func (h *Header) MarshalHash() ([]byte, error) { -// buffer := bytes.NewBuffer(nil) -// -// if err := utils.WriteElements(buffer, binary.BigEndian, -// h.Version, -// h.Producer, -// &h.GenesisHash, -// &h.ParentHash, -// &h.MerkleRoot, -// h.Timestamp, -// ); err != nil { -// return nil, err -// } -// -// return buffer.Bytes(), nil -//} - // SignedHeader is block header along with its producer signature. type SignedHeader struct { Header @@ -66,27 +48,6 @@ type SignedHeader struct { Signature *asymmetric.Signature } -//// MarshalHash marshals for hash. -//func (s *SignedHeader) MarshalHash() ([]byte, error) { -// buffer := bytes.NewBuffer(nil) -// -// if err := utils.WriteElements(buffer, binary.BigEndian, -// s.Version, -// s.Producer, -// &s.GenesisHash, -// &s.ParentHash, -// &s.MerkleRoot, -// s.Timestamp, -// &s.BlockHash, -// s.Signee, -// s.Signature, -// ); err != nil { -// return nil, err -// } -// -// return buffer.Bytes(), nil -//} - // Verify verifies the signature of the signed header. func (s *SignedHeader) Verify() error { if !s.Signature.Verify(s.BlockHash[:], s.Signee) { @@ -142,20 +103,6 @@ func (b *Block) PackAndSignBlock(signer *asymmetric.PrivateKey) (err error) { return } -//// MarshalHash marshals for hash -//func (b *Block) MarshalHash() ([]byte, error) { -// buffer := bytes.NewBuffer(nil) -// -// if err := utils.WriteElements(buffer, binary.BigEndian, -// &b.SignedHeader, -// b.Queries, -// ); err != nil { -// return nil, err -// } -// -// return buffer.Bytes(), nil -//} - // PushAckedQuery pushes a acknowledged and verified query into the block. func (b *Block) PushAckedQuery(h *hash.Hash) { if b.Queries == nil { diff --git a/sqlchain/types/block_gen.go b/sqlchain/otypes/block_gen.go similarity index 99% rename from sqlchain/types/block_gen.go rename to sqlchain/otypes/block_gen.go index 1d7b3630c..ebd75b2fe 100644 --- a/sqlchain/types/block_gen.go +++ b/sqlchain/otypes/block_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. diff --git a/sqlchain/types/block_gen_test.go b/sqlchain/otypes/block_gen_test.go similarity index 99% rename from sqlchain/types/block_gen_test.go rename to sqlchain/otypes/block_gen_test.go index 8266f1438..7743f3729 100644 --- a/sqlchain/types/block_gen_test.go +++ b/sqlchain/otypes/block_gen_test.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. diff --git a/sqlchain/types/block_test.go b/sqlchain/otypes/block_test.go similarity index 99% rename from sqlchain/types/block_test.go rename to sqlchain/otypes/block_test.go index 2a0f74dee..a3dfdb1fe 100644 --- a/sqlchain/types/block_test.go +++ b/sqlchain/otypes/block_test.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "bytes" diff --git a/sqlchain/otypes/doc.go b/sqlchain/otypes/doc.go new file mode 100644 index 000000000..00ef1d7d4 --- /dev/null +++ b/sqlchain/otypes/doc.go @@ -0,0 +1,18 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package otypes defines commonly used types for sql-chain. +package otypes diff --git a/sqlchain/types/errors.go b/sqlchain/otypes/errors.go similarity index 98% rename from sqlchain/types/errors.go rename to sqlchain/otypes/errors.go index 7d856cb81..9a065843d 100644 --- a/sqlchain/types/errors.go +++ b/sqlchain/otypes/errors.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "errors" diff --git a/common/types.go b/sqlchain/otypes/observer.go similarity index 69% rename from common/types.go rename to sqlchain/otypes/observer.go index eebb21745..3eb20dce2 100644 --- a/common/types.go +++ b/sqlchain/otypes/observer.go @@ -14,14 +14,11 @@ * limitations under the License. */ -package common +package otypes const ( - // AddressLength is the fixed length of a CovenantSQL node address. - AddressLength = 64 - // UUIDLength is the fixed length of a UUID. - UUIDLength = 16 + // ReplicateFromBeginning is the replication offset observes from genesis block. + ReplicateFromBeginning = int32(0) + // ReplicateFromNewest is the replication offset observes from block head of current node. + ReplicateFromNewest = int32(-1) ) - -// UUID is a unique identity which may be used as a Raft transaction ID. -type UUID [UUIDLength]byte diff --git a/sqlchain/types/xxx_test.go b/sqlchain/otypes/xxx_test.go similarity index 99% rename from sqlchain/types/xxx_test.go rename to sqlchain/otypes/xxx_test.go index d6545d79d..428be91c5 100644 --- a/sqlchain/types/xxx_test.go +++ b/sqlchain/otypes/xxx_test.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "io/ioutil" diff --git a/sqlchain/queryindex.go b/sqlchain/queryindex.go deleted file mode 100644 index da30cc84e..000000000 --- a/sqlchain/queryindex.go +++ /dev/null @@ -1,574 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sqlchain - -// TODO(leventeliu): use pooled objects to speed up this index. - -import ( - "sync" - - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" - "github.com/pkg/errors" -) - -var ( - placeHolder = &hash.Hash{} -) - -// requestTracker defines a tracker of a particular database query request. -// We use it to track and update queries in this index system. -type requestTracker struct { - // TODO(leventeliu): maybe we don't need them to be "signed" here. 
Given that the response or - // Ack is already verified, simply use Header. - response *wt.SignedResponseHeader - ack *wt.SignedAckHeader - // signedBlock is the hash of the block in the currently best chain which contains this query. - signedBlock *hash.Hash -} - -// queryTracker defines a tracker of a particular database query. It may contain multiple queries -// to differe workers. -type queryTracker struct { - firstAck *requestTracker - queries []*requestTracker -} - -// newQueryTracker returns a new queryTracker reference. -func newQueryTracker() *queryTracker { - return &queryTracker{ - // TODO(leventeliu): set appropriate capacity. - firstAck: nil, - queries: make([]*requestTracker, 0, 10), - } -} - -// updateAck updates the query tracker with a verified SignedAckHeader. -func (s *requestTracker) updateAck(ack *wt.SignedAckHeader) (isNew bool, err error) { - if s.ack == nil { - // A later Ack can overwrite the original Response setting - *s = requestTracker{ - response: ack.SignedResponseHeader(), - ack: ack, - } - - isNew = true - } else if !s.ack.Hash.IsEqual(&ack.Hash) { - // This may happen when a client sends multiple acknowledgements for a same query (same - // response header hash) - err = ErrMultipleAckOfResponse - } // else it's same as s.Ack, let's try not to overwrite it - - return -} - -// hashIndex defines a requestTracker index using hash as key. -type hashIndex map[hash.Hash]*requestTracker - -// seqIndex defines a queryTracker index using sequence number as key. -type seqIndex map[wt.QueryKey]*queryTracker - -// ensure returns the *queryTracker associated with the given key. It creates a new item if the -// key doesn't exist. -func (i seqIndex) ensure(k wt.QueryKey) (v *queryTracker) { - var ok bool - - if v, ok = i[k]; !ok { - v = newQueryTracker() - i[k] = v - } - - return -} - -// multiIndex defines a combination of multiple indexes. -// -// Index layout is as following: -// -// respIndex +----------------+ -// +---------------------------+->| requestTracker | +---------------------------+ -// | ... | | | | +-response |------>| signedresponseheader | -// +--------+ | | | +-ack (nil) | | +-ResponseHeader | -// | hash#1 |-----+ | | +-... | | | +-SignedRequestHeader | -// +--------+ | +----------------+ | | | +-RequestHeader | -// | ... | | | | | | +-... | -// +--------+ +------------------+ | | | | | +-SeqNo: seq#0 | -// | hash#3 |-----+ +->| queryTracker | | | | | | +-... | -// +--------+ | | | +-firstAck (nil) | | | | | +-Hash = hash#0 | -// | ... | | | | +-queries | | | | | +-Signee ====> pubk#0 | -// +--------+ | | | +-[0] |--+ | | | +-Signature => sign#0 | -// | hash#6 |--+ | | | +-... | | | +-... | -// +--------+ | | | +------------------+ | +-Hash = hash#1 | -// | ... | | | | | +-Signee ====> pubk#1 | -// | | | | +-Signature => sign#1 | -// | | | +---------------------------+ -// | | | +----------------+ -// | +-------------+---------+-+--->| requestTracker | -// | | | | | | +-response |----+ +-------------------------------+ -// ackindex | | | | | | +-ack |----|->| SignedAckHeader | -// | | | | | | +-... | | | +-AckHeader | -// | ... | | | | | | +----------------+ +->| | +-SignedResponseHeader | -// +--------+ | | | | | | | | +-ResponseHeader | -// | hash#4 |--|----------------+ | | | | | | +-SignedRequestHeader | -// +--------+ | | | | | | | | | +-RequestHeader | -// | ... | | | | | | | | | | | +-... | -// | | | | | | | | | | +-SeqNo: seq#1 | -// | | | | | | | | | | +-... 
| -// | | | | | | | | | +-Hash = hash#2 | -// | | | | | | | | | +-Signee ====> pubk#2 | -// | | | | | | | | | +-Signature => sign#2 | -// seqIndex | | | | +----------------+ | | | | +-... | -// +------------------------------+->| requestTracker | | | | +-Hash = hash#3 | -// | ... | | | | | | +-response |---+ | | | +-signee ====> pubk#3 | -// +--------+ | | | | | +-ack (nil) | | | | | +-Signature => sign#3 | -// | seq#0 |--------+ | | | | +-... | | | | +-... | -// +--------+ | | | +----------------+ | | +-Hash = hash#4 | -// | ... | | | | | | +-Signee ====> pubk#2 | -// +--------+ +--------------+ | | | | | +-Signature => sign#4 | -// | seq#1 |---------->| queryTracker | | | | | +-------------------------------+ -// +--------+ | +-firstAck |--+ | | | -// | ... | | +-queries | | | | -// | +-[0] |----+ | | -// | +-[1] |------+ | +---------------------------+ -// | +-... | +-->| SignedResponseHeader | -// +--------------+ | +-ResponseHeader | -// | | +-SignedRequestHeader | -// | | | +-RequestHeader | -// | | | | +-... | -// | | | | +-SeqNo: seq#1 | -// | | | | +-... | -// | | | +-Hash = hash#5 | -// | | | +-Signee ====> pubk#5 | -// | | | +-Signature => sign#5 | -// | | +-... | -// | +-Hash = hash#6 | -// | +-Signee ====> pubk#6 | -// | +-Signature => sign#6 | -// +---------------------------+ -// -type multiIndex struct { - sync.Mutex - respIndex, ackIndex hashIndex - seqIndex -} - -// newMultiIndex returns a new multiIndex reference. -func newMultiIndex() *multiIndex { - return &multiIndex{ - respIndex: make(map[hash.Hash]*requestTracker), - ackIndex: make(map[hash.Hash]*requestTracker), - seqIndex: make(map[wt.QueryKey]*queryTracker), - } -} - -// addResponse adds the responsed query to the index. -func (i *multiIndex) addResponse(resp *wt.SignedResponseHeader) (err error) { - i.Lock() - defer i.Unlock() - - if v, ok := i.respIndex[resp.Hash]; ok { - if v == nil || v.response == nil { - // TODO(leventeliu): consider to panic. - err = ErrCorruptedIndex - return - } - - // Given that `resp` is already verified by user, its header should be deeply equal to - // v.response.ResponseHeader. - // Considering that we may allow a node to update its key pair on-the-fly, just overwrite - // this response. - v.response = resp - return - } - - // Create new item - s := &requestTracker{ - response: resp, - } - - i.respIndex[resp.Hash] = s - q := i.seqIndex.ensure(resp.Request.GetQueryKey()) - q.queries = append(q.queries, s) - - return nil -} - -// addAck adds the acknowledged query to the index. -func (i *multiIndex) addAck(ack *wt.SignedAckHeader) (err error) { - i.Lock() - defer i.Unlock() - var v *requestTracker - var ok bool - q := i.seqIndex.ensure(ack.SignedRequestHeader().GetQueryKey()) - - if v, ok = i.respIndex[ack.ResponseHash()]; ok { - if v == nil || v.response == nil { - // TODO(leventeliu): consider to panic. 
- err = ErrCorruptedIndex - return - } - - // Add hash -> ack index anyway, so that we can find the request tracker later, even if - // there is a earlier acknowledgement for the same request - i.ackIndex[ack.Hash] = v - - // This also updates the item indexed by ackIndex and seqIndex - var isNew bool - - if isNew, err = v.updateAck(ack); err != nil { - return - } - - if isNew { - q.queries = append(q.queries, v) - } - } else { - // Build new queryTracker and update both indexes - v = &requestTracker{ - response: ack.SignedResponseHeader(), - ack: ack, - } - - i.respIndex[ack.ResponseHash()] = v - i.ackIndex[ack.Hash] = v - q.queries = append(q.queries, v) - } - - // TODO(leventeliu): - // This query has multiple signed acknowledgements. It may be caused by a network problem. - // We will keep the first ack counted anyway. But, should we report it to someone? - if q.firstAck == nil { - q.firstAck = v - } else if !q.firstAck.ack.Hash.IsEqual(&ack.Hash) { - err = ErrMultipleAckOfSeqNo - } - - return -} - -func (i *multiIndex) getAck(header *hash.Hash) (ack *wt.SignedAckHeader, ok bool) { - i.Lock() - defer i.Unlock() - - var t *requestTracker - if t, ok = i.ackIndex[*header]; ok { - ack = t.ack - } - - return -} - -// setSignedBlock sets the signed block of the acknowledged query. -func (i *multiIndex) setSignedBlock(blockHash *hash.Hash, ackHeaderHash *hash.Hash) { - i.Lock() - defer i.Unlock() - - if v, ok := i.ackIndex[*ackHeaderHash]; ok { - v.signedBlock = blockHash - } -} - -// resetSignedBlock resets the signed block of the acknowledged query. -func (i *multiIndex) resetSignedBlock(blockHash *hash.Hash, ackHeaderHash *hash.Hash) { - i.Lock() - defer i.Unlock() - - if v, ok := i.ackIndex[*ackHeaderHash]; ok { - // TODO(leventeliu): check if v.signedBlock equals blockHash. - v.signedBlock = nil - } -} - -// checkBeforeExpire checks the index and does some necessary work before it expires. -func (i *multiIndex) checkBeforeExpire() { - i.Lock() - defer i.Unlock() - - for _, q := range i.seqIndex { - if ack := q.firstAck; ack == nil { - // TODO(leventeliu): - // This query is not acknowledged and expires now. - } else if ack.signedBlock == nil || ack.signedBlock == placeHolder { - // TODO(leventeliu): - // This query was acknowledged normally but collectors didn't pack it in any block. - // There is definitely something wrong with them. - } - - for _, s := range q.queries { - if s != q.firstAck { - // TODO(leventeliu): so these guys lost the competition in this query. Should we - // do something about it? - } - } - } -} - -// checkAckFromBlock checks a acknowledged query from a block in this index. 
-func (i *multiIndex) checkAckFromBlock(b *hash.Hash, ack *hash.Hash) (isKnown bool, err error) { - i.Lock() - defer i.Unlock() - - // Check acknowledgement - q, isKnown := i.ackIndex[*ack] - - if !isKnown { - return - } - - if q.signedBlock != nil && !q.signedBlock.IsEqual(b) { - err = ErrQuerySignedByAnotherBlock - log.WithFields(log.Fields{ - "query": ack.String(), - "block": b.String(), - "signed_block": q.signedBlock.String(), - }).WithError(err).Error( - "Failed to check acknowledgement from block") - return - } - - qs := i.seqIndex[q.ack.SignedRequestHeader().GetQueryKey()] - - // Check it as a first acknowledgement - if i.respIndex[q.response.Hash] != q || qs == nil || qs.firstAck == nil { - err = ErrCorruptedIndex - return - } - - // If `q` is not considered first acknowledgement of this query locally - if qs.firstAck != q { - if qs.firstAck.signedBlock != nil { - err = ErrQuerySignedByAnotherBlock - log.WithFields(log.Fields{ - "query": ack.String(), - "block": b.String(), - "signed_block": func() string { - if q.signedBlock != nil { - return q.signedBlock.String() - } - return "nil" - }(), - }).WithError(err).Error( - "Failed to check acknowledgement from block") - return - } - - // But if the acknowledgement is not signed yet, it is also acceptable to promote another - // acknowledgement - qs.firstAck = q - } - - return -} - -// markAndCollectUnsignedAcks marks and collects all the unsigned acknowledgements in the index. -func (i *multiIndex) markAndCollectUnsignedAcks(qs *[]*hash.Hash) { - i.Lock() - defer i.Unlock() - - for _, q := range i.seqIndex { - if ack := q.firstAck; ack != nil && ack.signedBlock == nil { - ack.signedBlock = placeHolder - *qs = append(*qs, &ack.ack.Hash) - } - } -} - -// heightIndex defines a MultiIndex index using height as key. -type heightIndex struct { - sync.Mutex - index map[int32]*multiIndex -} - -// ensureHeight returns the *MultiIndex associated with the given height. It creates a new item if -// the key doesn't exist. -func (i *heightIndex) ensureHeight(h int32) (v *multiIndex) { - i.Lock() - defer i.Unlock() - v, ok := i.index[h] - - if !ok { - v = newMultiIndex() - i.index[h] = v - } - - return -} - -// ensureRange creates new *multiIndex items associated within the given height range [l, h) for -// those don't exist. -func (i *heightIndex) ensureRange(l, h int32) { - i.Lock() - defer i.Unlock() - - for x := l; x < h; x++ { - if _, ok := i.index[x]; !ok { - i.index[x] = newMultiIndex() - } - } -} - -func (i *heightIndex) get(k int32) (v *multiIndex, ok bool) { - i.Lock() - defer i.Unlock() - v, ok = i.index[k] - return -} - -func (i *heightIndex) del(k int32) { - i.Lock() - defer i.Unlock() - delete(i.index, k) -} - -// queryIndex defines a query index maintainer. -type queryIndex struct { - heightIndex *heightIndex - - sync.Mutex - barrier int32 -} - -func (i *queryIndex) getBarrier() int32 { - i.Lock() - defer i.Unlock() - return i.barrier -} - -func (i *queryIndex) setBarrier(b int32) { - i.Lock() - defer i.Unlock() - i.barrier = b -} - -// newQueryIndex returns a new queryIndex reference. -func newQueryIndex() *queryIndex { - return &queryIndex{ - heightIndex: &heightIndex{ - index: make(map[int32]*multiIndex), - }, - } -} - -// addResponse adds the responsed query to the index. -func (i *queryIndex) addResponse(h int32, resp *wt.SignedResponseHeader) error { - // TODO(leventeliu): we should ensure that the Request uses coordinated timestamp, instead of - // any client local time. 
- return i.heightIndex.ensureHeight(h).addResponse(resp) -} - -// addAck adds the acknowledged query to the index. -func (i *queryIndex) addAck(h int32, ack *wt.SignedAckHeader) error { - return i.heightIndex.ensureHeight(h).addAck(ack) -} - -// checkAckFromBlock checks a acknowledged query from a block at the given height. -func (i *queryIndex) checkAckFromBlock(h int32, b *hash.Hash, ack *hash.Hash) ( - isKnown bool, err error) { - l := i.getBarrier() - - if h < l { - err = errors.Wrapf(ErrQueryExpired, "check Ack, height %d, barrier %d", h, l) - return - } - - for x := l; x <= h; x++ { - if hi, ok := i.heightIndex.get(x); ok { - if isKnown, err = hi.checkAckFromBlock(b, ack); err != nil || isKnown { - return - } - } - } - - return -} - -// setSignedBlock updates the signed block in index for the acknowledged queries in the block. -func (i *queryIndex) setSignedBlock(h int32, block *ct.Block) { - b := i.getBarrier() - - for _, v := range block.Queries { - for x := b; x <= h; x++ { - if hi, ok := i.heightIndex.get(x); ok { - hi.setSignedBlock(block.BlockHash(), v) - } - } - } -} - -func (i *queryIndex) resetSignedBlock(h int32, block *ct.Block) { - b := i.getBarrier() - - for _, v := range block.Queries { - for x := b; x <= h; x++ { - if hi, ok := i.heightIndex.get(x); ok { - hi.resetSignedBlock(block.BlockHash(), v) - } - } - } -} - -// getAck gets the acknowledged queries from the index. -func (i *queryIndex) getAck(h int32, header *hash.Hash) (ack *wt.SignedAckHeader, err error) { - b := i.getBarrier() - - if h < b { - err = errors.Wrapf(ErrQueryExpired, "get Ack, height %d, barrier %d", h, b) - return - } - - for x := b; x <= h; x++ { - if hi, ok := i.heightIndex.get(x); ok { - if ack, ok = hi.getAck(header); ok { - return - } - } - } - - err = ErrQueryNotCached - return -} - -// advanceBarrier moves barrier to given height. All buckets lower than this height will be set as -// expired, and all the queries which are not packed in these buckets will be reported. -func (i *queryIndex) advanceBarrier(height int32) { - b := i.getBarrier() - i.setBarrier(height) - - for x := b; x < height; x++ { - if hi, ok := i.heightIndex.get(x); ok { - hi.checkBeforeExpire() - i.heightIndex.del(x) - } - } -} - -// markAndCollectUnsignedAcks marks and collects all the unsigned acknowledgements which can be -// signed by a block at the given height. -func (i *queryIndex) markAndCollectUnsignedAcks(height int32) (qs []*hash.Hash) { - b := i.getBarrier() - qs = make([]*hash.Hash, 0, 1024) - - for x := b; x < height; x++ { - if hi, ok := i.heightIndex.get(x); ok { - hi.markAndCollectUnsignedAcks(&qs) - } - } - - return -} diff --git a/sqlchain/queryindex_test.go b/sqlchain/queryindex_test.go deleted file mode 100644 index 01bb5bc13..000000000 --- a/sqlchain/queryindex_test.go +++ /dev/null @@ -1,394 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sqlchain - -import ( - "math/rand" - "reflect" - "testing" - - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/pkg/errors" -) - -const ( - testBucketNumber = 10 - testQueryNumberPerHeight = 10 - testClientNumber = 10 - testWorkerNumber = 10 - testQueryWorkerNumber = 3 -) - -func (i *heightIndex) mustGet(k int32) *multiIndex { - i.Lock() - defer i.Unlock() - return i.index[k] -} - -func TestCorruptedIndex(t *testing.T) { - ack, err := createRandomNodesAndAck() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - resp := ack.SignedResponseHeader() - - // Create index - qi := newQueryIndex() - - if err = qi.addResponse(0, resp); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = qi.addAck(0, ack); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test repeatedly add - if err = qi.addResponse(0, resp); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = qi.addAck(0, ack); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test corrupted index - qi.heightIndex.mustGet(0).respIndex[resp.Hash].response = nil - - if err = qi.addResponse(0, resp); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } - - if err = qi.addAck(0, ack); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } - - qi.heightIndex.mustGet(0).respIndex[resp.Hash] = nil - - if err = qi.addResponse(0, resp); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } - - if err = qi.addAck(0, ack); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestSingleAck(t *testing.T) { - ack, err := createRandomNodesAndAck() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - qi := newQueryIndex() - - if err = qi.addAck(0, ack); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Check not signed ack - qi.heightIndex.mustGet(0).checkBeforeExpire() -} - -func TestEnsureRange(t *testing.T) { - qi := newQueryIndex() - qi.heightIndex.ensureRange(0, 10) - - for i := 0; i < 10; i++ { - if _, ok := qi.heightIndex.get(int32(i)); !ok { - t.Fatalf("Failed to ensure height %d", i) - } - } -} - -func TestCheckAckFromBlock(t *testing.T) { - var height int32 = 10 - qi := newQueryIndex() - qi.advanceBarrier(height) - b1, err := createRandomBlock(genesisHash, false) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if _, err := qi.checkAckFromBlock( - 0, b1.BlockHash(), b1.Queries[0], - ); errors.Cause(err) != ErrQueryExpired { - t.Fatalf("Unexpected error: %v", err) - } - - if isKnown, err := qi.checkAckFromBlock( - height, b1.BlockHash(), b1.Queries[0], - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if isKnown { - t.Fatal("Unexpected result: index should not know this query") - } - - // Create a group of query for test - cli, err := newRandomNode() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - worker1, err := newRandomNode() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - worker2, err := newRandomNode() - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - req, err := createRandomQueryRequest(cli) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - resp1, err := createRandomQueryResponseWithRequest(req, worker1) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - ack1, err := createRandomQueryAckWithResponse(resp1, cli) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - resp2, err 
:= createRandomQueryResponseWithRequest(req, worker2) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - ack2, err := createRandomQueryAckWithResponse(resp2, cli) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test a query signed by another block - if err = qi.addAck(height, ack1); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = qi.addAck(height, ack2); err != ErrMultipleAckOfSeqNo { - t.Fatalf("Unexpected error: %v", err) - } - - b2, err := createRandomBlock(genesisHash, false) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - b1.Queries[0] = &ack1.Hash - b2.Queries[0] = &ack1.Hash - qi.setSignedBlock(height, b1) - - if _, err := qi.checkAckFromBlock( - height, b2.BlockHash(), b2.Queries[0], - ); err != ErrQuerySignedByAnotherBlock { - t.Fatalf("Unexpected error: %v", err) - } - - // Test checking same ack signed by another block - b2.Queries[0] = &ack2.Hash - - if _, err = qi.checkAckFromBlock( - height, b2.BlockHash(), b2.Queries[0], - ); err != ErrQuerySignedByAnotherBlock { - t.Fatalf("Unexpected error: %v", err) - } - - // Revert index state for the first block, and test checking again - qi.heightIndex.mustGet(height).seqIndex[req.GetQueryKey()].firstAck.signedBlock = nil - - if _, err = qi.checkAckFromBlock( - height, b2.BlockHash(), b2.Queries[0], - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Test corrupted index - qi.heightIndex.mustGet(height).seqIndex[req.GetQueryKey()] = nil - - if _, err = qi.checkAckFromBlock( - height, b2.BlockHash(), b2.Queries[0], - ); err != ErrCorruptedIndex { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestGetAck(t *testing.T) { - qi := newQueryIndex() - qh := &hash.Hash{} - - if _, err := qi.getAck(-1, qh); errors.Cause(err) != ErrQueryExpired { - t.Fatalf("Unexpected error: %v", err) - } - - if _, err := qi.getAck(0, qh); errors.Cause(err) != ErrQueryNotCached { - t.Fatalf("Unexpected error: %v", err) - } -} - -func TestQueryIndex(t *testing.T) { - log.SetLevel(log.InfoLevel) - // Initialize clients and workers - clients, err := newRandomNodes(testClientNumber) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - workers, err := newRandomNodes(testWorkerNumber) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - // Initialize index - qi := newQueryIndex() - - // Create some responses and acknowledgements and insert to index - for i := 0; i < testBucketNumber; i++ { - qi.advanceBarrier(int32(i)) - block, err := createRandomBlock(genesisHash, false) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - block.Queries = block.Queries[:0] - - for j := 0; j < testQueryNumberPerHeight; j++ { - cli := clients[rand.Intn(testClientNumber)] - req, err := createRandomQueryRequest(cli) - hasFirstAck := false - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - ackNumber := rand.Intn(testQueryWorkerNumber + 1) - - for k := 0; k < testQueryWorkerNumber; k++ { - worker := workers[(rand.Intn(testWorkerNumber)+k)%testWorkerNumber] - resp, err := createRandomQueryResponseWithRequest(req, worker) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - log.Debugf("i = %d, j = %d, k = %d\n\tseqno = %+v, req = %v, resp = %v", i, j, k, - resp.Request.GetQueryKey(), &req.Hash, &resp.Hash) - - if err = qi.addResponse(int32(i), resp); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if k < ackNumber { - dupAckNumber := 1 + rand.Intn(2) - - for l := 0; l < dupAckNumber; l++ { - ack, err := 
createRandomQueryAckWithResponse(resp, cli) - - log.Debugf("i = %d, j = %d, k = %d, l = %d\n\tseqno = %+v, "+ - "req = %v, resp = %v, ack = %v", - i, j, k, l, - ack.SignedRequestHeader().GetQueryKey(), - &ack.SignedRequestHeader().Hash, - &ack.SignedResponseHeader().Hash, - &ack.Hash, - ) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - err = qi.addAck(int32(i), ack) - - if !hasFirstAck { - if l == 0 && err != nil || - l > 0 && err != nil && err != ErrMultipleAckOfResponse { - t.Fatalf("Error occurred: %v", err) - } - } else { - if l == 0 && err == nil { - t.Fatalf("Unexpected error: %v", err) - } - } - - if err == nil { - hasFirstAck = true - block.PushAckedQuery(&ack.Hash) - } else { - continue - } - - if rAck, err := qi.getAck(int32(i), &ack.Hash); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !reflect.DeepEqual(ack, rAck) { - t.Fatalf("Unexpected result:\n\torigin = %+v\n\toutput = %+v", - ack, rAck) - } else if !reflect.DeepEqual( - ack.SignedResponseHeader(), rAck.SignedResponseHeader()) { - t.Fatalf("Unexpected result:\n\torigin = %+v\n\toutput = %+v", - ack.SignedResponseHeader(), rAck.SignedResponseHeader()) - } - } - } - } - - qi.setSignedBlock(int32(i), block) - - for j := range block.Queries { - if isKnown, err := qi.checkAckFromBlock( - int32(i), block.BlockHash(), block.Queries[j], - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !isKnown { - t.Logf("Failed to check known ack: %s", block.Queries[j]) - } - } - - qi.resetSignedBlock(int32(i), block) - - for j := range block.Queries { - if isKnown, err := qi.checkAckFromBlock( - int32(i), block.BlockHash(), block.Queries[j], - ); err != nil { - t.Fatalf("Error occurred: %v", err) - } else if !isKnown { - t.Fatal("Unexpected result: block is known") - } - } - } - } -} diff --git a/sqlchain/rpc.go b/sqlchain/rpc.go index 5bb7fcbdf..5ed518267 100644 --- a/sqlchain/rpc.go +++ b/sqlchain/rpc.go @@ -21,8 +21,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" ) // ChainRPCService defines a sql-chain RPC server. @@ -32,7 +31,7 @@ type ChainRPCService struct { // AdviseNewBlockReq defines a request of the AdviseNewBlock RPC method. type AdviseNewBlockReq struct { - Block *ct.Block + Block *types.Block Count int32 } @@ -48,18 +47,9 @@ type AdviseBinLogReq struct { type AdviseBinLogResp struct { } -// AdviseResponsedQueryReq defines a request of the AdviseAckedQuery RPC method. -type AdviseResponsedQueryReq struct { - Query *wt.SignedResponseHeader -} - -// AdviseResponsedQueryResp defines a response of the AdviseAckedQuery RPC method. -type AdviseResponsedQueryResp struct { -} - // AdviseAckedQueryReq defines a request of the AdviseAckedQuery RPC method. type AdviseAckedQueryReq struct { - Query *wt.SignedAckHeader + Query *types.SignedAckHeader } // AdviseAckedQueryResp defines a response of the AdviseAckedQuery RPC method. @@ -74,18 +64,7 @@ type FetchBlockReq struct { // FetchBlockResp defines a response of the FetchBlock RPC method. type FetchBlockResp struct { Height int32 - Block *ct.Block -} - -// FetchAckedQueryReq defines a request of the FetchAckedQuery RPC method. 
-type FetchAckedQueryReq struct { - Height int32 - SignedAckedHash *hash.Hash -} - -// FetchAckedQueryResp defines a request of the FetchAckedQuery RPC method. -type FetchAckedQueryResp struct { - Ack *wt.SignedAckHeader + Block *types.Block } // SignBillingReq defines a request of the SignBilling RPC method. @@ -140,12 +119,6 @@ func (s *ChainRPCService) AdviseBinLog(req *AdviseBinLogReq, resp *AdviseBinLogR return nil } -// AdviseResponsedQuery is the RPC method to advise a new responsed query to the target server. -func (s *ChainRPCService) AdviseResponsedQuery( - req *AdviseResponsedQueryReq, resp *AdviseResponsedQueryResp) error { - return s.chain.VerifyAndPushResponsedQuery(req.Query) -} - // AdviseAckedQuery is the RPC method to advise a new acknowledged query to the target server. func (s *ChainRPCService) AdviseAckedQuery( req *AdviseAckedQueryReq, resp *AdviseAckedQueryResp) error { @@ -159,13 +132,6 @@ func (s *ChainRPCService) FetchBlock(req *FetchBlockReq, resp *FetchBlockResp) ( return } -// FetchAckedQuery is the RPC method to fetch a known block from the target server. -func (s *ChainRPCService) FetchAckedQuery(req *FetchAckedQueryReq, resp *FetchAckedQueryResp, -) (err error) { - resp.Ack, err = s.chain.FetchAckedQuery(req.Height, req.SignedAckedHash) - return -} - // SignBilling is the RPC method to get signature for a billing request from the target server. func (s *ChainRPCService) SignBilling(req *SignBillingReq, resp *SignBillingResp) (err error) { resp.HeaderHash = req.BillingRequest.RequestHash diff --git a/sqlchain/runtime.go b/sqlchain/runtime.go index e3754024d..b1935558d 100644 --- a/sqlchain/runtime.go +++ b/sqlchain/runtime.go @@ -23,9 +23,8 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) // runtime represents a chain runtime state. @@ -48,10 +47,12 @@ type runtime struct { tick time.Duration // queryTTL sets the unacknowledged query TTL in block periods. queryTTL int32 + // blockCacheTTL sets the cached block numbers. + blockCacheTTL int32 // muxServer is the multiplexing service of sql-chain PRC. muxService *MuxService // price sets query price in gases. - price map[wt.QueryType]uint64 + price map[types.QueryType]uint64 producingReward uint64 billingPeriods int32 @@ -86,11 +87,17 @@ type runtime struct { // newRunTime returns a new sql-chain runtime instance with the specified config. func newRunTime(c *Config) (r *runtime) { r = &runtime{ - stopCh: make(chan struct{}), - databaseID: c.DatabaseID, - period: c.Period, - tick: c.Tick, - queryTTL: c.QueryTTL, + stopCh: make(chan struct{}), + databaseID: c.DatabaseID, + period: c.Period, + tick: c.Tick, + queryTTL: c.QueryTTL, + blockCacheTTL: func() int32 { + if c.BlockCacheTTL < minBlockCacheTTL { + return minBlockCacheTTL + } + return c.BlockCacheTTL + }(), muxService: c.MuxService, price: c.Price, producingReward: c.ProducingReward, @@ -122,7 +129,7 @@ func newRunTime(c *Config) (r *runtime) { return } -func (r *runtime) setGenesis(b *ct.Block) { +func (r *runtime) setGenesis(b *types.Block) { r.chainInitTime = b.Timestamp() r.genesisHash = *b.BlockHash() r.head = &state{ @@ -190,7 +197,7 @@ func (r *runtime) setNextTurn() { } // getQueryGas gets the consumption of gas for a specified query type. 
-func (r *runtime) getQueryGas(t wt.QueryType) uint64 { +func (r *runtime) getQueryGas(t types.QueryType) uint64 { return r.price[t] } diff --git a/sqlchain/storage/storage.go b/sqlchain/storage/storage.go deleted file mode 100644 index 7e36bad0e..000000000 --- a/sqlchain/storage/storage.go +++ /dev/null @@ -1,438 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package storage - -import ( - "context" - "database/sql" - "errors" - "fmt" - "io" - "sync" - - "github.com/CovenantSQL/CovenantSQL/twopc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - // Register CovenantSQL/go-sqlite3-encrypt engine. - _ "github.com/CovenantSQL/go-sqlite3-encrypt" -) - -var ( - index = struct { - sync.Mutex - db map[string]*sql.DB - }{ - db: make(map[string]*sql.DB), - } -) - -// Query represents the single query of sqlite. -type Query struct { - Pattern string - Args []sql.NamedArg -} - -// ExecLog represents the execution log of sqlite. -type ExecLog struct { - ConnectionID uint64 - SeqNo uint64 - Timestamp int64 - Queries []Query -} - -// ExecResult represents the execution result of sqlite. -type ExecResult struct { - LastInsertID int64 - RowsAffected int64 -} - -func openDB(dsn string) (db *sql.DB, err error) { - // Rebuild DSN. - d, err := NewDSN(dsn) - - if err != nil { - return nil, err - } - - d.AddParam("_journal_mode", "WAL") - d.AddParam("_synchronous", "NORMAL") - fdsn := d.Format() - - fn := d.GetFileName() - mode, _ := d.GetParam("mode") - cache, _ := d.GetParam("cache") - - if (fn == ":memory:" || mode == "memory") && cache != "shared" { - // Return a new DB instance if it's in memory and private. - db, err = sql.Open("sqlite3", fdsn) - return - } - - index.Lock() - db, ok := index.db[d.filename] - index.Unlock() - - if !ok { - db, err = sql.Open("sqlite3", fdsn) - - if err != nil { - return nil, err - } - - index.Lock() - index.db[d.filename] = db - index.Unlock() - } - - return -} - -// TxID represents a transaction ID. -type TxID struct { - ConnectionID uint64 - SeqNo uint64 - Timestamp int64 -} - -func equalTxID(x, y *TxID) bool { - return x.ConnectionID == y.ConnectionID && x.SeqNo == y.SeqNo && x.Timestamp == y.Timestamp -} - -// Storage represents a underlying storage implementation based on sqlite3. -type Storage struct { - sync.Mutex - dsn string - db *sql.DB - tx *sql.Tx // Current tx - id TxID - queries []Query -} - -// New returns a new storage connected by dsn. -func New(dsn string) (st *Storage, err error) { - db, err := openDB(dsn) - - if err != nil { - return - } - - return &Storage{ - dsn: dsn, - db: db, - }, nil -} - -// Prepare implements prepare method of two-phase commit worker. 
-func (s *Storage) Prepare(ctx context.Context, wb twopc.WriteBatch) (err error) { - el, ok := wb.(*ExecLog) - - if !ok { - return errors.New("unexpected WriteBatch type") - } - - s.Lock() - defer s.Unlock() - - if s.tx != nil { - if equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) { - s.queries = el.Queries - return nil - } - - return fmt.Errorf("twopc: inconsistent state, currently in tx: "+ - "conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp) - } - - s.tx, err = s.db.BeginTx(ctx, nil) - - if err != nil { - return - } - - s.id = TxID{el.ConnectionID, el.SeqNo, el.Timestamp} - s.queries = el.Queries - - return nil -} - -// Commit implements commit method of two-phase commit worker. -func (s *Storage) Commit(ctx context.Context, wb twopc.WriteBatch) (result interface{}, err error) { - el, ok := wb.(*ExecLog) - - if !ok { - err = errors.New("unexpected WriteBatch type") - return - } - - s.Lock() - defer s.Unlock() - - if s.tx != nil { - if equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) { - // get last insert id and affected rows result - execResult := ExecResult{} - - for _, q := range s.queries { - // convert arguments types - args := make([]interface{}, len(q.Args)) - - for i, v := range q.Args { - args[i] = v - } - - var res sql.Result - res, err = s.tx.ExecContext(ctx, q.Pattern, args...) - - if err != nil { - log.WithError(err).Debug("commit query failed") - s.tx.Rollback() - s.tx = nil - s.queries = nil - return - } - - lastInsertID, _ := res.LastInsertId() - rowsAffected, _ := res.RowsAffected() - - execResult.LastInsertID = lastInsertID - execResult.RowsAffected += rowsAffected - } - - s.tx.Commit() - s.tx = nil - s.queries = nil - result = execResult - - return - } - - err = fmt.Errorf("twopc: inconsistent state, currently in tx: "+ - "conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp) - return - } - - err = errors.New("twopc: tx not prepared") - return -} - -// Rollback implements rollback method of two-phase commit worker. -func (s *Storage) Rollback(ctx context.Context, wb twopc.WriteBatch) (err error) { - el, ok := wb.(*ExecLog) - - if !ok { - return errors.New("unexpected WriteBatch type") - } - - s.Lock() - defer s.Unlock() - - if !equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) { - return fmt.Errorf("twopc: inconsistent state, currently in tx: "+ - "conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp) - } - - if s.tx != nil { - s.tx.Rollback() - s.tx = nil - s.queries = nil - } - - return nil -} - -// Query implements read-only query feature. 
-func (s *Storage) Query(ctx context.Context, queries []Query) (columns []string, types []string, - data [][]interface{}, err error) { - data = make([][]interface{}, 0) - - if len(queries) == 0 { - return - } - - var tx *sql.Tx - var txOptions = &sql.TxOptions{ - ReadOnly: true, - } - - if tx, err = s.db.BeginTx(ctx, txOptions); err != nil { - return - } - - // always rollback on complete - defer tx.Rollback() - - q := queries[len(queries)-1] - - // convert arguments types - args := make([]interface{}, len(q.Args)) - - for i, v := range q.Args { - args[i] = v - } - - var rows *sql.Rows - if rows, err = tx.Query(q.Pattern, args...); err != nil { - return - } - - // free result set - defer rows.Close() - - // get rows meta - if columns, err = rows.Columns(); err != nil { - return - } - - // if there is empty columns, treat result as empty - if len(columns) == 0 { - return - } - - // get types meta - if types, err = s.transformColumnTypes(rows.ColumnTypes()); err != nil { - return - } - - rs := newRowScanner(len(columns)) - - for rows.Next() { - err = rows.Scan(rs.ScanArgs()...) - if err != nil { - return - } - - data = append(data, rs.GetRow()) - } - - err = rows.Err() - return -} - -// Exec implements write query feature. -func (s *Storage) Exec(ctx context.Context, queries []Query) (result ExecResult, err error) { - if len(queries) == 0 { - return - } - - var tx *sql.Tx - var txOptions = &sql.TxOptions{ - ReadOnly: false, - } - - if tx, err = s.db.BeginTx(ctx, txOptions); err != nil { - return - } - - defer tx.Rollback() - - for _, q := range queries { - // convert arguments types - args := make([]interface{}, len(q.Args)) - - for i, v := range q.Args { - args[i] = v - } - - var r sql.Result - if r, err = tx.Exec(q.Pattern, args...); err != nil { - log.WithError(err).Debug("execute query failed") - return - } - - var affected int64 - affected, _ = r.RowsAffected() - result.RowsAffected += affected - result.LastInsertID, _ = r.LastInsertId() - } - - tx.Commit() - - return -} - -// Close implements database safe close feature. 
-func (s *Storage) Close() (err error) { - d, err := NewDSN(s.dsn) - if err != nil { - return - } - - index.Lock() - defer index.Unlock() - delete(index.db, d.filename) - return s.db.Close() -} - -func (s *Storage) transformColumnTypes(columnTypes []*sql.ColumnType, e error) (types []string, err error) { - if e != nil { - err = e - return - } - - types = make([]string, len(columnTypes)) - - for i, c := range columnTypes { - types[i] = c.DatabaseTypeName() - } - - return -} - -// golang does trick convert, use rowScanner to return the original result type in sqlite3 driver -type rowScanner struct { - fieldCnt int - column int // current column - fields []interface{} // temp fields - scanArgs []interface{} -} - -func newRowScanner(fieldCnt int) (s *rowScanner) { - s = &rowScanner{ - fieldCnt: fieldCnt, - column: 0, - fields: make([]interface{}, fieldCnt), - scanArgs: make([]interface{}, fieldCnt), - } - - for i := 0; i != fieldCnt; i++ { - s.scanArgs[i] = s - } - - return -} - -func (s *rowScanner) Scan(src interface{}) error { - if s.fieldCnt <= s.column { - // read complete - return io.EOF - } - - s.fields[s.column] = src - s.column++ - - return nil -} - -func (s *rowScanner) GetRow() []interface{} { - return s.fields -} - -func (s *rowScanner) ScanArgs() []interface{} { - // reset - s.column = 0 - s.fields = make([]interface{}, s.fieldCnt) - return s.scanArgs -} diff --git a/sqlchain/storage/storage_test.go b/sqlchain/storage/storage_test.go deleted file mode 100644 index 0cda67128..000000000 --- a/sqlchain/storage/storage_test.go +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package storage - -import ( - "context" - "database/sql" - "fmt" - "io/ioutil" - "reflect" - "testing" - "time" -) - -func newQuery(query string, args ...interface{}) (q Query) { - q.Pattern = query - - // convert args - q.Args = make([]sql.NamedArg, len(args)) - for i, v := range args { - q.Args[i] = sql.Named("", v) - } - - return -} - -func newNamedQuery(query string, args map[string]interface{}) (q Query) { - q.Pattern = query - q.Args = make([]sql.NamedArg, len(args)) - i := 0 - - // convert args - for n, v := range args { - q.Args[i] = sql.Named(n, v) - i++ - } - - return -} - -func TestBadType(t *testing.T) { - fl, err := ioutil.TempFile("", "sqlite3-") - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - st, err := New(fmt.Sprintf("file:%s", fl.Name())) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = st.Prepare(context.Background(), struct{}{}); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - if _, err = st.Commit(context.Background(), struct{}{}); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - if err = st.Rollback(context.Background(), struct{}{}); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } -} - -func TestStorage(t *testing.T) { - fl, err := ioutil.TempFile("", "sqlite3-") - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - st, err := New(fmt.Sprintf("file:%s", fl.Name())) - - if err != nil { - t.Fatalf("Error occurred: %v", err) - } - - el1 := &ExecLog{ - ConnectionID: 1, - SeqNo: 1, - Timestamp: time.Now().UnixNano(), - Queries: []Query{ - newQuery("CREATE TABLE IF NOT EXISTS `kv` (`key` TEXT PRIMARY KEY, `value` BLOB)"), - newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k0', NULL)"), - newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k1', 'v1')"), - newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k2', 'v2')"), - newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k3', 'v3')"), - newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k3', 'v3-2')"), - newQuery("DELETE FROM `kv` WHERE `key`='k2'"), - }, - } - - el2 := &ExecLog{ - ConnectionID: 1, - SeqNo: 2, - Timestamp: time.Now().UnixNano(), - Queries: []Query{ - newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k1', 'v1-2')"), - }, - } - - if err = st.Prepare(context.Background(), el1); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = st.Prepare(context.Background(), el1); err != nil { - t.Fatalf("Error occurred: %v", err) - } - - if err = st.Prepare(context.Background(), el2); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - if _, err = st.Commit(context.Background(), el2); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - if err = st.Rollback(context.Background(), el2); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %v", err) - } - - var res interface{} - if res, err = st.Commit(context.Background(), el1); err != nil { - t.Fatalf("Error occurred: %v", err) - } else { - result := res.(ExecResult) - t.Logf("Result: %v", result) - } - - // test query - columns, types, data, err := 
st.Query(context.Background(), - []Query{newQuery("SELECT * FROM `kv` ORDER BY `key` ASC")}) - - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } - if !reflect.DeepEqual(columns, []string{"key", "value"}) { - t.Fatalf("Error column result: %v", columns) - } - if !reflect.DeepEqual(types, []string{"TEXT", "BLOB"}) { - t.Fatalf("Error types result: %v", types) - } - if len(data) != 3 { - t.Fatalf("Error result count: %v, should be 3", len(data)) - } else { - // compare rows - should1 := []interface{}{[]byte("k0"), nil} - should2 := []interface{}{[]byte("k1"), []byte("v1")} - should3 := []interface{}{[]byte("k3"), []byte("v3-2")} - t.Logf("Rows: %v", data) - if !reflect.DeepEqual(data[0], should1) { - t.Fatalf("Error result row: %v, should: %v", data[0], should1) - } - if !reflect.DeepEqual(data[1], should2) { - t.Fatalf("Error result row: %v, should: %v", data[1], should2) - } - if !reflect.DeepEqual(data[2], should3) { - t.Fatalf("Error result row: %v, should: %v", data[2], should2) - } - } - - // test query with projection - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("SELECT `key` FROM `kv` ORDER BY `key` ASC")}) - - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } - if !reflect.DeepEqual(columns, []string{"key"}) { - t.Fatalf("Error column result: %v", columns) - } - if !reflect.DeepEqual(types, []string{"TEXT"}) { - t.Fatalf("Error types result: %v", types) - } - if len(data) != 3 { - t.Fatalf("Error result count: %v, should be 3", len(data)) - } else { - // compare rows - should1 := []interface{}{[]byte("k0")} - should2 := []interface{}{[]byte("k1")} - should3 := []interface{}{[]byte("k3")} - t.Logf("Rows: %v", data) - if !reflect.DeepEqual(data[0], should1) { - t.Fatalf("Error result row: %v, should: %v", data[0], should1) - } - if !reflect.DeepEqual(data[1], should2) { - t.Fatalf("Error result row: %v, should: %v", data[1], should2) - } - if !reflect.DeepEqual(data[2], should3) { - t.Fatalf("Error result row: %v, should: %v", data[2], should2) - } - } - - // test query with condition - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("SELECT `key` FROM `kv` WHERE `value` IS NOT NULL ORDER BY `key` ASC")}) - - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } - if !reflect.DeepEqual(columns, []string{"key"}) { - t.Fatalf("Error column result: %v", columns) - } - if !reflect.DeepEqual(types, []string{"TEXT"}) { - t.Fatalf("Error types result: %v", types) - } - if len(data) != 2 { - t.Fatalf("Error result count: %v, should be 3", len(data)) - } else { - // compare rows - should1 := []interface{}{[]byte("k1")} - should2 := []interface{}{[]byte("k3")} - t.Logf("Rows: %v", data) - if !reflect.DeepEqual(data[0], should1) { - t.Fatalf("Error result row: %v, should: %v", data[0], should1) - } - if !reflect.DeepEqual(data[1], should2) { - t.Fatalf("Error result row: %v, should: %v", data[1], should2) - } - } - - // test failed query - columns, types, data, err = st.Query(context.Background(), []Query{newQuery("SQL???? 
WHAT!!!!")}) - - if err == nil { - t.Fatal("Query should failed") - } else { - t.Logf("Query failed as expected with: %v", err.Error()) - } - - // test non-read query - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("DELETE FROM `kv` WHERE `value` IS NULL")}) - - execResult, err := st.Exec(context.Background(), - []Query{newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k4', 'v4')")}) - if err != nil || execResult.RowsAffected != 1 { - t.Fatalf("Exec INSERT failed: %v", err) - } - // test with arguments - execResult, err = st.Exec(context.Background(), []Query{newQuery("DELETE FROM `kv` WHERE `key`='k4'")}) - if err != nil || execResult.RowsAffected != 1 { - t.Fatalf("Exec DELETE failed: %v", err) - } - execResult, err = st.Exec(context.Background(), - []Query{newQuery("DELETE FROM `kv` WHERE `key`=?", "not_exist")}) - if err != nil || execResult.RowsAffected != 0 { - t.Fatalf("Exec DELETE failed: %v", err) - } - - // test again - columns, types, data, err = st.Query(context.Background(), []Query{newQuery("SELECT `key` FROM `kv`")}) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else if len(data) != 3 { - t.Fatalf("Last write query should not take any effect, row count: %v", len(data)) - } else { - t.Logf("Rows: %v", data) - } - - // test with select - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("SELECT `key` FROM `kv` WHERE `key` IN (?)", "k1")}) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else if len(data) != 1 { - t.Fatalf("Should only have one record, but actually %v", len(data)) - } else { - t.Logf("Rows: %v", data) - } - - // test with select with named arguments - columns, types, data, err = st.Query(context.Background(), - []Query{newNamedQuery("SELECT `key` FROM `kv` WHERE `key` IN (:test2, :test1)", map[string]interface{}{ - "test1": "k1", - "test2": "k3", - })}) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else if len(data) != 2 { - t.Fatalf("Should only have two records, but actually %v", len(data)) - } else { - t.Logf("Rows: %v", data) - } - - // test with function - columns, types, data, err = st.Query(context.Background(), - []Query{newQuery("SELECT COUNT(1) AS `c` FROM `kv`")}) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else { - if len(columns) != 1 { - t.Fatalf("Query result should contain only one column, now %v", len(columns)) - } else if columns[0] != "c" { - t.Fatalf("Query result column name is not defined alias, but :%v", columns[0]) - } - if len(types) != 1 { - t.Fatalf("Query result should contain only one column, now %v", len(types)) - } else { - t.Logf("Query result type is: %v", types[0]) - } - if len(data) != 1 || len(data[0]) != 1 { - t.Fatalf("Query result should contain only one row and one column, now %v", data) - } else if !reflect.DeepEqual(data[0][0], int64(3)) { - t.Fatalf("Query result should be table row count 3, but: %v", data[0]) - } - } - - // test with timestamp fields - _, err = st.Exec(context.Background(), []Query{ - newQuery("CREATE TABLE `tm` (tm TIMESTAMP)"), - newQuery("INSERT INTO `tm` VALUES(DATE('NOW'))"), - }) - if err != nil { - t.Fatalf("Query failed: %v", err.Error()) - } else { - // query for values - _, _, data, err = st.Query(context.Background(), []Query{newQuery("SELECT `tm` FROM `tm`")}) - if len(data) != 1 || len(data[0]) != 1 { - t.Fatalf("Query result should contain only one row and one column, now %v", data) - } else if 
!reflect.TypeOf(data[0][0]).AssignableTo(reflect.TypeOf(time.Time{})) { - t.Fatalf("Query result should be time.Time type, but: %v", reflect.TypeOf(data[0][0]).String()) - } - } -} diff --git a/sqlchain/xxx_test.go b/sqlchain/xxx_test.go index 15c814c9b..3e45a73b3 100644 --- a/sqlchain/xxx_test.go +++ b/sqlchain/xxx_test.go @@ -28,11 +28,11 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) var ( @@ -105,8 +105,8 @@ func createRandomStrings(offset, length, soffset, slength int) (s []string) { return } -func createRandomStorageQueries(offset, length, soffset, slength int) (qs []wt.Query) { - qs = make([]wt.Query, rand.Intn(length)+offset) +func createRandomStorageQueries(offset, length, soffset, slength int) (qs []types.Query) { + qs = make([]types.Query, rand.Intn(length)+offset) for i := range qs { createRandomString(soffset, slength, &qs[i].Pattern) @@ -119,11 +119,11 @@ func createRandomTimeAfter(now time.Time, maxDelayMillisecond int) time.Time { return now.Add(time.Duration(rand.Intn(maxDelayMillisecond)+1) * time.Millisecond) } -func createRandomQueryRequest(cli *nodeProfile) (r *wt.SignedRequestHeader, err error) { - req := &wt.Request{ - Header: wt.SignedRequestHeader{ - RequestHeader: wt.RequestHeader{ - QueryType: wt.QueryType(rand.Intn(2)), +func createRandomQueryRequest(cli *nodeProfile) (r *types.SignedRequestHeader, err error) { + req := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.QueryType(rand.Intn(2)), NodeID: cli.NodeID, ConnectionID: uint64(rand.Int63()), SeqNo: uint64(rand.Int63()), @@ -131,7 +131,7 @@ func createRandomQueryRequest(cli *nodeProfile) (r *wt.SignedRequestHeader, err // BatchCount and QueriesHash will be set by req.Sign() }, }, - Payload: wt.RequestPayload{ + Payload: types.RequestPayload{ Queries: createRandomStorageQueries(10, 10, 10, 10), }, } @@ -147,7 +147,7 @@ func createRandomQueryRequest(cli *nodeProfile) (r *wt.SignedRequestHeader, err } func createRandomQueryResponse(cli, worker *nodeProfile) ( - r *wt.SignedResponseHeader, err error, + r *types.SignedResponseHeader, err error, ) { req, err := createRandomQueryRequest(cli) @@ -155,18 +155,18 @@ func createRandomQueryResponse(cli, worker *nodeProfile) ( return } - resp := &wt.Response{ - Header: wt.SignedResponseHeader{ - ResponseHeader: wt.ResponseHeader{ + resp := &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ Request: *req, NodeID: worker.NodeID, Timestamp: createRandomTimeAfter(req.Timestamp, 100), }, }, - Payload: wt.ResponsePayload{ + Payload: types.ResponsePayload{ Columns: createRandomStrings(10, 10, 10, 10), DeclTypes: createRandomStrings(10, 10, 10, 10), - Rows: make([]wt.ResponseRow, rand.Intn(10)+10), + Rows: make([]types.ResponseRow, rand.Intn(10)+10), }, } @@ -186,21 +186,21 @@ func createRandomQueryResponse(cli, worker *nodeProfile) ( return } -func createRandomQueryResponseWithRequest(req *wt.SignedRequestHeader, worker *nodeProfile) ( - r *wt.SignedResponseHeader, err error, +func 
createRandomQueryResponseWithRequest(req *types.SignedRequestHeader, worker *nodeProfile) (
+	r *types.SignedResponseHeader, err error,
 ) {
-	resp := &wt.Response{
-		Header: wt.SignedResponseHeader{
-			ResponseHeader: wt.ResponseHeader{
+	resp := &types.Response{
+		Header: types.SignedResponseHeader{
+			ResponseHeader: types.ResponseHeader{
 				Request:   *req,
 				NodeID:    worker.NodeID,
 				Timestamp: createRandomTimeAfter(req.Timestamp, 100),
 			},
 		},
-		Payload: wt.ResponsePayload{
+		Payload: types.ResponsePayload{
 			Columns:   createRandomStrings(10, 10, 10, 10),
 			DeclTypes: createRandomStrings(10, 10, 10, 10),
-			Rows:      make([]wt.ResponseRow, rand.Intn(10)+10),
+			Rows:      make([]types.ResponseRow, rand.Intn(10)+10),
 		},
 	}
 
@@ -220,12 +220,12 @@ func createRandomQueryResponseWithRequest(req *wt.SignedRequestHeader, worker *n
 	return
 }
 
-func createRandomQueryAckWithResponse(resp *wt.SignedResponseHeader, cli *nodeProfile) (
-	r *wt.SignedAckHeader, err error,
+func createRandomQueryAckWithResponse(resp *types.SignedResponseHeader, cli *nodeProfile) (
+	r *types.SignedAckHeader, err error,
 ) {
-	ack := &wt.Ack{
-		Header: wt.SignedAckHeader{
-			AckHeader: wt.AckHeader{
+	ack := &types.Ack{
+		Header: types.SignedAckHeader{
+			AckHeader: types.AckHeader{
 				Response:  *resp,
 				NodeID:    cli.NodeID,
 				Timestamp: createRandomTimeAfter(resp.Timestamp, 100),
@@ -241,16 +241,16 @@ func createRandomQueryAckWithResponse(resp *wt.SignedResponseHeader, cli *nodePr
 	return
 }
 
-func createRandomQueryAck(cli, worker *nodeProfile) (r *wt.SignedAckHeader, err error) {
+func createRandomQueryAck(cli, worker *nodeProfile) (r *types.SignedAckHeader, err error) {
 	resp, err := createRandomQueryResponse(cli, worker)
 
 	if err != nil {
 		return
 	}
 
-	ack := &wt.Ack{
-		Header: wt.SignedAckHeader{
-			AckHeader: wt.AckHeader{
+	ack := &types.Ack{
+		Header: types.SignedAckHeader{
+			AckHeader: types.AckHeader{
 				Response:  *resp,
 				NodeID:    cli.NodeID,
 				Timestamp: createRandomTimeAfter(resp.Timestamp, 100),
@@ -266,7 +266,7 @@ func createRandomQueryAck(cli, worker *nodeProfile) (r *wt.SignedAckHeader, err
 	return
 }
 
-func createRandomNodesAndAck() (r *wt.SignedAckHeader, err error) {
+func createRandomNodesAndAck() (r *types.SignedAckHeader, err error) {
 	cli, err := newRandomNode()
 
 	if err != nil {
@@ -320,7 +320,7 @@ func registerNodesWithPublicKey(pub *asymmetric.PublicKey, diff int, num int) (
 	return
 }
 
-func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) {
+func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) {
 	// Generate key pair
 	priv, pub, err := asymmetric.GenSecp256k1KeyPair()
 
@@ -331,9 +331,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error
 	h := hash.Hash{}
 	rand.Read(h[:])
 
-	b = &ct.Block{
-		SignedHeader: ct.SignedHeader{
-			Header: ct.Header{
+	b = &types.Block{
+		SignedHeader: types.SignedHeader{
+			Header: types.Header{
 				Version:     0x01000000,
 				Producer:    proto.NodeID(h.String()),
 				GenesisHash: genesisHash,
@@ -346,7 +346,13 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error
 	for i, n := 0, rand.Intn(10)+10; i < n; i++ {
 		h := &hash.Hash{}
 		rand.Read(h[:])
-		b.PushAckedQuery(h)
+		b.Acks = append(b.Acks,
+			&types.SignedAckHeader{
+				DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{
+					DataHash: *h,
+				},
+			},
+		)
 	}
 
 	if isGenesis {
@@ -358,7 +364,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error
 			return
 		}
 
-		b.Queries = nil
 		b.SignedHeader.GenesisHash = hash.Hash{}
 		b.SignedHeader.Header.Producer =
proto.NodeID(nis[0].Hash.String()) } @@ -367,9 +372,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error return } -func createRandomQueries(x int) (acks []*wt.SignedAckHeader, err error) { +func createRandomQueries(x int) (acks []*types.SignedAckHeader, err error) { n := rand.Intn(x) - acks = make([]*wt.SignedAckHeader, n) + acks = make([]*types.SignedAckHeader, n) for i := range acks { if acks[i], err = createRandomNodesAndAck(); err != nil { @@ -380,8 +385,8 @@ func createRandomQueries(x int) (acks []*wt.SignedAckHeader, err error) { return } -func createRandomBlockWithQueries(genesis, parent hash.Hash, acks []*wt.SignedAckHeader) ( - b *ct.Block, err error, +func createRandomBlockWithQueries(genesis, parent hash.Hash, acks []*types.SignedAckHeader) ( + b *types.Block, err error, ) { // Generate key pair priv, _, err := asymmetric.GenSecp256k1KeyPair() @@ -393,9 +398,9 @@ func createRandomBlockWithQueries(genesis, parent hash.Hash, acks []*wt.SignedAc h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: genesis, @@ -405,10 +410,6 @@ func createRandomBlockWithQueries(genesis, parent hash.Hash, acks []*wt.SignedAc }, } - for _, ack := range acks { - b.PushAckedQuery(&ack.Hash) - } - err = b.PackAndSignBlock(priv) return } diff --git a/sqlchain/storage/doc.go b/storage/doc.go similarity index 100% rename from sqlchain/storage/doc.go rename to storage/doc.go diff --git a/sqlchain/storage/dsn.go b/storage/dsn.go similarity index 86% rename from sqlchain/storage/dsn.go rename to storage/dsn.go index 08177de94..e74663823 100644 --- a/sqlchain/storage/dsn.go +++ b/storage/dsn.go @@ -82,7 +82,11 @@ func (dsn *DSN) AddParam(key, value string) { dsn.params = make(map[string]string) } - dsn.params[key] = value + if value == "" { + delete(dsn.params, key) + } else { + dsn.params[key] = value + } } // GetParam gets the value. @@ -90,3 +94,16 @@ func (dsn *DSN) GetParam(key string) (value string, ok bool) { value, ok = dsn.params[key] return } + +// Clone returns a copy of current dsn. +func (dsn *DSN) Clone() (copy *DSN) { + copy = &DSN{} + copy.filename = dsn.filename + copy.params = make(map[string]string, len(dsn.params)) + + for k, v := range dsn.params { + copy.params[k] = v + } + + return +} diff --git a/sqlchain/storage/dsn_test.go b/storage/dsn_test.go similarity index 100% rename from sqlchain/storage/dsn_test.go rename to storage/dsn_test.go diff --git a/storage/storage.go b/storage/storage.go index 55141a8d9..7e36bad0e 100644 --- a/storage/storage.go +++ b/storage/storage.go @@ -14,371 +14,425 @@ * limitations under the License. */ -// Package storage implements simple key-value storage interfaces based on sqlite3. -// -// Although a sql.DB should be safe for concurrent use according to -// https://golang.org/pkg/database/sql/#OpenDB, the go-sqlite3 implementation only guarantees -// the safety of concurrent readers. See https://github.com/mattn/go-sqlite3/issues/148 for details. -// -// As a result, here are some suggestions: -// -// 1. Perform as many concurrent GetValue(s) operations as you like; -// 2. Use only one goroutine to perform SetValue(s)/DelValue(s) operations; -// 3. Or implement a simple busy waiting yourself on a go-sqlite3.ErrLocked error if you must use -// concurrent writers. 
package storage import ( + "context" "database/sql" + "errors" "fmt" + "io" "sync" + + "github.com/CovenantSQL/CovenantSQL/twopc" + "github.com/CovenantSQL/CovenantSQL/utils/log" // Register CovenantSQL/go-sqlite3-encrypt engine. _ "github.com/CovenantSQL/go-sqlite3-encrypt" ) var ( index = struct { - mu *sync.Mutex + sync.Mutex db map[string]*sql.DB }{ - &sync.Mutex{}, - make(map[string]*sql.DB), + db: make(map[string]*sql.DB), } ) -func openDB(dsn string) (db *sql.DB, err error) { - index.mu.Lock() - defer index.mu.Unlock() - - db = index.db[dsn] - if db == nil { - db, err = sql.Open("sqlite3", dsn) - if err != nil { - return nil, err - } - - index.db[dsn] = db - } - - return db, err +// Query represents the single query of sqlite. +type Query struct { + Pattern string + Args []sql.NamedArg } -// Storage represents a key-value storage. -type Storage struct { - dsn string - table string - db *sql.DB +// ExecLog represents the execution log of sqlite. +type ExecLog struct { + ConnectionID uint64 + SeqNo uint64 + Timestamp int64 + Queries []Query } -// KV represents a key-value pair. -type KV struct { - Key string - Value []byte +// ExecResult represents the execution result of sqlite. +type ExecResult struct { + LastInsertID int64 + RowsAffected int64 } -// OpenStorage opens a database using the specified DSN and ensures that the specified table exists. -func OpenStorage(dsn string, table string) (st *Storage, err error) { - // Open database - var db *sql.DB - db, err = openDB(dsn) +func openDB(dsn string) (db *sql.DB, err error) { + // Rebuild DSN. + d, err := NewDSN(dsn) if err != nil { - return st, err + return nil, err } - // Ensure table - stmt := fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%s` (`key` TEXT PRIMARY KEY, `value` BLOB)", - table) + d.AddParam("_journal_mode", "WAL") + d.AddParam("_synchronous", "NORMAL") + fdsn := d.Format() + + fn := d.GetFileName() + mode, _ := d.GetParam("mode") + cache, _ := d.GetParam("cache") - if _, err = db.Exec(stmt); err != nil { - return st, err + if (fn == ":memory:" || mode == "memory") && cache != "shared" { + // Return a new DB instance if it's in memory and private. + db, err = sql.Open("sqlite3", fdsn) + return } - st = &Storage{dsn, table, db} - return st, err -} + index.Lock() + db, ok := index.db[d.filename] + index.Unlock() -// SetValue sets or replace the value to key. -func (s *Storage) SetValue(key string, value []byte) (err error) { - stmt := fmt.Sprintf("INSERT OR REPLACE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table) - _, err = s.db.Exec(stmt, key, value) + if !ok { + db, err = sql.Open("sqlite3", fdsn) - return err -} + if err != nil { + return nil, err + } + + index.Lock() + index.db[d.filename] = db + index.Unlock() + } -// SetValueIfNotExist sets the value to key if it doesn't exist. -func (s *Storage) SetValueIfNotExist(key string, value []byte) (err error) { - stmt := fmt.Sprintf("INSERT OR IGNORE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table) - _, err = s.db.Exec(stmt, key, value) + return +} - return err +// TxID represents a transaction ID. +type TxID struct { + ConnectionID uint64 + SeqNo uint64 + Timestamp int64 } -// DelValue deletes the value of key. 
-func (s *Storage) DelValue(key string) (err error) { - stmt := fmt.Sprintf("DELETE FROM `%s` WHERE `key` = ?", s.table) - _, err = s.db.Exec(stmt, key) +func equalTxID(x, y *TxID) bool { + return x.ConnectionID == y.ConnectionID && x.SeqNo == y.SeqNo && x.Timestamp == y.Timestamp +} - return err +// Storage represents a underlying storage implementation based on sqlite3. +type Storage struct { + sync.Mutex + dsn string + db *sql.DB + tx *sql.Tx // Current tx + id TxID + queries []Query } -// GetValue fetches the value of key. -func (s *Storage) GetValue(key string) (value []byte, err error) { - stmt := fmt.Sprintf("SELECT `value` FROM `%s` WHERE `key` = ?", s.table) +// New returns a new storage connected by dsn. +func New(dsn string) (st *Storage, err error) { + db, err := openDB(dsn) - if err = s.db.QueryRow(stmt, key).Scan(&value); err == sql.ErrNoRows { - err = nil + if err != nil { + return } - return value, err + return &Storage{ + dsn: dsn, + db: db, + }, nil } -// SetValues sets or replaces the key-value pairs in kvs. -// -// Note that this is not a transaction. We use a prepared statement to send these queries. Each -// call may fail while part of the queries succeed. -func (s *Storage) SetValues(kvs []KV) (err error) { - stmt := fmt.Sprintf("INSERT OR REPLACE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table) - pStmt, err := s.db.Prepare(stmt) +// Prepare implements prepare method of two-phase commit worker. +func (s *Storage) Prepare(ctx context.Context, wb twopc.WriteBatch) (err error) { + el, ok := wb.(*ExecLog) - if err != nil { - return err + if !ok { + return errors.New("unexpected WriteBatch type") } - defer pStmt.Close() + s.Lock() + defer s.Unlock() - for _, row := range kvs { - if _, err = pStmt.Exec(row.Key, row.Value); err != nil { - return err + if s.tx != nil { + if equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) { + s.queries = el.Queries + return nil } + + return fmt.Errorf("twopc: inconsistent state, currently in tx: "+ + "conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp) } + s.tx, err = s.db.BeginTx(ctx, nil) + + if err != nil { + return + } + + s.id = TxID{el.ConnectionID, el.SeqNo, el.Timestamp} + s.queries = el.Queries + return nil } -// SetValuesIfNotExist sets the key-value pairs in kvs if the key doesn't exist. -// -// Note that this is not a transaction. We use a prepared statement to send these queries. Each -// call may fail while part of the queries succeed. -func (s *Storage) SetValuesIfNotExist(kvs []KV) (err error) { - stmt := fmt.Sprintf("INSERT OR IGNORE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table) - pStmt, err := s.db.Prepare(stmt) +// Commit implements commit method of two-phase commit worker. +func (s *Storage) Commit(ctx context.Context, wb twopc.WriteBatch) (result interface{}, err error) { + el, ok := wb.(*ExecLog) - if err != nil { - return err + if !ok { + err = errors.New("unexpected WriteBatch type") + return } - defer pStmt.Close() + s.Lock() + defer s.Unlock() + + if s.tx != nil { + if equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) { + // get last insert id and affected rows result + execResult := ExecResult{} + + for _, q := range s.queries { + // convert arguments types + args := make([]interface{}, len(q.Args)) + + for i, v := range q.Args { + args[i] = v + } + + var res sql.Result + res, err = s.tx.ExecContext(ctx, q.Pattern, args...) 
+ + if err != nil { + log.WithError(err).Debug("commit query failed") + s.tx.Rollback() + s.tx = nil + s.queries = nil + return + } + + lastInsertID, _ := res.LastInsertId() + rowsAffected, _ := res.RowsAffected() - for _, row := range kvs { - if _, err = pStmt.Exec(row.Key, row.Value); err != nil { - return err + execResult.LastInsertID = lastInsertID + execResult.RowsAffected += rowsAffected + } + + s.tx.Commit() + s.tx = nil + s.queries = nil + result = execResult + + return } + + err = fmt.Errorf("twopc: inconsistent state, currently in tx: "+ + "conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp) + return } - return nil + err = errors.New("twopc: tx not prepared") + return } -// DelValues deletes the values of the keys. -// -// Note that this is not a transaction. We use a prepared statement to send these queries. Each -// call may fail while part of the queries succeed. -func (s *Storage) DelValues(keys []string) (err error) { - stmt := fmt.Sprintf("DELETE FROM `%s` WHERE `key` = ?", s.table) - pStmt, err := s.db.Prepare(stmt) +// Rollback implements rollback method of two-phase commit worker. +func (s *Storage) Rollback(ctx context.Context, wb twopc.WriteBatch) (err error) { + el, ok := wb.(*ExecLog) - if err != nil { - return err + if !ok { + return errors.New("unexpected WriteBatch type") } - defer pStmt.Close() + s.Lock() + defer s.Unlock() - for _, key := range keys { - if _, err = pStmt.Exec(key); err != nil { - return err - } + if !equalTxID(&s.id, &TxID{el.ConnectionID, el.SeqNo, el.Timestamp}) { + return fmt.Errorf("twopc: inconsistent state, currently in tx: "+ + "conn = %d, seq = %d, time = %d", s.id.ConnectionID, s.id.SeqNo, s.id.Timestamp) + } + + if s.tx != nil { + s.tx.Rollback() + s.tx = nil + s.queries = nil } return nil } -// GetValues fetches the values of keys. -// -// Note that this is not a transaction. We use a prepared statement to send these queries. Each -// call may fail while part of the queries succeed and some values may be altered during the -// queries. But the results will be returned only if all the queries succeed. -func (s *Storage) GetValues(keys []string) (kvs []KV, err error) { - stmt := fmt.Sprintf("SELECT `value` FROM `%s` WHERE `key` = ?", s.table) - pStmt, err := s.db.Prepare(stmt) +// Query implements read-only query feature. +func (s *Storage) Query(ctx context.Context, queries []Query) (columns []string, types []string, + data [][]interface{}, err error) { + data = make([][]interface{}, 0) - if err != nil { - return nil, err + if len(queries) == 0 { + return + } + + var tx *sql.Tx + var txOptions = &sql.TxOptions{ + ReadOnly: true, } - defer pStmt.Close() + if tx, err = s.db.BeginTx(ctx, txOptions); err != nil { + return + } - kvs = make([]KV, len(keys)) + // always rollback on complete + defer tx.Rollback() - for index, key := range keys { - kvs[index].Key = key + q := queries[len(queries)-1] - if err = pStmt.QueryRow(key).Scan(&kvs[index].Value); err != nil && err != sql.ErrNoRows { - return nil, err - } + // convert arguments types + args := make([]interface{}, len(q.Args)) + + for i, v := range q.Args { + args[i] = v } - return kvs, nil -} + var rows *sql.Rows + if rows, err = tx.Query(q.Pattern, args...); err != nil { + return + } -// SetValuesTx sets or replaces the key-value pairs in kvs as a transaction. 
-func (s *Storage) SetValuesTx(kvs []KV) (err error) { - // Begin transaction - tx, err := s.db.Begin() + // free result set + defer rows.Close() - if err != nil { - return err + // get rows meta + if columns, err = rows.Columns(); err != nil { + return } - defer func() { - if err != nil { - tx.Rollback() - } else { - err = tx.Commit() - } - }() - - // Prepare statement - stmt := fmt.Sprintf("INSERT OR REPLACE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table) - pStmt, err := tx.Prepare(stmt) + // if there is empty columns, treat result as empty + if len(columns) == 0 { + return + } - if err != nil { - return err + // get types meta + if types, err = s.transformColumnTypes(rows.ColumnTypes()); err != nil { + return } - defer pStmt.Close() + rs := newRowScanner(len(columns)) - // Execute queries - for _, row := range kvs { - if _, err = pStmt.Exec(row.Key, row.Value); err != nil { - return err + for rows.Next() { + err = rows.Scan(rs.ScanArgs()...) + if err != nil { + return } + + data = append(data, rs.GetRow()) } - return nil + err = rows.Err() + return } -// SetValuesIfNotExistTx sets the key-value pairs in kvs if the key doesn't exist as a transaction. -func (s *Storage) SetValuesIfNotExistTx(kvs []KV) (err error) { - // Begin transaction - tx, err := s.db.Begin() +// Exec implements write query feature. +func (s *Storage) Exec(ctx context.Context, queries []Query) (result ExecResult, err error) { + if len(queries) == 0 { + return + } - if err != nil { - return err + var tx *sql.Tx + var txOptions = &sql.TxOptions{ + ReadOnly: false, } - defer func() { - if err != nil { - tx.Rollback() - } else { - err = tx.Commit() - } - }() + if tx, err = s.db.BeginTx(ctx, txOptions); err != nil { + return + } - // Prepare statement - stmt := fmt.Sprintf("INSERT OR IGNORE INTO `%s` (`key`, `value`) VALUES (?, ?)", s.table) - pStmt, err := tx.Prepare(stmt) + defer tx.Rollback() - if err != nil { - return err - } + for _, q := range queries { + // convert arguments types + args := make([]interface{}, len(q.Args)) - defer pStmt.Close() + for i, v := range q.Args { + args[i] = v + } - // Execute queries - for _, row := range kvs { - if _, err = pStmt.Exec(row.Key, row.Value); err != nil { - return err + var r sql.Result + if r, err = tx.Exec(q.Pattern, args...); err != nil { + log.WithError(err).Debug("execute query failed") + return } + + var affected int64 + affected, _ = r.RowsAffected() + result.RowsAffected += affected + result.LastInsertID, _ = r.LastInsertId() } - return nil -} + tx.Commit() -// DelValuesTx deletes the values of the keys as a transaction. -func (s *Storage) DelValuesTx(keys []string) (err error) { - // Begin transaction - tx, err := s.db.Begin() + return +} +// Close implements database safe close feature. 
+func (s *Storage) Close() (err error) { + d, err := NewDSN(s.dsn) if err != nil { - return err + return } - defer func() { - if err != nil { - tx.Rollback() - } else { - err = tx.Commit() - } - }() - - // Prepare statement - stmt := fmt.Sprintf("DELETE FROM `%s` WHERE `key` = ?", s.table) - pStmt, err := tx.Prepare(stmt) + index.Lock() + defer index.Unlock() + delete(index.db, d.filename) + return s.db.Close() +} - if err != nil { - return err +func (s *Storage) transformColumnTypes(columnTypes []*sql.ColumnType, e error) (types []string, err error) { + if e != nil { + err = e + return } - defer pStmt.Close() + types = make([]string, len(columnTypes)) - // Execute queries - for _, key := range keys { - if _, err = pStmt.Exec(key); err != nil { - return err - } + for i, c := range columnTypes { + types[i] = c.DatabaseTypeName() } - return nil + return } -// GetValuesTx fetches the values of keys as a transaction. -func (s *Storage) GetValuesTx(keys []string) (kvs []KV, err error) { - // Begin transaction - tx, err := s.db.Begin() +// golang does trick convert, use rowScanner to return the original result type in sqlite3 driver +type rowScanner struct { + fieldCnt int + column int // current column + fields []interface{} // temp fields + scanArgs []interface{} +} - if err != nil { - return nil, err +func newRowScanner(fieldCnt int) (s *rowScanner) { + s = &rowScanner{ + fieldCnt: fieldCnt, + column: 0, + fields: make([]interface{}, fieldCnt), + scanArgs: make([]interface{}, fieldCnt), } - defer func() { - if err != nil { - tx.Rollback() - } else { - err = tx.Commit() - } - }() + for i := 0; i != fieldCnt; i++ { + s.scanArgs[i] = s + } - // Prepare statement - stmt := fmt.Sprintf("SELECT `value` FROM `%s` WHERE `key` = ?", s.table) - pStmt, err := tx.Prepare(stmt) + return +} - if err != nil { - return nil, err +func (s *rowScanner) Scan(src interface{}) error { + if s.fieldCnt <= s.column { + // read complete + return io.EOF } - defer pStmt.Close() - - // Execute queries - kvs = make([]KV, len(keys)) + s.fields[s.column] = src + s.column++ - for index, key := range keys { - kvs[index].Key = key - err = pStmt.QueryRow(key).Scan(&kvs[index].Value) + return nil +} - if err != nil && err != sql.ErrNoRows { - return nil, err - } - } +func (s *rowScanner) GetRow() []interface{} { + return s.fields +} - return kvs, nil +func (s *rowScanner) ScanArgs() []interface{} { + // reset + s.column = 0 + s.fields = make([]interface{}, s.fieldCnt) + return s.scanArgs } diff --git a/storage/storage_test.go b/storage/storage_test.go index 970183471..0cda67128 100644 --- a/storage/storage_test.go +++ b/storage/storage_test.go @@ -17,1000 +17,337 @@ package storage import ( - "bytes" + "context" + "database/sql" "fmt" "io/ioutil" - "math/rand" - "os" "reflect" - "sync" "testing" "time" ) -var ( - sampleTexts = []KV{ - {"Philip K. Dick", []byte("All their equipment and instruments are alive.")}, - {"Philip K. Dick", []byte("The face of the moon was in shadow.")}, - {"Samuel R. Delany", []byte("A red flair silhouetted the jagged edge of a wing.")}, - {"Samuel R. Delany", []byte("Mist enveloped the ship three hours out from port.")}, - {"Samuel R. Delany", []byte("Silver mist suffused the deck of the ship.")}, - {"Samuel R. 
Delany", []byte("Waves flung themselves at the blue evening.")}, - {"Mary Shelley", []byte("I watched the storm, so beautiful yet terrific.")}, - {"John Munro", []byte("Almost before we knew it, we had left the ground.")}, - {"John Munro", []byte("The sky was cloudless and of a deep dark blue.")}, - {"John Munro", []byte("The spectacle before us was indeed sublime.")}, - {"E. E. Smith", []byte("A shining crescent far beneath the flying vessel.")}, - {"Isaac Asimov", []byte("It was going to be a lonely trip back.")}, - {"Robert Louis Stevenson", []byte("My two natures had memory in common.")}, - {"Harry Harrison", []byte("The face of the moon was in shadow.")}, - {"H. G. Wells", []byte("Then came the night of the first falling star.")}, - } - - ignoredSampleTexts map[string][]byte - replacedSampleTexts map[string][]byte - keysOfSampleTexts []string -) - -func buildReplacedMapFromKVs(kvs []KV) (kvsmap map[string][]byte) { - kvsmap = make(map[string][]byte) - - for _, row := range kvs { - if row.Value != nil { - kvsmap[row.Key] = row.Value - } - } - - return kvsmap -} - -func buildIgnoredMapFromKVs(kvs []KV) (kvsmap map[string][]byte) { - kvsmap = make(map[string][]byte) - - for _, row := range kvs { - if _, ok := kvsmap[row.Key]; !ok && row.Value != nil { - kvsmap[row.Key] = row.Value - } - } - - return kvsmap -} - -func randomDel(kvsmap map[string][]byte) (rkvsmap map[string][]byte, dkeys []string) { - knum := len(kvsmap) - dnum := knum / 2 - list := rand.Perm(knum) - dmap := make([]bool, knum) - - for index := range dmap { - dmap[index] = false - } - - for index, iindex := range list { - if index < dnum { - dmap[iindex] = true - } - } - - index := 0 - dkeys = make([]string, 0, dnum) - rkvsmap = make(map[string][]byte) - - for k, v := range kvsmap { - if dmap[index] { - dkeys = append(dkeys, k) - } else { - rkvsmap[k] = v - } - - index++ - } - - return rkvsmap, dkeys -} - -func testSetup() { - // Build datasets for test - ignoredSampleTexts = buildIgnoredMapFromKVs(sampleTexts) - replacedSampleTexts = buildReplacedMapFromKVs(sampleTexts) - - index := 0 - keysOfSampleTexts = make([]string, len(replacedSampleTexts)) - - for key := range replacedSampleTexts { - keysOfSampleTexts[index] = key - index++ - } -} - -func TestMain(m *testing.M) { - testSetup() - os.Exit(m.Run()) -} - -func TestBadDSN(t *testing.T) { - // Use bad DSN to open storage - if _, err := OpenStorage(os.TempDir(), "test-bad-dsn"); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } -} - -func TestOpenStorage(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - _, err = OpenStorage(fl.Name(), "test-open-storage") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } -} - -func TestSetValue(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-value") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - 
t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } -} - -func TestSetValueIfNotExist(t *testing.T) { - // Open storage - fl, errTemp := ioutil.TempFile("", "db") - - if errTemp != nil { - t.Fatalf("Error occurred: %s", errTemp.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-value-if-not-exist") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValueIfNotExist(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - } - - // Verify values - for k, v := range ignoredSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } -} - -func TestGetValue(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-get-value") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } - - // Test get nil value - nonexistentKey := "Jules Verne" - v, err := st.GetValue(nonexistentKey) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if v != nil { - t.Fatalf("Unexpected output result: got %v while expecting nil", v) - } -} - -func TestDelValue(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-del-value") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } - - // Delete value - delKey := "Samuel R. 
Delany" - err = st.DelValue(delKey) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify nil result - v, err := st.GetValue(delKey) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if v != nil { - t.Fatalf("Unexpected output result: got %v while expecting nil", v) - } - - // Test deleting a nonexistent key: it should not return any error - nonexistentKey := "Jules Verne" - - if err = st.DelValue(nonexistentKey); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } -} - -func TestSetValues(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-values") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValues(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValues(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(replacedSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", replacedSampleTexts, okvs) - } -} - -func TestSetValuesIfNotExist(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-values-if-not-exist") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesIfNotExist(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } +func newQuery(query string, args ...interface{}) (q Query) { + q.Pattern = query - // Verify values - kvs, err := st.GetValues(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // convert args + q.Args = make([]sql.NamedArg, len(args)) + for i, v := range args { + q.Args[i] = sql.Named("", v) } - okvs := buildIgnoredMapFromKVs(kvs) - - if !reflect.DeepEqual(ignoredSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", ignoredSampleTexts, okvs) - } + return } -func TestDelValues(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") +func newNamedQuery(query string, args map[string]interface{}) (q Query) { + q.Pattern = query + q.Args = make([]sql.NamedArg, len(args)) + i := 0 - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // convert args + for n, v := range args { + q.Args[i] = sql.Named(n, v) + i++ } - st, err := OpenStorage(fl.Name(), "test-del-values") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValues(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Randomly delete some values - rkvs, dkeys := randomDel(replacedSampleTexts) - - if err = st.DelValues(dkeys); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValues(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(rkvs, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", rkvs, okvs) - } + return } -func TestGetValues(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") +func TestBadType(t *testing.T) { + fl, err := ioutil.TempFile("", "sqlite3-") if err 
!= nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Error occurred: %v", err) } - st, err := OpenStorage(fl.Name(), "test-get-values") + st, err := New(fmt.Sprintf("file:%s", fl.Name())) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Error occurred: %v", err) } - // Set values - if err = st.SetValues(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Add some nonexistent keys - mixedKeys := append(keysOfSampleTexts, "Jules Verne", "Kathy Tyers", "Jack Vance") - - // Verify values - kvs, err := st.GetValues(mixedKeys) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(replacedSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", replacedSampleTexts, okvs) - } -} - -func TestSetValuesTx(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-values-tx") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValuesTx(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(replacedSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", replacedSampleTexts, okvs) - } -} - -func TestSetValuesIfNotExistTx(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-set-values-if-not-exist-tx") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesIfNotExistTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValuesTx(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildIgnoredMapFromKVs(kvs) - - if !reflect.DeepEqual(ignoredSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", ignoredSampleTexts, okvs) - } -} - -func TestDelValuesTx(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-del-values-tx") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Randomly delete some values - rkvs, dkeys := randomDel(replacedSampleTexts) - - if err = st.DelValuesTx(dkeys); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Verify values - kvs, err := st.GetValuesTx(keysOfSampleTexts) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(rkvs, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", rkvs, okvs) - } -} - -func TestGetValuesTx(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-get-values-tx") - - if err != nil { - 
t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Add some nonexistent keys - mixedKeys := append(keysOfSampleTexts, "Jules Verne", "Kathy Tyers", "Jack Vance") - - // Verify values - kvs, err := st.GetValuesTx(mixedKeys) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - okvs := buildReplacedMapFromKVs(kvs) - - if !reflect.DeepEqual(replacedSampleTexts, okvs) { - t.Fatalf("Unexpected output result: input = %v, output = %v", replacedSampleTexts, okvs) - } -} - -func TestDBError(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(fl.Name(), "test-db-error") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Hack the internal structs and filesystem to wipe out the databse - if err = st.db.Close(); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - delete(index.db, fl.Name()) - - if err = os.Truncate(fl.Name(), 0); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if st.db, err = openDB(fl.Name()); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Now try some operations opon it - if err = st.SetValue("", nil); err == nil { + if err = st.Prepare(context.Background(), struct{}{}); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - if err = st.SetValues(sampleTexts); err == nil { + if _, err = st.Commit(context.Background(), struct{}{}); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - if err = st.SetValuesTx(sampleTexts); err == nil { + if err = st.Rollback(context.Background(), struct{}{}); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } +} - if err = st.SetValueIfNotExist("", nil); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } +func TestStorage(t *testing.T) { + fl, err := ioutil.TempFile("", "sqlite3-") - if err = st.SetValuesIfNotExist(sampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err != nil { + t.Fatalf("Error occurred: %v", err) } - if err = st.SetValuesIfNotExistTx(sampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } + st, err := New(fmt.Sprintf("file:%s", fl.Name())) - if err = st.DelValue(""); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err != nil { + t.Fatalf("Error occurred: %v", err) } - if err = st.DelValues(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + el1 := &ExecLog{ + ConnectionID: 1, + SeqNo: 1, + Timestamp: 
time.Now().UnixNano(), + Queries: []Query{ + newQuery("CREATE TABLE IF NOT EXISTS `kv` (`key` TEXT PRIMARY KEY, `value` BLOB)"), + newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k0', NULL)"), + newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k1', 'v1')"), + newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k2', 'v2')"), + newQuery("INSERT OR IGNORE INTO `kv` VALUES ('k3', 'v3')"), + newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k3', 'v3-2')"), + newQuery("DELETE FROM `kv` WHERE `key`='k2'"), + }, } - if err = st.DelValuesTx(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + el2 := &ExecLog{ + ConnectionID: 1, + SeqNo: 2, + Timestamp: time.Now().UnixNano(), + Queries: []Query{ + newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k1', 'v1-2')"), + }, } - if _, err = st.GetValue(""); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err = st.Prepare(context.Background(), el1); err != nil { + t.Fatalf("Error occurred: %v", err) } - if _, err = st.GetValues(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err = st.Prepare(context.Background(), el1); err != nil { + t.Fatalf("Error occurred: %v", err) } - if _, err = st.GetValuesTx(keysOfSampleTexts); err == nil { + if err = st.Prepare(context.Background(), el2); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - // Hack the internal structs to close the database - if err = st.db.Close(); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - delete(index.db, fl.Name()) - - // Now try some operations opon it - if err = st.SetValue("", nil); err == nil { + if _, err = st.Commit(context.Background(), el2); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - if err = st.SetValues(sampleTexts); err == nil { + if err = st.Rollback(context.Background(), el2); err == nil { t.Fatal("Unexpected result: returned nil while expecting an error") } else { - t.Logf("Error occurred as expected: %s", err.Error()) + t.Logf("Error occurred as expected: %v", err) } - if err = st.SetValuesTx(sampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") + var res interface{} + if res, err = st.Commit(context.Background(), el1); err != nil { + t.Fatalf("Error occurred: %v", err) } else { - t.Logf("Error occurred as expected: %s", err.Error()) + result := res.(ExecResult) + t.Logf("Result: %v", result) } - if err = st.SetValueIfNotExist("", nil); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } + // test query + columns, types, data, err := st.Query(context.Background(), + []Query{newQuery("SELECT * FROM `kv` ORDER BY `key` ASC")}) - if err = st.SetValuesIfNotExist(sampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } - - if err = st.SetValuesIfNotExistTx(sampleTexts); err == nil { - 
t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if err != nil { + t.Fatalf("Query failed: %v", err.Error()) } - - if err = st.DelValue(""); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if !reflect.DeepEqual(columns, []string{"key", "value"}) { + t.Fatalf("Error column result: %v", columns) } - - if err = st.DelValues(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) + if !reflect.DeepEqual(types, []string{"TEXT", "BLOB"}) { + t.Fatalf("Error types result: %v", types) } - - if err = st.DelValuesTx(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") + if len(data) != 3 { + t.Fatalf("Error result count: %v, should be 3", len(data)) } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } - - if _, err = st.GetValue(""); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } - - if _, err = st.GetValues(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } else { - t.Logf("Error occurred as expected: %s", err.Error()) - } - - if _, err = st.GetValuesTx(keysOfSampleTexts); err == nil { - t.Fatal("Unexpected result: returned nil while expecting an error") - } -} - -func TestDataPersistence(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - dsn := fmt.Sprintf("file:%s", fl.Name()) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - st, err := OpenStorage(dsn, "test-data-persistence") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // compare rows + should1 := []interface{}{[]byte("k0"), nil} + should2 := []interface{}{[]byte("k1"), []byte("v1")} + should3 := []interface{}{[]byte("k3"), []byte("v3-2")} + t.Logf("Rows: %v", data) + if !reflect.DeepEqual(data[0], should1) { + t.Fatalf("Error result row: %v, should: %v", data[0], should1) } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if !reflect.DeepEqual(data[1], should2) { + t.Fatalf("Error result row: %v, should: %v", data[1], should2) } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) + if !reflect.DeepEqual(data[2], should3) { + t.Fatalf("Error result row: %v, should: %v", data[2], should2) } } - // Hack the internal structs to close the database - if err = st.db.Close(); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - delete(index.db, dsn) - - // Now reopen the storage and verify the data - st, err = OpenStorage(dsn, "test-data-persistence") + // test query with projection + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("SELECT `key` FROM `kv` ORDER BY `key` ASC")}) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Query failed: %v", err.Error()) } - - content, _ := ioutil.ReadFile(fl.Name()) - if !bytes.Contains(content, []byte(sampleTexts[0].Key)) { - 
t.Fatal("db is corrupted") + if !reflect.DeepEqual(columns, []string{"key"}) { + t.Fatalf("Error column result: %v", columns) } - - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } - } -} - -func TestCipherDBDataPersistence(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") - dsn := fmt.Sprintf("file:%s?_crypto_key=auxten", fl.Name()) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if !reflect.DeepEqual(types, []string{"TEXT"}) { + t.Fatalf("Error types result: %v", types) } - - st, err := OpenStorage(dsn, "test-data-persistence") - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - // Set values - for _, row := range sampleTexts { - if err = st.SetValue(row.Key, row.Value); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if len(data) != 3 { + t.Fatalf("Error result count: %v, should be 3", len(data)) + } else { + // compare rows + should1 := []interface{}{[]byte("k0")} + should2 := []interface{}{[]byte("k1")} + should3 := []interface{}{[]byte("k3")} + t.Logf("Rows: %v", data) + if !reflect.DeepEqual(data[0], should1) { + t.Fatalf("Error result row: %v, should: %v", data[0], should1) } - } - - // Verify values - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if !reflect.DeepEqual(data[1], should2) { + t.Fatalf("Error result row: %v, should: %v", data[1], should2) } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) + if !reflect.DeepEqual(data[2], should3) { + t.Fatalf("Error result row: %v, should: %v", data[2], should2) } } - // Hack the internal structs to close the database - if err = st.db.Close(); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - delete(index.db, dsn) - - // Now reopen the storage and verify the data - st, err = OpenStorage(dsn, "test-data-persistence") + // test query with condition + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("SELECT `key` FROM `kv` WHERE `value` IS NOT NULL ORDER BY `key` ASC")}) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Query failed: %v", err.Error()) } - - content, _ := ioutil.ReadFile(fl.Name()) - if bytes.Contains(content, []byte(sampleTexts[0].Key)) { - t.Fatal("db not ciphered") + if !reflect.DeepEqual(columns, []string{"key"}) { + t.Fatalf("Error column result: %v", columns) } - - for k, v := range replacedSampleTexts { - ov, err := st.GetValue(k) - - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } - - if !reflect.DeepEqual(v, ov) { - t.Fatalf("Unexpected output result: input = %v, output = %v", v, ov) - } + if !reflect.DeepEqual(types, []string{"TEXT"}) { + t.Fatalf("Error types result: %v", types) } -} - -func randomSleep() { - r := rand.Intn(10) - time.Sleep(time.Duration(r) * time.Millisecond) -} - -func randomGetValue(wg *sync.WaitGroup, st *Storage, t *testing.T) { - defer wg.Done() - - for i := 0; i < 1000; i++ { - randomSleep() - key := keysOfSampleTexts[rand.Intn(len(keysOfSampleTexts))] - value := replacedSampleTexts[key] - - if ov, err := st.GetValue(key); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) - } else if !reflect.DeepEqual(value, ov) { - t.Fatalf("Unexpected output result: input = %v, output = 
%v", value, ov) + if len(data) != 2 { + t.Fatalf("Error result count: %v, should be 3", len(data)) + } else { + // compare rows + should1 := []interface{}{[]byte("k1")} + should2 := []interface{}{[]byte("k3")} + t.Logf("Rows: %v", data) + if !reflect.DeepEqual(data[0], should1) { + t.Fatalf("Error result row: %v, should: %v", data[0], should1) + } + if !reflect.DeepEqual(data[1], should2) { + t.Fatalf("Error result row: %v, should: %v", data[1], should2) } } -} -func TestConcurrency(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") + // test failed query + columns, types, data, err = st.Query(context.Background(), []Query{newQuery("SQL???? WHAT!!!!")}) - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + if err == nil { + t.Fatal("Query should failed") + } else { + t.Logf("Query failed as expected with: %v", err.Error()) } - st, err := OpenStorage(fl.Name(), "test-data-persistence") + // test non-read query + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("DELETE FROM `kv` WHERE `value` IS NULL")}) - if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + execResult, err := st.Exec(context.Background(), + []Query{newQuery("INSERT OR REPLACE INTO `kv` VALUES ('k4', 'v4')")}) + if err != nil || execResult.RowsAffected != 1 { + t.Fatalf("Exec INSERT failed: %v", err) } - - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // test with arguments + execResult, err = st.Exec(context.Background(), []Query{newQuery("DELETE FROM `kv` WHERE `key`='k4'")}) + if err != nil || execResult.RowsAffected != 1 { + t.Fatalf("Exec DELETE failed: %v", err) } - - // Run concurrent GetValue - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(1) - go randomGetValue(&wg, st, t) + execResult, err = st.Exec(context.Background(), + []Query{newQuery("DELETE FROM `kv` WHERE `key`=?", "not_exist")}) + if err != nil || execResult.RowsAffected != 0 { + t.Fatalf("Exec DELETE failed: %v", err) } - wg.Wait() -} - -func TestCipherDBConcurrency(t *testing.T) { - // Open storage - fl, err := ioutil.TempFile("", "db") + // test again + columns, types, data, err = st.Query(context.Background(), []Query{newQuery("SELECT `key` FROM `kv`")}) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Query failed: %v", err.Error()) + } else if len(data) != 3 { + t.Fatalf("Last write query should not take any effect, row count: %v", len(data)) + } else { + t.Logf("Rows: %v", data) } - dsn := fmt.Sprintf("file:%s?_crypto_key=auxten", fl.Name()) - - st, err := OpenStorage(dsn, "test-data-persistence") + // test with select + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("SELECT `key` FROM `kv` WHERE `key` IN (?)", "k1")}) if err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + t.Fatalf("Query failed: %v", err.Error()) + } else if len(data) != 1 { + t.Fatalf("Should only have one record, but actually %v", len(data)) + } else { + t.Logf("Rows: %v", data) } - // Set values - if err = st.SetValuesTx(sampleTexts); err != nil { - t.Fatalf("Error occurred: %s", err.Error()) + // test with select with named arguments + columns, types, data, err = st.Query(context.Background(), + []Query{newNamedQuery("SELECT `key` FROM `kv` WHERE `key` IN (:test2, :test1)", map[string]interface{}{ + "test1": "k1", + "test2": "k3", + })}) + if err != nil { + t.Fatalf("Query failed: %v", err.Error()) + } else if len(data) != 2 { + t.Fatalf("Should only have two 
records, but actually %v", len(data)) + } else { + t.Logf("Rows: %v", data) } - // Run concurrent GetValue - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(1) - go randomGetValue(&wg, st, t) + // test with function + columns, types, data, err = st.Query(context.Background(), + []Query{newQuery("SELECT COUNT(1) AS `c` FROM `kv`")}) + if err != nil { + t.Fatalf("Query failed: %v", err.Error()) + } else { + if len(columns) != 1 { + t.Fatalf("Query result should contain only one column, now %v", len(columns)) + } else if columns[0] != "c" { + t.Fatalf("Query result column name is not defined alias, but :%v", columns[0]) + } + if len(types) != 1 { + t.Fatalf("Query result should contain only one column, now %v", len(types)) + } else { + t.Logf("Query result type is: %v", types[0]) + } + if len(data) != 1 || len(data[0]) != 1 { + t.Fatalf("Query result should contain only one row and one column, now %v", data) + } else if !reflect.DeepEqual(data[0][0], int64(3)) { + t.Fatalf("Query result should be table row count 3, but: %v", data[0]) + } } - wg.Wait() - - content, _ := ioutil.ReadFile(fl.Name()) - if bytes.Contains(content, []byte(sampleTexts[0].Key)) { - t.Fatal("db not ciphered") + // test with timestamp fields + _, err = st.Exec(context.Background(), []Query{ + newQuery("CREATE TABLE `tm` (tm TIMESTAMP)"), + newQuery("INSERT INTO `tm` VALUES(DATE('NOW'))"), + }) + if err != nil { + t.Fatalf("Query failed: %v", err.Error()) + } else { + // query for values + _, _, data, err = st.Query(context.Background(), []Query{newQuery("SELECT `tm` FROM `tm`")}) + if len(data) != 1 || len(data[0]) != 1 { + t.Fatalf("Query result should contain only one row and one column, now %v", data) + } else if !reflect.TypeOf(data[0][0]).AssignableTo(reflect.TypeOf(time.Time{})) { + t.Fatalf("Query result should be time.Time type, but: %v", reflect.TypeOf(data[0][0]).String()) + } } } diff --git a/test/GNTE/GNTE b/test/GNTE/GNTE index 51de88917..93e48d707 160000 --- a/test/GNTE/GNTE +++ b/test/GNTE/GNTE @@ -1 +1 @@ -Subproject commit 51de889172de377ec104900b9566ef8c8510525e +Subproject commit 93e48d7072b002c3d070f9b712ff22b53c65c6b3 diff --git a/test/GNTE/conf/node_miner_10.250.100.2/config.yaml b/test/GNTE/conf/node_miner_10.250.100.2/config.yaml index 97cb6d3a4..2a301d150 100644 --- a/test/GNTE/conf/node_miner_10.250.100.2/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.3/config.yaml b/test/GNTE/conf/node_miner_10.250.100.3/config.yaml index af65305fa..1106c28bb 100644 --- a/test/GNTE/conf/node_miner_10.250.100.3/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.3/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.4/config.yaml b/test/GNTE/conf/node_miner_10.250.100.4/config.yaml index 1b829fb70..b19dace89 100644 --- a/test/GNTE/conf/node_miner_10.250.100.4/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.4/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: 
"1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.5/config.yaml b/test/GNTE/conf/node_miner_10.250.100.5/config.yaml index 810af9a5f..a09d83606 100755 --- a/test/GNTE/conf/node_miner_10.250.100.5/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.5/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.6/config.yaml b/test/GNTE/conf/node_miner_10.250.100.6/config.yaml index 7e07f7965..b96505d19 100755 --- a/test/GNTE/conf/node_miner_10.250.100.6/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.6/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.7/config.yaml b/test/GNTE/conf/node_miner_10.250.100.7/config.yaml index 3b60c4402..5370e24f6 100755 --- a/test/GNTE/conf/node_miner_10.250.100.7/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.7/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.8/config.yaml b/test/GNTE/conf/node_miner_10.250.100.8/config.yaml index d81332473..bc633a6fc 100755 --- a/test/GNTE/conf/node_miner_10.250.100.8/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.8/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/GNTE/conf/node_miner_10.250.100.9/config.yaml b/test/GNTE/conf/node_miner_10.250.100.9/config.yaml index 6ca3fae99..887064529 100755 --- a/test/GNTE/conf/node_miner_10.250.100.9/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.9/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "60s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/fuse/node_0/config.yaml b/test/fuse/node_0/config.yaml new file mode 100644 index 000000000..9919e977e --- /dev/null +++ b/test/fuse/node_0/config.yaml @@ -0,0 +1,99 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:6122" +ThisNodeID: "00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + 
a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_0/private.key b/test/fuse/node_0/private.key new file mode 100644 index 000000000..449618c0a --- /dev/null +++ b/test/fuse/node_0/private.key @@ -0,0 +1,2 @@ +WAð8#|TZԓ`mF}~e ʆ?~ *E%vpo*a߂ç_Bľ@8 +MC2 \ No newline at end of file diff --git a/test/fuse/node_1/config.yaml b/test/fuse/node_1/config.yaml new file mode 100644 index 000000000..caaa118d5 --- /dev/null +++ b/test/fuse/node_1/config.yaml @@ -0,0 +1,99 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:6121" +ThisNodeID: "00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + 
Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_1/private.key b/test/fuse/node_1/private.key new file mode 100644 index 000000000..449618c0a --- /dev/null +++ b/test/fuse/node_1/private.key @@ -0,0 +1,2 @@ +WAð8#|TZԓ`mF}~e ʆ?~ *E%vpo*a߂ç_Bľ@8 +MC2 \ No newline at end of file diff --git a/test/fuse/node_2/config.yaml b/test/fuse/node_2/config.yaml new file mode 100644 index 000000000..18c3409d0 --- /dev/null +++ b/test/fuse/node_2/config.yaml @@ -0,0 +1,99 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:6120" +ThisNodeID: "000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 
0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_2/private.key b/test/fuse/node_2/private.key new file mode 100644 index 000000000..449618c0a --- /dev/null +++ b/test/fuse/node_2/private.key @@ -0,0 +1,2 @@ +WAð8#|TZԓ`mF}~e ʆ?~ *E%vpo*a߂ç_Bľ@8 +MC2 \ No newline at end of file diff --git a/test/fuse/node_c/config.yaml b/test/fuse/node_c/config.yaml new file mode 100644 index 000000000..d90eca3fe --- /dev/null +++ b/test/fuse/node_c/config.yaml @@ -0,0 +1,99 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:6120" +ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 
00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_c/private.key b/test/fuse/node_c/private.key new file mode 100644 index 000000000..f563980c1 Binary files /dev/null and b/test/fuse/node_c/private.key differ diff --git a/test/fuse/node_miner_0/config.yaml b/test/fuse/node_miner_0/config.yaml new file mode 100644 index 000000000..448bb795f --- /dev/null +++ b/test/fuse/node_miner_0/config.yaml @@ -0,0 +1,95 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:3144" +ThisNodeID: "000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + MetricCollectInterval: "1h" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 
00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_miner_0/private.key b/test/fuse/node_miner_0/private.key new file mode 100644 index 000000000..12e7d3d80 --- /dev/null +++ b/test/fuse/node_miner_0/private.key @@ -0,0 +1 @@ +8s_/W-7IyH_DyTG*M9C#8p%x>SߪRLmPB>{:̜뢷|| \ No newline at end of file diff --git a/test/fuse/node_miner_1/config.yaml b/test/fuse/node_miner_1/config.yaml new file mode 100644 index 000000000..558ca1cb1 --- /dev/null +++ b/test/fuse/node_miner_1/config.yaml @@ -0,0 +1,95 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:3145" +ThisNodeID: "000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + MetricCollectInterval: "1h" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: 
"02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_miner_1/private.key b/test/fuse/node_miner_1/private.key new file mode 100644 index 000000000..44e8915e6 --- /dev/null +++ b/test/fuse/node_miner_1/private.key @@ -0,0 +1,2 @@ +s]](o3R +D5*9C 7ZinƋSp*SS5^ޑax>Xо2#IxRw+Ŕ \ No newline at end of file diff --git a/test/fuse/node_miner_2/config.yaml b/test/fuse/node_miner_2/config.yaml new file mode 100644 index 000000000..e6edd4d68 --- /dev/null +++ b/test/fuse/node_miner_2/config.yaml @@ -0,0 +1,95 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:3146" +ThisNodeID: "000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8" +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + MetricCollectInterval: "1h" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:6122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:6121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 
259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:6120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:3144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:3145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:3146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/fuse/node_miner_2/private.key b/test/fuse/node_miner_2/private.key new file mode 100644 index 000000000..adb437e75 --- /dev/null +++ b/test/fuse/node_miner_2/private.key @@ -0,0 +1 @@ +6 i.i%8pVVrLBKb: 1;(fF &y췥 RW3?CA;e"K2 \ No newline at end of file diff --git a/test/integration/node_miner_0/config.yaml b/test/integration/node_miner_0/config.yaml index 566c0a9b3..8fd498a09 100644 --- a/test/integration/node_miner_0/config.yaml +++ b/test/integration/node_miner_0/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_1/config.yaml b/test/integration/node_miner_1/config.yaml index a4e87e5ce..a2b44aaf6 100644 --- a/test/integration/node_miner_1/config.yaml +++ b/test/integration/node_miner_1/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_2/config.yaml b/test/integration/node_miner_2/config.yaml index b63633264..900670988 100644 --- a/test/integration/node_miner_2/config.yaml +++ b/test/integration/node_miner_2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/mainchain/node_miner_0/config.yaml b/test/mainchain/node_miner_0/config.yaml index ffd791a88..b5f65dc73 100644 --- a/test/mainchain/node_miner_0/config.yaml +++ b/test/mainchain/node_miner_0/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "5s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/mainchain/node_miner_1/config.yaml b/test/mainchain/node_miner_1/config.yaml index 615d25449..8d88cd5c9 100644 --- a/test/mainchain/node_miner_1/config.yaml +++ b/test/mainchain/node_miner_1/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: 
"5s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/mainchain/node_miner_2/config.yaml b/test/mainchain/node_miner_2/config.yaml index 50f67a6b0..3e67adecf 100644 --- a/test/mainchain/node_miner_2/config.yaml +++ b/test/mainchain/node_miner_2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "5s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_miner_0/config.yaml b/test/observation/node_miner_0/config.yaml index ff26fc0eb..43934a95e 100644 --- a/test/observation/node_miner_0/config.yaml +++ b/test/observation/node_miner_0/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_miner_1/config.yaml b/test/observation/node_miner_1/config.yaml index 95bc06940..400a21ad9 100644 --- a/test/observation/node_miner_1/config.yaml +++ b/test/observation/node_miner_1/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_miner_2/config.yaml b/test/observation/node_miner_2/config.yaml index 0d3303606..edeb37ebc 100644 --- a/test/observation/node_miner_2/config.yaml +++ b/test/observation/node_miner_2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/service/node_miner_0/config.yaml b/test/service/node_miner_0/config.yaml index 415a50fdd..4c981fcb7 100644 --- a/test/service/node_miner_0/config.yaml +++ b/test/service/node_miner_0/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/service/node_miner_1/config.yaml b/test/service/node_miner_1/config.yaml index 8e11e22e0..e41bbbb9f 100644 --- a/test/service/node_miner_1/config.yaml +++ b/test/service/node_miner_1/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/service/node_miner_2/config.yaml b/test/service/node_miner_2/config.yaml index 65d3f034f..00d4caed0 100644 --- a/test/service/node_miner_2/config.yaml +++ b/test/service/node_miner_2/config.yaml @@ -28,7 +28,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - MetricCollectInterval: "60s" + MetricCollectInterval: "1h" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/types/ack_type.go b/types/ack_type.go new file mode 100644 index 000000000..af24a9da9 --- /dev/null +++ b/types/ack_type.go @@ -0,0 +1,99 @@ +/* + * Copyright 2018 The CovenantSQL Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +//go:generate hsp + +// AckHeader defines client ack entity. +type AckHeader struct { + Response SignedResponseHeader `json:"r"` + NodeID proto.NodeID `json:"i"` // ack node id + Timestamp time.Time `json:"t"` // time in UTC zone +} + +// SignedAckHeader defines client signed ack entity. +type SignedAckHeader struct { + AckHeader + verifier.DefaultHashSignVerifierImpl +} + +// Ack defines a whole client ack request entity. +type Ack struct { + proto.Envelope + Header SignedAckHeader `json:"h"` +} + +// AckResponse defines client ack response entity. +type AckResponse struct{} + +// Verify checks hash and signature in ack header. +func (sh *SignedAckHeader) Verify() (err error) { + // verify response + if err = sh.Response.Verify(); err != nil { + return + } + + return sh.DefaultHashSignVerifierImpl.Verify(&sh.AckHeader) +} + +// Sign the request. +func (sh *SignedAckHeader) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err error) { + // Only used by ack worker, and ack.Header is verified before build ack + if verifyReqHeader { + // check original header signature + if err = sh.Response.Verify(); err != nil { + return + } + } + + return sh.DefaultHashSignVerifierImpl.Sign(&sh.AckHeader, signer) +} + +// Verify checks hash and signature in ack. +func (a *Ack) Verify() error { + return a.Header.Verify() +} + +// Sign the request. +func (a *Ack) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err error) { + // sign + return a.Header.Sign(signer, verifyReqHeader) +} + +// ResponseHash returns the deep shadowed Response Hash field. +func (sh *SignedAckHeader) ResponseHash() hash.Hash { + return sh.AckHeader.Response.Hash() +} + +// SignedRequestHeader returns the deep shadowed Request reference. +func (sh *SignedAckHeader) SignedRequestHeader() *SignedRequestHeader { + return &sh.AckHeader.Response.Request +} + +// SignedResponseHeader returns the Response reference. +func (sh *SignedAckHeader) SignedResponseHeader() *SignedResponseHeader { + return &sh.Response +} diff --git a/types/ack_type_gen.go b/types/ack_type_gen.go new file mode 100644 index 000000000..90fc7a814 --- /dev/null +++ b/types/ack_type_gen.go @@ -0,0 +1,111 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
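
(Aside on types/ack_type.go above: SignedAckHeader delegates hashing and signing to the embedded DefaultHashSignVerifierImpl, and Sign can optionally re-verify the embedded response first. A minimal signing sketch follows; the zero-value Response and the node-ID literal are placeholders only — in real use the response is the verified SignedResponseHeader returned by the miner.)

package main

import (
	"fmt"
	"time"

	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
	"github.com/CovenantSQL/CovenantSQL/proto"
	"github.com/CovenantSQL/CovenantSQL/types"
)

func main() {
	// Throwaway key; a real client loads its configured private key instead.
	priv, _, err := asymmetric.GenSecp256k1KeyPair()
	if err != nil {
		panic(err)
	}
	ack := &types.Ack{
		Header: types.SignedAckHeader{
			AckHeader: types.AckHeader{
				// Response is left zero here; it would normally hold the
				// SignedResponseHeader being acknowledged.
				NodeID:    proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade"),
				Timestamp: time.Now().UTC(),
			},
		},
	}
	// verifyReqHeader=false: skip re-verifying the embedded response while signing.
	if err := ack.Sign(priv, false); err != nil {
		panic(err)
	}
	fmt.Println("ack signed, header hash:", ack.Header.Hash().String())
}
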
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Ack) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Ack) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *AckHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + if oTemp, err := z.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AckHeader) Msgsize() (s int) { + s = 1 + 9 + z.Response.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z AckResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 0 + o = append(o, 0x80) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z AckResponse) Msgsize() (s int) { + s = 1 + return +} + +// MarshalHash marshals for hash +func (z *SignedAckHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 3 + o = append(o, 0x82, 0x82, 0x83, 0x83) + if oTemp, err := z.AckHeader.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.AckHeader.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendTime(o, z.AckHeader.Timestamp) + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedAckHeader) Msgsize() (s int) { + s = 1 + 10 + 1 + 9 + z.AckHeader.Response.Msgsize() + 7 + z.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/worker/types/ack_type_gen_test.go b/types/ack_type_gen_test.go similarity index 100% rename from worker/types/ack_type_gen_test.go rename to types/ack_type_gen_test.go diff --git a/sqlchain/types/billing_req.go b/types/billing_req.go similarity index 100% rename from sqlchain/types/billing_req.go rename to types/billing_req.go diff --git a/types/block.go b/types/block.go new file mode 100644 index 000000000..e66e688e4 --- /dev/null +++ b/types/block.go @@ -0,0 +1,188 @@ +/* + * Copyright 2018 The CovenantSQL Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/merkle" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +//go:generate hsp + +// Header is a block header. +type Header struct { + Version int32 + Producer proto.NodeID + GenesisHash hash.Hash + ParentHash hash.Hash + MerkleRoot hash.Hash + Timestamp time.Time +} + +// SignedHeader is block header along with its producer signature. +type SignedHeader struct { + Header + HSV verifier.DefaultHashSignVerifierImpl +} + +// Sign calls DefaultHashSignVerifierImpl to calculate header hash and sign it with signer. +func (s *SignedHeader) Sign(signer *ca.PrivateKey) error { + return s.HSV.Sign(&s.Header, signer) +} + +// Verify verifies the signature of the signed header. +func (s *SignedHeader) Verify() error { + return s.HSV.Verify(&s.Header) +} + +// VerifyAsGenesis verifies the signed header as a genesis block header. +func (s *SignedHeader) VerifyAsGenesis() (err error) { + var pk *ca.PublicKey + log.WithFields(log.Fields{ + "producer": s.Producer, + "root": s.GenesisHash.String(), + "parent": s.ParentHash.String(), + "merkle": s.MerkleRoot.String(), + "block": s.HSV.Hash().String(), + }).Debug("Verifying genesis block header") + if pk, err = kms.GetPublicKey(s.Producer); err != nil { + return + } + if !pk.IsEqual(s.HSV.Signee) { + err = ErrNodePublicKeyNotMatch + return + } + return s.Verify() +} + +// QueryAsTx defines a tx struct which is combined with request and signed response header +// for block. +type QueryAsTx struct { + Request *Request + Response *SignedResponseHeader +} + +// Block is a node of blockchain. +type Block struct { + SignedHeader SignedHeader + FailedReqs []*Request + QueryTxs []*QueryAsTx + Acks []*SignedAckHeader +} + +// CalcNextID calculates the next query id by examinating every query in block, and adds write +// query number to the last offset. +// +// TODO(leventeliu): too tricky. Consider simply adding next id to each block header. +func (b *Block) CalcNextID() (id uint64, ok bool) { + for _, v := range b.QueryTxs { + if v.Request.Header.QueryType == WriteQuery { + var nid = v.Response.LogOffset + uint64(len(v.Request.Payload.Queries)) + if nid > id { + id = nid + } + ok = true + } + } + return +} + +// PackAndSignBlock generates the signature for the Block from the given PrivateKey. +func (b *Block) PackAndSignBlock(signer *ca.PrivateKey) (err error) { + // Calculate merkle root + b.SignedHeader.MerkleRoot = b.computeMerkleRoot() + return b.SignedHeader.Sign(signer) +} + +// Verify verifies the merkle root and header signature of the block. 
+func (b *Block) Verify() (err error) { + // Verify merkle root + if merkleRoot := b.computeMerkleRoot(); !merkleRoot.IsEqual(&b.SignedHeader.MerkleRoot) { + return ErrMerkleRootVerification + } + return b.SignedHeader.Verify() +} + +// VerifyAsGenesis verifies the block as a genesis block. +func (b *Block) VerifyAsGenesis() (err error) { + var pk *ca.PublicKey + if pk, err = kms.GetPublicKey(b.SignedHeader.Producer); err != nil { + return + } + if !pk.IsEqual(b.SignedHeader.HSV.Signee) { + err = ErrNodePublicKeyNotMatch + return + } + return b.Verify() +} + +// Timestamp returns the timestamp field of the block header. +func (b *Block) Timestamp() time.Time { + return b.SignedHeader.Timestamp +} + +// Producer returns the producer field of the block header. +func (b *Block) Producer() proto.NodeID { + return b.SignedHeader.Producer +} + +// ParentHash returns the parent hash field of the block header. +func (b *Block) ParentHash() *hash.Hash { + return &b.SignedHeader.ParentHash +} + +// BlockHash returns the parent hash field of the block header. +func (b *Block) BlockHash() *hash.Hash { + return &b.SignedHeader.HSV.DataHash +} + +// GenesisHash returns the parent hash field of the block header. +func (b *Block) GenesisHash() *hash.Hash { + return &b.SignedHeader.GenesisHash +} + +// Signee returns the signee field of the block signed header. +func (b *Block) Signee() *ca.PublicKey { + return b.SignedHeader.HSV.Signee +} + +func (b *Block) computeMerkleRoot() hash.Hash { + var hs = make([]*hash.Hash, 0, len(b.FailedReqs)+len(b.QueryTxs)+len(b.Acks)) + for i := range b.FailedReqs { + h := b.FailedReqs[i].Header.Hash() + hs = append(hs, &h) + } + for i := range b.QueryTxs { + h := b.QueryTxs[i].Response.Hash() + hs = append(hs, &h) + } + for i := range b.Acks { + h := b.Acks[i].Hash() + hs = append(hs, &h) + } + return *merkle.NewMerkle(hs).GetRoot() +} + +// Blocks is Block (reference) array. +type Blocks []*Block diff --git a/types/block_gen.go b/types/block_gen.go new file mode 100644 index 000000000..bf8ef8bd6 --- /dev/null +++ b/types/block_gen.go @@ -0,0 +1,241 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
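
(Aside on Block.CalcNextID above: for every write query it computes Response.LogOffset plus the number of queries in the request payload, and returns the maximum; read queries are ignored. A small worked example, assuming the repo's usual module path github.com/CovenantSQL/CovenantSQL:)

package main

import (
	"fmt"

	"github.com/CovenantSQL/CovenantSQL/types"
)

func main() {
	b := &types.Block{
		QueryTxs: []*types.QueryAsTx{
			{
				Request: &types.Request{
					Header: types.SignedRequestHeader{
						RequestHeader: types.RequestHeader{QueryType: types.WriteQuery},
					},
					Payload: types.RequestPayload{Queries: make([]types.Query, 20)},
				},
				Response: &types.SignedResponseHeader{
					ResponseHeader: types.ResponseHeader{LogOffset: 10},
				},
			},
		},
	}
	// The single write query starts at offset 10 and carries 20 queries,
	// so the next id is 30.
	if next, ok := b.CalcNextID(); ok {
		fmt.Println(next) // 30
	}
}
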
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Block) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + // map header, size 2 + o = append(o, 0x84, 0x84, 0x82, 0x82) + if oTemp, err := z.SignedHeader.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.SignedHeader.HSV.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.QueryTxs))) + for za0002 := range z.QueryTxs { + if z.QueryTxs[za0002] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.QueryTxs[za0002].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.FailedReqs))) + for za0001 := range z.FailedReqs { + if z.FailedReqs[za0001] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.FailedReqs[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.Acks))) + for za0003 := range z.Acks { + if z.Acks[za0003] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Acks[za0003].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Block) Msgsize() (s int) { + s = 1 + 13 + 1 + 7 + z.SignedHeader.Header.Msgsize() + 4 + z.SignedHeader.HSV.Msgsize() + 9 + hsp.ArrayHeaderSize + for za0002 := range z.QueryTxs { + if z.QueryTxs[za0002] == nil { + s += hsp.NilSize + } else { + s += z.QueryTxs[za0002].Msgsize() + } + } + s += 11 + hsp.ArrayHeaderSize + for za0001 := range z.FailedReqs { + if z.FailedReqs[za0001] == nil { + s += hsp.NilSize + } else { + s += z.FailedReqs[za0001].Msgsize() + } + } + s += 5 + hsp.ArrayHeaderSize + for za0003 := range z.Acks { + if z.Acks[za0003] == nil { + s += hsp.NilSize + } else { + s += z.Acks[za0003].Msgsize() + } + } + return +} + +// MarshalHash marshals for hash +func (z Blocks) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + o = hsp.AppendArrayHeader(o, uint32(len(z))) + for za0001 := range z { + if z[za0001] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z Blocks) Msgsize() (s int) { + s = hsp.ArrayHeaderSize + for za0001 := range z { + if z[za0001] == nil { + s += hsp.NilSize + } else { + s += z[za0001].Msgsize() + } + } + return +} + +// MarshalHash marshals for hash +func (z *Header) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 6 + o = append(o, 0x86, 0x86) + if oTemp, err := z.GenesisHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + if oTemp, err := z.ParentHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil 
{ + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + o = hsp.AppendInt32(o, z.Version) + o = append(o, 0x86) + if oTemp, err := z.Producer.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + o = hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Header) Msgsize() (s int) { + s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.ParentHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z *QueryAsTx) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if z.Request == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Request.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x82) + if z.Response == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *QueryAsTx) Msgsize() (s int) { + s = 1 + 8 + if z.Request == nil { + s += hsp.NilSize + } else { + s += z.Request.Msgsize() + } + s += 9 + if z.Response == nil { + s += hsp.NilSize + } else { + s += z.Response.Msgsize() + } + return +} + +// MarshalHash marshals for hash +func (z *SignedHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.HSV.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedHeader) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 4 + z.HSV.Msgsize() + return +} diff --git a/types/block_gen_test.go b/types/block_gen_test.go new file mode 100644 index 000000000..ff2b94d1d --- /dev/null +++ b/types/block_gen_test.go @@ -0,0 +1,195 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
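
(Aside on the generated block_gen.go above: MarshalHash emits a canonical byte layout so that signing and verification always hash identical bytes, whatever the in-memory representation. A sketch of turning that layout into a digest with hash.THashH, the digest helper already used elsewhere in this patch:)

package main

import (
	"fmt"

	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
	"github.com/CovenantSQL/CovenantSQL/types"
)

func main() {
	h := &types.Header{Version: 1}
	raw, err := h.MarshalHash() // canonical byte layout from the generated code
	if err != nil {
		panic(err)
	}
	digest := hash.THashH(raw) // the same header content always yields the same digest
	fmt.Println(digest.String())
}
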
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashBlock(t *testing.T) { + v := Block{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashBlock(b *testing.B) { + v := Block{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgBlock(b *testing.B) { + v := Block{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashBlocks(t *testing.T) { + v := Blocks{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashBlocks(b *testing.B) { + v := Blocks{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgBlocks(b *testing.B) { + v := Blocks{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashHeader(t *testing.T) { + v := Header{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashHeader(b *testing.B) { + v := Header{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgHeader(b *testing.B) { + v := Header{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashQueryAsTx(t *testing.T) { + v := QueryAsTx{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashQueryAsTx(b *testing.B) { + v := QueryAsTx{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgQueryAsTx(b *testing.B) { + v := QueryAsTx{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedHeader(t *testing.T) { + v := SignedHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedHeader(b *testing.B) { + v := SignedHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedHeader(b 
*testing.B) { + v := SignedHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/types/block_test.go b/types/block_test.go new file mode 100644 index 000000000..e028babde --- /dev/null +++ b/types/block_test.go @@ -0,0 +1,450 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "bytes" + "math/big" + "reflect" + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/pkg/errors" + . "github.com/smartystreets/goconvey/convey" +) + +func TestSignAndVerify(t *testing.T) { + block, err := createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = block.Verify(); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + block.SignedHeader.HSV.DataHash[0]++ + + if err = errors.Cause(block.Verify()); err != verifier.ErrHashValueNotMatch { + t.Fatalf("Unexpected error: %v", err) + } + + block.Acks = append(block.Acks, &SignedAckHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x01}, + }, + }) + + if err = block.Verify(); err != ErrMerkleRootVerification { + t.Fatalf("Unexpected error: %v", err) + } +} + +func TestHeaderMarshalUnmarshaler(t *testing.T) { + block, err := createRandomBlock(genesisHash, false) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + origin := &block.SignedHeader.Header + enc, err := utils.EncodeMsgPack(origin) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + dec := &Header{} + if err = utils.DecodeMsgPack(enc.Bytes(), dec); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts1, err := origin.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts2, err := dec.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } + + if !reflect.DeepEqual(origin, dec) { + t.Fatalf("Values don't match:\n\tv1 = %+v\n\tv2 = %+v", origin, dec) + } +} + +func TestSignedHeaderMarshaleUnmarshaler(t *testing.T) { + block, err := createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + origin := &block.SignedHeader + enc, err := utils.EncodeMsgPack(origin) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + dec := &SignedHeader{} + + if err = utils.DecodeMsgPack(enc.Bytes(), dec); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts1, err := origin.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts2, err := dec.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if !bytes.Equal(bts1, bts2) { + 
t.Fatal("hash not stable") + } + + if !reflect.DeepEqual(origin.Header, dec.Header) { + t.Fatalf("Values don't match:\n\tv1 = %+v\n\tv2 = %+v", origin.Header, dec.Header) + } + + if err = origin.Verify(); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = dec.Verify(); err != nil { + t.Fatalf("Error occurred: %v", err) + } +} + +func TestBlockMarshalUnmarshaler(t *testing.T) { + origin, err := createRandomBlock(genesisHash, false) + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + origin2, err := createRandomBlock(genesisHash, false) + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + blocks := make(Blocks, 0, 2) + blocks = append(blocks, origin) + blocks = append(blocks, origin2) + blocks = append(blocks, nil) + + blocks2 := make(Blocks, 0, 2) + blocks2 = append(blocks2, origin) + blocks2 = append(blocks2, origin2) + blocks2 = append(blocks2, nil) + + bts1, err := blocks.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts2, err := blocks2.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } + + enc, err := utils.EncodeMsgPack(origin) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + dec := &Block{} + + if err = utils.DecodeMsgPack(enc.Bytes(), dec); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts1, err = origin.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + bts2, err = dec.MarshalHash() + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } + + if !reflect.DeepEqual(origin, dec) { + t.Fatalf("Values don't match:\n\tv1 = %+v\n\tv2 = %+v", origin, dec) + } +} + +func TestGenesis(t *testing.T) { + genesis, err := createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = genesis.VerifyAsGenesis(); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = genesis.SignedHeader.VerifyAsGenesis(); err != nil { + t.Fatalf("Error occurred: %v", err) + } + + // Test non-genesis block + genesis, err = createRandomBlock(genesisHash, false) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + if err = genesis.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatal("Unexpected result: returned nil while expecting an error") + } + + if err = genesis.SignedHeader.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatal("Unexpected result: returned nil while expecting an error") + } + + // Test altered public key block + genesis, err = createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + _, pub, err := asymmetric.GenSecp256k1KeyPair() + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + genesis.SignedHeader.HSV.Signee = pub + + if err = genesis.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatal("Unexpected result: returned nil while expecting an error") + } + + if err = genesis.SignedHeader.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatal("Unexpected result: returned nil while expecting an error") + } + + // Test altered signature + genesis, err = createRandomBlock(genesisHash, true) + + if err != nil { + t.Fatalf("Error occurred: %v", err) + } + + 
genesis.SignedHeader.HSV.Signature.R.Add(genesis.SignedHeader.HSV.Signature.R, big.NewInt(int64(1))) + genesis.SignedHeader.HSV.Signature.S.Add(genesis.SignedHeader.HSV.Signature.S, big.NewInt(int64(1))) + + if err = genesis.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatalf("Unexpected error: %v", err) + } + + if err = genesis.SignedHeader.VerifyAsGenesis(); err != nil { + t.Logf("Error occurred as expected: %v", err) + } else { + t.Fatal("Unexpected result: returned nil while expecting an error") + } +} + +func Test(t *testing.T) { + Convey("CalcNextID should return correct id of each testing block", t, func() { + var ( + nextid uint64 + ok bool + + cases = [...]struct { + block *Block + nextid uint64 + ok bool + }{ + { + block: &Block{ + QueryTxs: []*QueryAsTx{}, + }, + nextid: 0, + ok: false, + }, { + block: &Block{ + QueryTxs: nil, + }, + nextid: 0, + ok: false, + }, { + block: &Block{ + QueryTxs: []*QueryAsTx{ + &QueryAsTx{ + Request: &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: ReadQuery, + }, + }, + Payload: RequestPayload{ + Queries: make([]Query, 10), + }, + }, + Response: &SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + LogOffset: 0, + }, + }, + }, + }, + }, + nextid: 0, + ok: false, + }, { + block: &Block{ + QueryTxs: []*QueryAsTx{ + &QueryAsTx{ + Request: &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + }, + }, + Payload: RequestPayload{ + Queries: make([]Query, 10), + }, + }, + Response: &SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + LogOffset: 0, + }, + }, + }, + }, + }, + nextid: 10, + ok: true, + }, { + block: &Block{ + QueryTxs: []*QueryAsTx{ + &QueryAsTx{ + Request: &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: ReadQuery, + }, + }, + Payload: RequestPayload{ + Queries: make([]Query, 10), + }, + }, + Response: &SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + LogOffset: 0, + }, + }, + }, &QueryAsTx{ + Request: &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + }, + }, + Payload: RequestPayload{ + Queries: make([]Query, 10), + }, + }, + Response: &SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + LogOffset: 0, + }, + }, + }, &QueryAsTx{ + Request: &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: ReadQuery, + }, + }, + Payload: RequestPayload{ + Queries: make([]Query, 10), + }, + }, + Response: &SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + LogOffset: 10, + }, + }, + }, &QueryAsTx{ + Request: &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + }, + }, + Payload: RequestPayload{ + Queries: make([]Query, 20), + }, + }, + Response: &SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + LogOffset: 10, + }, + }, + }, + }, + }, + nextid: 30, + ok: true, + }, + } + ) + + for _, v := range cases { + nextid, ok = v.block.CalcNextID() + So(ok, ShouldEqual, v.ok) + if ok { + So(nextid, ShouldEqual, v.nextid) + } + } + }) +} diff --git a/types/build.sh b/types/build.sh deleted file mode 100755 index 2b9fd9b67..000000000 --- a/types/build.sh +++ /dev/null @@ -1,4 +0,0 @@ -#! 
/usr/bin/env bash - -declare -r PB_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -protoc -I="$PB_DIR" --go_out="$PB_DIR" "$PB_DIR"/*.proto diff --git a/blockproducer/db_service_types.go b/types/db_service_types.go similarity index 62% rename from blockproducer/db_service_types.go rename to types/db_service_types.go index 3f94c991f..89c69bafa 100644 --- a/blockproducer/db_service_types.go +++ b/types/db_service_types.go @@ -14,55 +14,35 @@ * limitations under the License. */ -package blockproducer +package types import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" ) //go:generate hsp // CreateDatabaseRequestHeader defines client create database rpc header. type CreateDatabaseRequestHeader struct { - ResourceMeta wt.ResourceMeta + ResourceMeta ResourceMeta } // SignedCreateDatabaseRequestHeader defines signed client create database request header. type SignedCreateDatabaseRequestHeader struct { CreateDatabaseRequestHeader - Hash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in create database request header. func (sh *SignedCreateDatabaseRequestHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.CreateDatabaseRequestHeader, &sh.Hash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.CreateDatabaseRequestHeader) } // Sign the request. func (sh *SignedCreateDatabaseRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - if err = buildHash(&sh.CreateDatabaseRequestHeader, &sh.Hash); err != nil { - return - } - - // sign - sh.Signature, err = signer.Sign(sh.Hash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.CreateDatabaseRequestHeader, signer) } // CreateDatabaseRequest defines client create database rpc request entity. @@ -84,42 +64,23 @@ func (r *CreateDatabaseRequest) Sign(signer *asymmetric.PrivateKey) (err error) // CreateDatabaseResponseHeader defines client create database rpc response header. type CreateDatabaseResponseHeader struct { - InstanceMeta wt.ServiceInstance + InstanceMeta ServiceInstance } // SignedCreateDatabaseResponseHeader defines signed client create database response header. type SignedCreateDatabaseResponseHeader struct { CreateDatabaseResponseHeader - Hash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in create database response header. func (sh *SignedCreateDatabaseResponseHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.CreateDatabaseResponseHeader, &sh.Hash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.CreateDatabaseResponseHeader) } // Sign the response. 
func (sh *SignedCreateDatabaseResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - if err = buildHash(&sh.CreateDatabaseResponseHeader, &sh.Hash); err != nil { - return - } - - // sign - sh.Signature, err = signer.Sign(sh.Hash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.CreateDatabaseResponseHeader, signer) } // CreateDatabaseResponse defines client create database rpc response entity. @@ -147,36 +108,17 @@ type DropDatabaseRequestHeader struct { // SignedDropDatabaseRequestHeader defines signed client drop database rpc request header. type SignedDropDatabaseRequestHeader struct { DropDatabaseRequestHeader - Hash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in request header. func (sh *SignedDropDatabaseRequestHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.DropDatabaseRequestHeader, &sh.Hash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.DropDatabaseRequestHeader) } // Sign the request. func (sh *SignedDropDatabaseRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - if err = buildHash(&sh.DropDatabaseRequestHeader, &sh.Hash); err != nil { - return - } - - // sign - sh.Signature, err = signer.Sign(sh.Hash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.DropDatabaseRequestHeader, signer) } // DropDatabaseRequest defines client drop database rpc request entity. @@ -206,36 +148,17 @@ type GetDatabaseRequestHeader struct { // SignedGetDatabaseRequestHeader defines signed client get database rpc request header entity. type SignedGetDatabaseRequestHeader struct { GetDatabaseRequestHeader - Hash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in request header. func (sh *SignedGetDatabaseRequestHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.GetDatabaseRequestHeader, &sh.Hash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.GetDatabaseRequestHeader) } // Sign the request. func (sh *SignedGetDatabaseRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - if err = buildHash(&sh.GetDatabaseRequestHeader, &sh.Hash); err != nil { - return - } - - // sign - sh.Signature, err = signer.Sign(sh.Hash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.GetDatabaseRequestHeader, signer) } // GetDatabaseRequest defines client get database rpc request entity. @@ -256,42 +179,23 @@ func (r *GetDatabaseRequest) Sign(signer *asymmetric.PrivateKey) error { // GetDatabaseResponseHeader defines client get database rpc response header entity. type GetDatabaseResponseHeader struct { - InstanceMeta wt.ServiceInstance + InstanceMeta ServiceInstance } // SignedGetDatabaseResponseHeader defines client get database rpc response header entity. 
type SignedGetDatabaseResponseHeader struct { GetDatabaseResponseHeader - Hash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } // Verify checks hash and signature in response header. func (sh *SignedGetDatabaseResponseHeader) Verify() (err error) { - // verify hash - if err = verifyHash(&sh.GetDatabaseResponseHeader, &sh.Hash); err != nil { - return - } - // verify sign - if sh.Signee == nil || sh.Signature == nil || !sh.Signature.Verify(sh.Hash[:], sh.Signee) { - return wt.ErrSignVerification - } - return + return sh.DefaultHashSignVerifierImpl.Verify(&sh.GetDatabaseResponseHeader) } // Sign the request. func (sh *SignedGetDatabaseResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // build hash - if err = buildHash(&sh.GetDatabaseResponseHeader, &sh.Hash); err != nil { - return - } - - // sign - sh.Signature, err = signer.Sign(sh.Hash[:]) - sh.Signee = signer.PubKey() - - return + return sh.DefaultHashSignVerifierImpl.Sign(&sh.GetDatabaseResponseHeader, signer) } // GetDatabaseResponse defines client get database rpc response entity. @@ -309,29 +213,3 @@ func (r *GetDatabaseResponse) Verify() (err error) { func (r *GetDatabaseResponse) Sign(signer *asymmetric.PrivateKey) (err error) { return r.Header.Sign(signer) } - -// FIXME(xq262144) remove duplicated interface in utils package. -type canMarshalHash interface { - MarshalHash() ([]byte, error) -} - -func verifyHash(data canMarshalHash, h *hash.Hash) (err error) { - var newHash hash.Hash - if err = buildHash(data, &newHash); err != nil { - return - } - if !newHash.IsEqual(h) { - return ErrSignVerification - } - return -} - -func buildHash(data canMarshalHash, h *hash.Hash) (err error) { - var hashBytes []byte - if hashBytes, err = data.MarshalHash(); err != nil { - return - } - newHash := hash.THashH(hashBytes) - copy(h[:], newHash[:]) - return -} diff --git a/blockproducer/db_service_types_gen.go b/types/db_service_types_gen.go similarity index 65% rename from blockproducer/db_service_types_gen.go rename to types/db_service_types_gen.go index 4ab068cbb..188fcada8 100644 --- a/blockproducer/db_service_types_gen.go +++ b/types/db_service_types_gen.go @@ -1,4 +1,4 @@ -package blockproducer +package types // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
@@ -11,8 +11,16 @@ func (z *CreateDatabaseRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -28,7 +36,7 @@ func (z *CreateDatabaseRequest) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabaseRequest) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 7 + 1 + 28 + 1 + 13 + z.Header.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() return } @@ -57,8 +65,16 @@ func (z *CreateDatabaseResponse) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -74,7 +90,7 @@ func (z *CreateDatabaseResponse) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabaseResponse) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 7 + 1 + 29 + 1 + 13 + z.Header.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() return } @@ -103,8 +119,16 @@ func (z *DropDatabaseRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -120,7 +144,7 @@ func (z *DropDatabaseRequest) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *DropDatabaseRequest) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 7 + 1 + 26 + 1 + 11 + z.Header.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() return } @@ -164,8 +188,16 @@ func (z *GetDatabaseRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, 
z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -181,7 +213,7 @@ func (z *GetDatabaseRequest) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *GetDatabaseRequest) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 7 + 1 + 25 + 1 + 11 + z.Header.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() return } @@ -210,8 +242,16 @@ func (z *GetDatabaseResponse) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) + if oTemp, err := z.Header.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -227,7 +267,7 @@ func (z *GetDatabaseResponse) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *GetDatabaseResponse) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 7 + 1 + 26 + 1 + 13 + z.Header.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() return } @@ -255,36 +295,16 @@ func (z *GetDatabaseResponseHeader) Msgsize() (s int) { func (z *SignedCreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } + // map header, size 2 // map header, size 1 - o = append(o, 0x84, 0x81, 0x81) + o = append(o, 0x82, 0x82, 0x81, 0x81) if oTemp, err := z.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.Hash.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -294,19 +314,7 @@ func (z *SignedCreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedCreateDatabaseRequestHeader) Msgsize() (s 
int) { - s = 1 + 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 28 + 1 + 13 + z.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 5 + z.Hash.Msgsize() + s = 1 + 28 + 1 + 13 + z.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } @@ -314,36 +322,16 @@ func (z *SignedCreateDatabaseRequestHeader) Msgsize() (s int) { func (z *SignedCreateDatabaseResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } + // map header, size 2 // map header, size 1 - o = append(o, 0x84, 0x81, 0x81) + o = append(o, 0x82, 0x82, 0x81, 0x81) if oTemp, err := z.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.Hash.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -353,19 +341,7 @@ func (z *SignedCreateDatabaseResponseHeader) MarshalHash() (o []byte, err error) // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedCreateDatabaseResponseHeader) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 29 + 1 + 13 + z.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 5 + z.Hash.Msgsize() + s = 1 + 29 + 1 + 13 + z.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } @@ -373,36 +349,16 @@ func (z *SignedCreateDatabaseResponseHeader) Msgsize() (s int) { func (z *SignedDropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } + // map header, size 2 // map header, size 1 - o = append(o, 0x84, 0x81, 0x81) + o = append(o, 0x82, 0x82, 0x81, 0x81) if oTemp, err := z.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.Hash.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -412,19 +368,7 @@ func (z *SignedDropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper 
bound estimate of the number of bytes occupied by the serialized message func (z *SignedDropDatabaseRequestHeader) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 26 + 1 + 11 + z.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 5 + z.Hash.Msgsize() + s = 1 + 26 + 1 + 11 + z.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } @@ -432,36 +376,16 @@ func (z *SignedDropDatabaseRequestHeader) Msgsize() (s int) { func (z *SignedGetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } + // map header, size 2 // map header, size 1 - o = append(o, 0x84, 0x81, 0x81) + o = append(o, 0x82, 0x82, 0x81, 0x81) if oTemp, err := z.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.Hash.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -471,19 +395,7 @@ func (z *SignedGetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedGetDatabaseRequestHeader) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 25 + 1 + 11 + z.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 5 + z.Hash.Msgsize() + s = 1 + 25 + 1 + 11 + z.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } @@ -491,36 +403,16 @@ func (z *SignedGetDatabaseRequestHeader) Msgsize() (s int) { func (z *SignedGetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } + // map header, size 2 // map header, size 1 - o = append(o, 0x84, 0x81, 0x81) + o = append(o, 0x82, 0x82, 0x81, 0x81) if oTemp, err := z.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.Hash.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -530,18 +422,6 @@ func (z 
*SignedGetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedGetDatabaseResponseHeader) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 26 + 1 + 13 + z.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 5 + z.Hash.Msgsize() + s = 1 + 26 + 1 + 13 + z.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/blockproducer/db_service_types_gen_test.go b/types/db_service_types_gen_test.go similarity index 99% rename from blockproducer/db_service_types_gen_test.go rename to types/db_service_types_gen_test.go index b9ad910bd..dca3773e8 100644 --- a/blockproducer/db_service_types_gen_test.go +++ b/types/db_service_types_gen_test.go @@ -1,4 +1,4 @@ -package blockproducer +package types // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. diff --git a/types/db_service_types_test.go b/types/db_service_types_test.go new file mode 100644 index 000000000..9cb3cafee --- /dev/null +++ b/types/db_service_types_test.go @@ -0,0 +1,100 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestTypes(t *testing.T) { + Convey("test nils", t, func() { + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + + h1 := &SignedCreateDatabaseRequestHeader{} + err = h1.Sign(priv) + So(err, ShouldBeNil) + h1.Signee = nil + err = h1.Verify() + So(err, ShouldNotBeNil) + + h2 := &SignedCreateDatabaseResponseHeader{} + err = h2.Sign(priv) + So(err, ShouldBeNil) + h2.Signee = nil + err = h2.Verify() + So(err, ShouldNotBeNil) + + h3 := &SignedDropDatabaseRequestHeader{} + err = h3.Sign(priv) + So(err, ShouldBeNil) + h3.Signee = nil + err = h3.Verify() + So(err, ShouldNotBeNil) + + h4 := &SignedGetDatabaseRequestHeader{} + err = h4.Sign(priv) + So(err, ShouldBeNil) + h4.Signee = nil + err = h4.Verify() + So(err, ShouldNotBeNil) + + h5 := &SignedGetDatabaseResponseHeader{} + err = h5.Sign(priv) + So(err, ShouldBeNil) + h5.Signee = nil + err = h5.Verify() + So(err, ShouldNotBeNil) + }) + Convey("test nested sign/verify", t, func() { + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + + r1 := &CreateDatabaseRequest{} + err = r1.Sign(priv) + So(err, ShouldBeNil) + err = r1.Verify() + So(err, ShouldBeNil) + + r2 := &CreateDatabaseResponse{} + err = r2.Sign(priv) + So(err, ShouldBeNil) + err = r2.Verify() + So(err, ShouldBeNil) + + r3 := &DropDatabaseRequest{} + err = r3.Sign(priv) + So(err, ShouldBeNil) + err = r3.Verify() + So(err, ShouldBeNil) + + r4 := &GetDatabaseRequest{} + err = r4.Sign(priv) + So(err, ShouldBeNil) + err = r4.Verify() + So(err, ShouldBeNil) + + r5 := &GetDatabaseResponse{} + err = r5.Sign(priv) + So(err, ShouldBeNil) + err = r5.Verify() + So(err, ShouldBeNil) + }) +} diff --git a/sqlchain/types/doc.go b/types/doc.go similarity index 100% rename from sqlchain/types/doc.go rename to types/doc.go diff --git a/types/errors.go b/types/errors.go new file mode 100644 index 000000000..650e46728 --- /dev/null +++ b/types/errors.go @@ -0,0 +1,31 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "errors" +) + +var ( + // ErrMerkleRootVerification indicates a failed merkle root verificatin. + ErrMerkleRootVerification = errors.New("merkle root verification failed") + // ErrNodePublicKeyNotMatch indicates that the public key given with a node does not match the + // one in the key store. + ErrNodePublicKeyNotMatch = errors.New("node publick key doesn't match") + // ErrSignRequest indicates a failed signature compute operation. + ErrSignRequest = errors.New("signature compute failed") +) diff --git a/types/init_service_type.go b/types/init_service_type.go new file mode 100644 index 000000000..9d5462788 --- /dev/null +++ b/types/init_service_type.go @@ -0,0 +1,84 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +//go:generate hsp + +// InitService defines worker service init request. +type InitService struct { + proto.Envelope +} + +// ResourceMeta defines single database resource meta. +type ResourceMeta struct { + Node uint16 // reserved node count + Space uint64 // reserved storage space in bytes + Memory uint64 // reserved memory in bytes + LoadAvgPerCPU uint64 // max loadAvg15 per CPU + EncryptionKey string `hspack:"-"` // encryption key for database instance +} + +// ServiceInstance defines single instance to be initialized. +type ServiceInstance struct { + DatabaseID proto.DatabaseID + Peers *proto.Peers + ResourceMeta ResourceMeta + GenesisBlock *Block +} + +// InitServiceResponseHeader defines worker service init response header. +type InitServiceResponseHeader struct { + Instances []ServiceInstance +} + +// SignedInitServiceResponseHeader defines signed worker service init response header. +type SignedInitServiceResponseHeader struct { + InitServiceResponseHeader + verifier.DefaultHashSignVerifierImpl +} + +// InitServiceResponse defines worker service init response. +type InitServiceResponse struct { + Header SignedInitServiceResponseHeader +} + +// Verify checks hash and signature in init service response header. +func (sh *SignedInitServiceResponseHeader) Verify() (err error) { + return sh.DefaultHashSignVerifierImpl.Verify(&sh.InitServiceResponseHeader) +} + +// Sign the request. +func (sh *SignedInitServiceResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + return sh.DefaultHashSignVerifierImpl.Sign(&sh.InitServiceResponseHeader, signer) +} + +// Verify checks hash and signature in init service response header. +func (rs *InitServiceResponse) Verify() error { + return rs.Header.Verify() +} + +// Sign the request. +func (rs *InitServiceResponse) Sign(signer *asymmetric.PrivateKey) (err error) { + // sign + return rs.Header.Sign(signer) +} diff --git a/types/init_service_type_gen.go b/types/init_service_type_gen.go new file mode 100644 index 000000000..f859b2e88 --- /dev/null +++ b/types/init_service_type_gen.go @@ -0,0 +1,187 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *InitService) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *InitService) Msgsize() (s int) { + s = 1 + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *InitServiceResponse) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *InitServiceResponse) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *InitServiceResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Instances))) + for za0001 := range z.Instances { + if oTemp, err := z.Instances[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *InitServiceResponseHeader) Msgsize() (s int) { + s = 1 + 10 + hsp.ArrayHeaderSize + for za0001 := range z.Instances { + s += z.Instances[za0001].Msgsize() + } + return +} + +// MarshalHash marshals for hash +func (z *ResourceMeta) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + o = hsp.AppendUint16(o, z.Node) + o = append(o, 0x84) + o = hsp.AppendUint64(o, z.Space) + o = append(o, 0x84) + o = hsp.AppendUint64(o, z.Memory) + o = append(o, 0x84) + o = hsp.AppendUint64(o, z.LoadAvgPerCPU) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ResourceMeta) Msgsize() (s int) { + s = 1 + 5 + hsp.Uint16Size + 6 + hsp.Uint64Size + 7 + hsp.Uint64Size + 14 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z *ServiceInstance) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.GenesisBlock == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.GenesisBlock.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if z.Peers == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Peers.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the 
serialized message +func (z *ServiceInstance) Msgsize() (s int) { + s = 1 + 13 + if z.GenesisBlock == nil { + s += hsp.NilSize + } else { + s += z.GenesisBlock.Msgsize() + } + s += 6 + if z.Peers == nil { + s += hsp.NilSize + } else { + s += z.Peers.Msgsize() + } + s += 13 + z.ResourceMeta.Msgsize() + 11 + z.DatabaseID.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedInitServiceResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x82, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.InitServiceResponseHeader.Instances))) + for za0001 := range z.InitServiceResponseHeader.Instances { + if oTemp, err := z.InitServiceResponseHeader.Instances[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedInitServiceResponseHeader) Msgsize() (s int) { + s = 1 + 26 + 1 + 10 + hsp.ArrayHeaderSize + for za0001 := range z.InitServiceResponseHeader.Instances { + s += z.InitServiceResponseHeader.Instances[za0001].Msgsize() + } + s += 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/worker/types/init_service_type_gen_test.go b/types/init_service_type_gen_test.go similarity index 100% rename from worker/types/init_service_type_gen_test.go rename to types/init_service_type_gen_test.go diff --git a/types/no_ack_report_type.go b/types/no_ack_report_type.go new file mode 100644 index 000000000..29187a451 --- /dev/null +++ b/types/no_ack_report_type.go @@ -0,0 +1,129 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +//go:generate hsp + +// NoAckReportHeader defines worker issued client no ack report. +type NoAckReportHeader struct { + NodeID proto.NodeID // reporter node id + Timestamp time.Time // time in UTC zone + Response SignedResponseHeader +} + +// SignedNoAckReportHeader defines worker issued/signed client no ack report. +type SignedNoAckReportHeader struct { + NoAckReportHeader + verifier.DefaultHashSignVerifierImpl +} + +// NoAckReport defines whole worker no client ack report. +type NoAckReport struct { + proto.Envelope + Header SignedNoAckReportHeader +} + +// AggrNoAckReportHeader defines worker leader aggregated client no ack report.
+type AggrNoAckReportHeader struct { + NodeID proto.NodeID // aggregated report node id + Timestamp time.Time // time in UTC zone + Reports []SignedNoAckReportHeader // no-ack reports + Peers *proto.Peers // serving peers during report +} + +// SignedAggrNoAckReportHeader defines worker leader aggregated/signed client no ack report. +type SignedAggrNoAckReportHeader struct { + AggrNoAckReportHeader + verifier.DefaultHashSignVerifierImpl +} + +// AggrNoAckReport defines whole worker leader no client ack report. +type AggrNoAckReport struct { + proto.Envelope + Header SignedAggrNoAckReportHeader +} + +// Verify checks hash and signature in signed no ack report header. +func (sh *SignedNoAckReportHeader) Verify() (err error) { + // verify original response + if err = sh.Response.Verify(); err != nil { + return + } + + return sh.DefaultHashSignVerifierImpl.Verify(&sh.NoAckReportHeader) +} + +// Sign the request. +func (sh *SignedNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + // verify original response + if err = sh.Response.Verify(); err != nil { + return + } + + return sh.DefaultHashSignVerifierImpl.Sign(&sh.NoAckReportHeader, signer) +} + +// Verify checks hash and signature in whole no ack report. +func (r *NoAckReport) Verify() error { + return r.Header.Verify() +} + +// Sign the request. +func (r *NoAckReport) Sign(signer *asymmetric.PrivateKey) error { + return r.Header.Sign(signer) +} + +// Verify checks hash and signature in aggregated no ack report. +func (sh *SignedAggrNoAckReportHeader) Verify() (err error) { + // verify original reports + for _, r := range sh.Reports { + if err = r.Verify(); err != nil { + return + } + } + + return sh.DefaultHashSignVerifierImpl.Verify(&sh.AggrNoAckReportHeader) +} + +// Sign the request. +func (sh *SignedAggrNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + for _, r := range sh.Reports { + if err = r.Verify(); err != nil { + return + } + } + + return sh.DefaultHashSignVerifierImpl.Sign(&sh.AggrNoAckReportHeader, signer) +} + +// Verify the whole aggregation no ack report. +func (r *AggrNoAckReport) Verify() (err error) { + return r.Header.Verify() +} + +// Sign the request. +func (r *AggrNoAckReport) Sign(signer *asymmetric.PrivateKey) error { + return r.Header.Sign(signer) +} diff --git a/types/no_ack_report_type_gen.go b/types/no_ack_report_type_gen.go new file mode 100644 index 000000000..e9e89abc6 --- /dev/null +++ b/types/no_ack_report_type_gen.go @@ -0,0 +1,206 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *AggrNoAckReport) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 2 + o = append(o, 0x82, 0x82, 0x82, 0x82) + if oTemp, err := z.Header.AggrNoAckReportHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AggrNoAckReport) Msgsize() (s int) { + s = 1 + 7 + 1 + 22 + z.Header.AggrNoAckReportHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *AggrNoAckReportHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 4 + o = append(o, 0x84, 0x84) + if z.Peers == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Peers.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + o = hsp.AppendArrayHeader(o, uint32(len(z.Reports))) + for za0001 := range z.Reports { + if oTemp, err := z.Reports[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x84) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x84) + o = hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AggrNoAckReportHeader) Msgsize() (s int) { + s = 1 + 6 + if z.Peers == nil { + s += hsp.NilSize + } else { + s += z.Peers.Msgsize() + } + s += 8 + hsp.ArrayHeaderSize + for za0001 := range z.Reports { + s += z.Reports[za0001].Msgsize() + } + s += 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z *NoAckReport) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *NoAckReport) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *NoAckReportHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + if oTemp, err := z.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = 
hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *NoAckReportHeader) Msgsize() (s int) { + s = 1 + 9 + z.Response.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z *SignedAggrNoAckReportHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.AggrNoAckReportHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedAggrNoAckReportHeader) Msgsize() (s int) { + s = 1 + 22 + z.AggrNoAckReportHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *SignedNoAckReportHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 3 + o = append(o, 0x82, 0x82, 0x83, 0x83) + if oTemp, err := z.NoAckReportHeader.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendTime(o, z.NoAckReportHeader.Timestamp) + o = append(o, 0x83) + if oTemp, err := z.NoAckReportHeader.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedNoAckReportHeader) Msgsize() (s int) { + s = 1 + 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/worker/types/no_ack_report_type_gen_test.go b/types/no_ack_report_type_gen_test.go similarity index 100% rename from worker/types/no_ack_report_type_gen_test.go rename to types/no_ack_report_type_gen_test.go diff --git a/sqlchain/types/observer.go b/types/observer.go similarity index 100% rename from sqlchain/types/observer.go rename to types/observer.go diff --git a/types/request_type.go b/types/request_type.go new file mode 100644 index 000000000..2105e641c --- /dev/null +++ b/types/request_type.go @@ -0,0 +1,147 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package types + +import ( + "fmt" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +//go:generate hsp + +// QueryType enumerates available query type, currently read/write. +type QueryType int32 + +const ( + // ReadQuery defines a read query type. + ReadQuery QueryType = iota + // WriteQuery defines a write query type. + WriteQuery +) + +// NamedArg defines the named argument structure for database. +type NamedArg struct { + Name string + Value interface{} +} + +// Query defines single query. +type Query struct { + Pattern string + Args []NamedArg +} + +// RequestPayload defines a payload of queries. +type RequestPayload struct { + Queries []Query `json:"qs"` +} + +// RequestHeader defines a query request header. +type RequestHeader struct { + QueryType QueryType `json:"qt"` + NodeID proto.NodeID `json:"id"` // request node id + DatabaseID proto.DatabaseID `json:"dbid"` // request database id + ConnectionID uint64 `json:"cid"` + SeqNo uint64 `json:"seq"` + Timestamp time.Time `json:"t"` // time in UTC zone + BatchCount uint64 `json:"bc"` // query count in this request + QueriesHash hash.Hash `json:"qh"` // hash of query payload +} + +// QueryKey defines a unique query key of a request. +type QueryKey struct { + NodeID proto.NodeID `json:"id"` + ConnectionID uint64 `json:"cid"` + SeqNo uint64 `json:"seq"` +} + +// String implements fmt.Stringer for logging purposes. +func (k *QueryKey) String() string { + return fmt.Sprintf("%s#%016x#%016x", string(k.NodeID[:8]), k.ConnectionID, k.SeqNo) +} + +// SignedRequestHeader defines a signed query request header. +type SignedRequestHeader struct { + RequestHeader + verifier.DefaultHashSignVerifierImpl +} + +// Request defines a complete query request. +type Request struct { + proto.Envelope + Header SignedRequestHeader `json:"h"` + Payload RequestPayload `json:"p"` +} + +// String implements fmt.Stringer for logging purposes. +func (t QueryType) String() string { + switch t { + case ReadQuery: + return "read" + case WriteQuery: + return "write" + default: + return "unknown" + } +} + +// Verify checks hash and signature in request header. +func (sh *SignedRequestHeader) Verify() (err error) { + return sh.DefaultHashSignVerifierImpl.Verify(&sh.RequestHeader) +} + +// Sign the request. +func (sh *SignedRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + return sh.DefaultHashSignVerifierImpl.Sign(&sh.RequestHeader, signer) +} + +// Verify checks hash and signature in whole request. +func (r *Request) Verify() (err error) { + // verify payload hash in signed header + if err = verifyHash(&r.Payload, &r.Header.QueriesHash); err != nil { + return + } + // verify header sign + return r.Header.Verify() +} + +// Sign the request. +func (r *Request) Sign(signer *asymmetric.PrivateKey) (err error) { + // set query count + r.Header.BatchCount = uint64(len(r.Payload.Queries)) + + // compute payload hash + if err = buildHash(&r.Payload, &r.Header.QueriesHash); err != nil { + return + } + + return r.Header.Sign(signer) +} + +// GetQueryKey returns a unique query key of this request.
+func (sh *SignedRequestHeader) GetQueryKey() QueryKey { + return QueryKey{ + NodeID: sh.NodeID, + ConnectionID: sh.ConnectionID, + SeqNo: sh.SeqNo, + } +} diff --git a/types/request_type_gen.go b/types/request_type_gen.go new file mode 100644 index 000000000..709e66bb9 --- /dev/null +++ b/types/request_type_gen.go @@ -0,0 +1,239 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z NamedArg) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + o, err = hsp.AppendIntf(o, z.Value) + if err != nil { + return + } + o = append(o, 0x82) + o = hsp.AppendString(o, z.Name) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z NamedArg) Msgsize() (s int) { + s = 1 + 6 + hsp.GuessSize(z.Value) + 5 + hsp.StringPrefixSize + len(z.Name) + return +} + +// MarshalHash marshals for hash +func (z *Query) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Args))) + for za0001 := range z.Args { + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendString(o, z.Args[za0001].Name) + o = append(o, 0x82) + o, err = hsp.AppendIntf(o, z.Args[za0001].Value) + if err != nil { + return + } + } + o = append(o, 0x82) + o = hsp.AppendString(o, z.Pattern) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Query) Msgsize() (s int) { + s = 1 + 5 + hsp.ArrayHeaderSize + for za0001 := range z.Args { + s += 1 + 5 + hsp.StringPrefixSize + len(z.Args[za0001].Name) + 6 + hsp.GuessSize(z.Args[za0001].Value) + } + s += 8 + hsp.StringPrefixSize + len(z.Pattern) + return +} + +// MarshalHash marshals for hash +func (z *QueryKey) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendUint64(o, z.ConnectionID) + o = append(o, 0x83) + o = hsp.AppendUint64(o, z.SeqNo) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *QueryKey) Msgsize() (s int) { + s = 1 + 7 + z.NodeID.Msgsize() + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z QueryType) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + o = hsp.AppendInt32(o, int32(z)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z QueryType) Msgsize() (s int) { + s = hsp.Int32Size + return +} + +// MarshalHash marshals for hash +func (z *Request) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + // map header, size 1 + o = append(o, 0x83, 0x83, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries))) + for za0001 := range z.Payload.Queries { + if oTemp, err := z.Payload.Queries[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + // map header, size 2 + o = append(o, 
0x83, 0x82, 0x82) + if oTemp, err := z.Header.RequestHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Request) Msgsize() (s int) { + s = 1 + 8 + 1 + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Payload.Queries { + s += z.Payload.Queries[za0001].Msgsize() + } + s += 7 + 1 + 14 + z.Header.RequestHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *RequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 8 + o = append(o, 0x88, 0x88) + o = hsp.AppendInt32(o, int32(z.QueryType)) + o = append(o, 0x88) + if oTemp, err := z.QueriesHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + o = hsp.AppendTime(o, z.Timestamp) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.ConnectionID) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.SeqNo) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.BatchCount) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *RequestHeader) Msgsize() (s int) { + s = 1 + 10 + hsp.Int32Size + 12 + z.QueriesHash.Msgsize() + 11 + z.DatabaseID.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + 11 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z *RequestPayload) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Queries))) + for za0001 := range z.Queries { + if oTemp, err := z.Queries[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *RequestPayload) Msgsize() (s int) { + s = 1 + 8 + hsp.ArrayHeaderSize + for za0001 := range z.Queries { + s += z.Queries[za0001].Msgsize() + } + return +} + +// MarshalHash marshals for hash +func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.RequestHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedRequestHeader) 
Msgsize() (s int) { + s = 1 + 14 + z.RequestHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/worker/types/request_type_gen_test.go b/types/request_type_gen_test.go similarity index 100% rename from worker/types/request_type_gen_test.go rename to types/request_type_gen_test.go diff --git a/types/response_type.go b/types/response_type.go new file mode 100644 index 000000000..968385eca --- /dev/null +++ b/types/response_type.go @@ -0,0 +1,110 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/pkg/errors" +) + +//go:generate hsp + +// ResponseRow defines single row of query response. +type ResponseRow struct { + Values []interface{} +} + +// ResponsePayload defines column names and rows of query response. +type ResponsePayload struct { + Columns []string `json:"c"` + DeclTypes []string `json:"t"` + Rows []ResponseRow `json:"r"` +} + +// ResponseHeader defines a query response header. +type ResponseHeader struct { + Request SignedRequestHeader `json:"r"` + NodeID proto.NodeID `json:"id"` // response node id + Timestamp time.Time `json:"t"` // time in UTC zone + RowCount uint64 `json:"c"` // response row count of payload + LogOffset uint64 `json:"o"` // request log offset + LastInsertID int64 `json:"l"` // last insert id + AffectedRows int64 `json:"a"` // affected rows + PayloadHash hash.Hash `json:"dh"` // hash of query response payload +} + +// SignedResponseHeader defines a signed query response header. +type SignedResponseHeader struct { + ResponseHeader + verifier.DefaultHashSignVerifierImpl +} + +// Response defines a complete query response. +type Response struct { + Header SignedResponseHeader `json:"h"` + Payload ResponsePayload `json:"p"` +} + +// Verify checks hash and signature in response header. +func (sh *SignedResponseHeader) Verify() (err error) { + // verify original request header + if err = sh.Request.Verify(); err != nil { + return + } + + return sh.DefaultHashSignVerifierImpl.Verify(&sh.ResponseHeader) +} + +// Sign the response. +func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + // make sure original header is signed + if err = sh.Request.Verify(); err != nil { + err = errors.Wrapf(err, "SignedResponseHeader %v", sh) + return + } + + return sh.DefaultHashSignVerifierImpl.Sign(&sh.ResponseHeader, signer) +} + +// Verify checks hash and signature in whole response. +func (sh *Response) Verify() (err error) { + // verify data hash in header + if err = verifyHash(&sh.Payload, &sh.Header.PayloadHash); err != nil { + return + } + + return sh.Header.Verify() +} + +// Sign the response.
+func (sh *Response) Sign(signer *asymmetric.PrivateKey) (err error) { + // set rows count + sh.Header.RowCount = uint64(len(sh.Payload.Rows)) + + // build hash in header + if err = buildHash(&sh.Payload, &sh.Header.PayloadHash); err != nil { + return + } + + // sign the request + return sh.Header.Sign(signer) +} diff --git a/types/response_type_gen.go b/types/response_type_gen.go new file mode 100644 index 000000000..78c9db9c3 --- /dev/null +++ b/types/response_type_gen.go @@ -0,0 +1,184 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Response) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Payload.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + // map header, size 2 + o = append(o, 0x82, 0x82, 0x82) + if oTemp, err := z.Header.ResponseHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Response) Msgsize() (s int) { + s = 1 + 8 + z.Payload.Msgsize() + 7 + 1 + 15 + z.Header.ResponseHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *ResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 8 + o = append(o, 0x88, 0x88) + if oTemp, err := z.Request.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + if oTemp, err := z.PayloadHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + o = hsp.AppendInt64(o, z.LastInsertID) + o = append(o, 0x88) + o = hsp.AppendInt64(o, z.AffectedRows) + o = append(o, 0x88) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x88) + o = hsp.AppendTime(o, z.Timestamp) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.RowCount) + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.LogOffset) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ResponseHeader) Msgsize() (s int) { + s = 1 + 8 + z.Request.Msgsize() + 12 + z.PayloadHash.Msgsize() + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + hsp.Uint64Size + 10 + hsp.Uint64Size + return +} + +// MarshalHash marshals for hash +func (z *ResponsePayload) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + o = append(o, 0x83, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.Rows))) + for za0003 := range z.Rows { + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Rows[za0003].Values))) + for za0004 := range z.Rows[za0003].Values { + o, err = hsp.AppendIntf(o, z.Rows[za0003].Values[za0004]) + if err != nil { + return + } + } + } + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, 
uint32(len(z.Columns))) + for za0001 := range z.Columns { + o = hsp.AppendString(o, z.Columns[za0001]) + } + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.DeclTypes))) + for za0002 := range z.DeclTypes { + o = hsp.AppendString(o, z.DeclTypes[za0002]) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ResponsePayload) Msgsize() (s int) { + s = 1 + 5 + hsp.ArrayHeaderSize + for za0003 := range z.Rows { + s += 1 + 7 + hsp.ArrayHeaderSize + for za0004 := range z.Rows[za0003].Values { + s += hsp.GuessSize(z.Rows[za0003].Values[za0004]) + } + } + s += 8 + hsp.ArrayHeaderSize + for za0001 := range z.Columns { + s += hsp.StringPrefixSize + len(z.Columns[za0001]) + } + s += 10 + hsp.ArrayHeaderSize + for za0002 := range z.DeclTypes { + s += hsp.StringPrefixSize + len(z.DeclTypes[za0002]) + } + return +} + +// MarshalHash marshals for hash +func (z *ResponseRow) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 1 + o = append(o, 0x81, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Values))) + for za0001 := range z.Values { + o, err = hsp.AppendIntf(o, z.Values[za0001]) + if err != nil { + return + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *ResponseRow) Msgsize() (s int) { + s = 1 + 7 + hsp.ArrayHeaderSize + for za0001 := range z.Values { + s += hsp.GuessSize(z.Values[za0001]) + } + return +} + +// MarshalHash marshals for hash +func (z *SignedResponseHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.ResponseHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedResponseHeader) Msgsize() (s int) { + s = 1 + 15 + z.ResponseHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/worker/types/response_type_gen_test.go b/types/response_type_gen_test.go similarity index 100% rename from worker/types/response_type_gen_test.go rename to types/response_type_gen_test.go diff --git a/types/types.proto b/types/types.proto deleted file mode 100644 index a2abe9f05..000000000 --- a/types/types.proto +++ /dev/null @@ -1,112 +0,0 @@ -syntax = "proto3"; -package types; - -message Signature { - string R = 1; - string S = 2; -} - -message PublicKey { - bytes PublicKey = 1; -} - -message Hash { - bytes Hash = 1; -} - -message UtxoEntry { - bool IsCoinbase = 1; - bool FromMainChain = 2; - uint32 BlockHeight = 3; - map SparseOutputs = 4; -} - -message Utxo { - UtxoHeader UtxoHeader = 1; - bool Spent = 2; - uint64 amount = 3; -} - -message UtxoHeader { - int32 Version = 1; - Hash PrevTxHash = 2; - PublicKey Signee = 3; - Signature Signature = 4; -} - -enum TxType { - QUERY = 0; - STORAGE = 1; -} - -message Tx { - repeated Utxo UtxoIn = 1; - repeated Utxo UtxoOut = 2; - TxType type = 3; - string Content = 4; -} - -message NodeID { - string NodeID = 1; -} - -message AccountAddress { - string AccountAddress = 1; -} - -message Header { - int32 Version = 1; - NodeID Producer = 2; - Hash Root = 3; - 
Hash Parent = 4; - Hash MerkleRoot = 5; - int64 Timestamp = 6; -} - -message SignedHeader { - Header Header = 1; - Hash BlockHash = 2; - PublicKey Signee = 3; - Signature Signature = 4; -} - -message State { - Hash Head = 1; - int32 Height = 2; -} - -message BPTx { - Hash TxHash = 1; - BPTxData TxData = 2; -} - -message BPTxData { - uint64 AccountNonce = 1; - AccountAddress Recipient = 2; - bytes Amount = 3; - bytes Payload = 4; - - Signature Signature = 5; - PublicKey Signee = 6; -} - -message BPHeader { - int32 Version = 1; - AccountAddress Producer = 2; - Hash Root = 3; - Hash Parent = 4; - Hash MerkleRoot = 5; - int64 Timestamp = 6; -} - -message BPSignedHeader { - BPHeader Header = 1; - Hash BlockHash = 2; - PublicKey Signee = 3; - Signature Signature = 4; -} - -message BPBlock { - BPSignedHeader Header = 1; - repeated BPTx Tx = 2; -} diff --git a/types/types_test.go b/types/types_test.go new file mode 100644 index 000000000..39960d09d --- /dev/null +++ b/types/types_test.go @@ -0,0 +1,692 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "fmt" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/pkg/errors" + . 
"github.com/smartystreets/goconvey/convey" +) + +func getCommKeys() (*asymmetric.PrivateKey, *asymmetric.PublicKey) { + testPriv := []byte{ + 0xea, 0xf0, 0x2c, 0xa3, 0x48, 0xc5, 0x24, 0xe6, + 0x39, 0x26, 0x55, 0xba, 0x4d, 0x29, 0x60, 0x3c, + 0xd1, 0xa7, 0x34, 0x7d, 0x9d, 0x65, 0xcf, 0xe9, + 0x3c, 0xe1, 0xeb, 0xff, 0xdc, 0xa2, 0x26, 0x94, + } + return asymmetric.PrivKeyFromBytes(testPriv) +} + +func TestSignedRequestHeader_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + req := &SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("node"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + } + + var err error + + err = req.Sign(privKey) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = req.Verify() + So(err, ShouldBeNil) + + // modify structure + req.Timestamp = req.Timestamp.Add(time.Second) + + err = req.Verify() + So(err, ShouldNotBeNil) + + s, err := req.MarshalHash() + So(err, ShouldBeNil) + So(s, ShouldNotBeEmpty) + }) + }) +} + +func TestRequest_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + req := &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("node"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + Payload: RequestPayload{ + Queries: []Query{ + { + Pattern: "INSERT INTO test VALUES(?)", + Args: []NamedArg{ + { + Value: 1, + }, + }, + }, + { + Pattern: "INSERT INTO test VALUES(?)", + Args: []NamedArg{ + { + Value: "happy", + }, + }, + }, + }, + }, + } + + var err error + + // sign + err = req.Sign(privKey) + So(err, ShouldBeNil) + So(req.Header.BatchCount, ShouldEqual, uint64(len(req.Payload.Queries))) + + // test queries hash + err = verifyHash(&req.Payload, &req.Header.QueriesHash) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = req.Verify() + So(err, ShouldBeNil) + + Convey("header change", func() { + // modify structure + req.Header.Timestamp = req.Header.Timestamp.Add(time.Second) + + err = req.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("header change with invalid queries hash", func() { + req.Payload.Queries = append(req.Payload.Queries, + Query{ + Pattern: "select 1", + }, + ) + + err = req.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestResponse_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + res := &Response{ + Header: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + Payload: ResponsePayload{ + Columns: []string{ + "test_integer", + "test_boolean", + "test_time", + "test_nil", + "test_float", + "test_binary_string", + "test_string", + "test_empty_time", + }, + DeclTypes: []string{ + "INTEGER", + "BOOLEAN", + "DATETIME", + "INTEGER", + "FLOAT", + "BLOB", + "TEXT", + "DATETIME", + }, + Rows: []ResponseRow{ + { + Values: []interface{}{ + int(1), + true, + time.Now().UTC(), + nil, + 
float64(1.0001), + "11111\0001111111", + "11111111111111", + time.Time{}, + }, + }, + }, + }, + } + + var err error + + // sign directly, embedded original request is not filled + err = res.Sign(privKey) + So(err, ShouldNotBeNil) + So(errors.Cause(err), ShouldBeIn, []error{ + verifier.ErrHashValueNotMatch, + verifier.ErrSignatureNotMatch, + }) + + // sign original request first + err = res.Header.Request.Sign(privKey) + So(err, ShouldBeNil) + + // sign again + err = res.Sign(privKey) + So(err, ShouldBeNil) + + // test hash + err = verifyHash(&res.Payload, &res.Header.PayloadHash) + So(err, ShouldBeNil) + + // verify + Convey("verify", func() { + err = res.Verify() + So(err, ShouldBeNil) + + Convey("encode/decode verify", func() { + buf, err := utils.EncodeMsgPack(res) + So(err, ShouldBeNil) + var r *Response + err = utils.DecodeMsgPack(buf.Bytes(), &r) + So(err, ShouldBeNil) + err = r.Verify() + So(err, ShouldBeNil) + }) + Convey("request change", func() { + res.Header.Request.BatchCount = 200 + + err = res.Verify() + So(err, ShouldNotBeNil) + }) + Convey("payload change", func() { + res.Payload.DeclTypes[0] = "INT" + + err = res.Verify() + So(err, ShouldNotBeNil) + }) + Convey("header change", func() { + res.Header.Timestamp = res.Header.Timestamp.Add(time.Second) + + err = res.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestAck_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + ack := &Ack{ + Header: SignedAckHeader{ + AckHeader: AckHeader{ + Response: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + Timestamp: time.Now().UTC(), + }, + }, + } + + var err error + + Convey("get query key", func() { + key := ack.Header.SignedRequestHeader().GetQueryKey() + So(key.NodeID, ShouldEqual, ack.Header.SignedRequestHeader().NodeID) + So(key.ConnectionID, ShouldEqual, ack.Header.SignedRequestHeader().ConnectionID) + So(key.SeqNo, ShouldEqual, ack.Header.SignedRequestHeader().SeqNo) + }) + + // sign directly, embedded original response is not filled + err = ack.Sign(privKey, false) + So(err, ShouldBeNil) + err = ack.Sign(privKey, true) + So(err, ShouldNotBeNil) + So(errors.Cause(err), ShouldBeIn, []error{ + verifier.ErrHashValueNotMatch, + verifier.ErrSignatureNotMatch, + }) + + // sign nested structure, step by step + // this is not required during runtime + // during runtime, nested structures is signed and provided by peers + err = ack.Header.Response.Request.Sign(privKey) + So(err, ShouldBeNil) + err = ack.Header.Response.Sign(privKey) + So(err, ShouldBeNil) + err = ack.Sign(privKey, true) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = ack.Verify() + So(err, ShouldBeNil) + + Convey("request change", func() { + ack.Header.Response.Request.QueryType = ReadQuery + + err = ack.Verify() + So(err, ShouldNotBeNil) + }) + Convey("response change", func() { + ack.Header.Response.RowCount = 100 + + err = ack.Verify() + So(err, ShouldNotBeNil) + }) + Convey("header change", func() { + 
ack.Header.Timestamp = ack.Header.Timestamp.Add(time.Second) + + err = ack.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestNoAckReport_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + noAck := &NoAckReport{ + Header: SignedNoAckReportHeader{ + NoAckReportHeader: NoAckReportHeader{ + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + Response: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + }, + }, + } + + var err error + + // sign directly, embedded original response/request is not filled + err = noAck.Sign(privKey) + So(err, ShouldNotBeNil) + So(errors.Cause(err), ShouldBeIn, []error{ + verifier.ErrHashValueNotMatch, + verifier.ErrSignatureNotMatch, + }) + + // sign nested structure + err = noAck.Header.Response.Request.Sign(privKey) + So(err, ShouldBeNil) + err = noAck.Header.Response.Sign(privKey) + So(err, ShouldBeNil) + err = noAck.Sign(privKey) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = noAck.Verify() + So(err, ShouldBeNil) + + Convey("request change", func() { + noAck.Header.Response.Request.QueryType = ReadQuery + + err = noAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("response change", func() { + noAck.Header.Response.RowCount = 100 + + err = noAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("header change", func() { + noAck.Header.Timestamp = noAck.Header.Timestamp.Add(time.Second) + + err = noAck.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestAggrNoAckReport_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + aggrNoAck := &AggrNoAckReport{ + Header: SignedAggrNoAckReportHeader{ + AggrNoAckReportHeader: AggrNoAckReportHeader{ + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Timestamp: time.Now().UTC(), + Reports: []SignedNoAckReportHeader{ + { + NoAckReportHeader: NoAckReportHeader{ + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + Response: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + }, + }, + { + NoAckReportHeader: NoAckReportHeader{ + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Timestamp: time.Now().UTC(), + Response: SignedResponseHeader{ + ResponseHeader: ResponseHeader{ + Request: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: 
proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), + }, + }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), + }, + }, + }, + }, + }, + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + }, + }, + }, + }, + }, + } + + var err error + + // sign directly, embedded original response/request is not filled + err = aggrNoAck.Sign(privKey) + So(err, ShouldNotBeNil) + So(errors.Cause(err), ShouldBeIn, []error{ + verifier.ErrHashValueNotMatch, + verifier.ErrSignatureNotMatch, + }) + + // sign nested structure + err = aggrNoAck.Header.Reports[0].Response.Request.Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[1].Response.Request.Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[0].Response.Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[1].Response.Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[0].Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Header.Reports[1].Sign(privKey) + So(err, ShouldBeNil) + err = aggrNoAck.Sign(privKey) + So(err, ShouldBeNil) + + Convey("verify", func() { + err = aggrNoAck.Verify() + So(err, ShouldBeNil) + + Convey("request change", func() { + aggrNoAck.Header.Reports[0].Response.Request.QueryType = ReadQuery + + err = aggrNoAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("response change", func() { + aggrNoAck.Header.Reports[0].Response.RowCount = 1000 + + err = aggrNoAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("report change", func() { + aggrNoAck.Header.Reports[0].Timestamp = aggrNoAck.Header.Reports[0].Timestamp.Add(time.Second) + + err = aggrNoAck.Verify() + So(err, ShouldNotBeNil) + }) + + Convey("header change", func() { + aggrNoAck.Header.Timestamp = aggrNoAck.Header.Timestamp.Add(time.Second) + + err = aggrNoAck.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestInitServiceResponse_Sign(t *testing.T) { + privKey, _ := getCommKeys() + + Convey("sign", t, func() { + var err error + + initServiceResponse := &InitServiceResponse{ + Header: SignedInitServiceResponseHeader{ + InitServiceResponseHeader: InitServiceResponseHeader{ + Instances: []ServiceInstance{ + { + DatabaseID: proto.DatabaseID("db1"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + }, + }, + }, + // TODO(xq262144), should integrated with genesis block serialization test + GenesisBlock: nil, + }, + }, + }, + }, + } + + // sign + err = initServiceResponse.Sign(privKey) + + Convey("verify", func() { + err = initServiceResponse.Verify() + So(err, ShouldBeNil) + + Convey("header change", func() { + initServiceResponse.Header.Instances[0].DatabaseID = proto.DatabaseID("db2") + + err = initServiceResponse.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestUpdateService_Sign(t *testing.T) { + privKey, _ := 
getCommKeys() + + Convey("sign", t, func() { + var err error + + updateServiceReq := &UpdateService{ + Header: SignedUpdateServiceHeader{ + UpdateServiceHeader: UpdateServiceHeader{ + Op: CreateDB, + Instance: ServiceInstance{ + DatabaseID: proto.DatabaseID("db1"), + Peers: &proto.Peers{ + PeersHeader: proto.PeersHeader{ + Term: uint64(1), + Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + Servers: []proto.NodeID{ + proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), + proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + }, + }, + }, + // TODO(xq262144), should integrated with genesis block serialization test + GenesisBlock: nil, + }, + }, + }, + } + + // sign + err = updateServiceReq.Sign(privKey) + + Convey("verify", func() { + err = updateServiceReq.Verify() + So(err, ShouldBeNil) + + Convey("header change", func() { + updateServiceReq.Header.Instance.DatabaseID = proto.DatabaseID("db2") + + err = updateServiceReq.Verify() + So(err, ShouldNotBeNil) + }) + }) + }) +} + +func TestOther_MarshalHash(t *testing.T) { + Convey("marshal hash", t, func() { + tm := UpdateType(1) + s, err := tm.MarshalHash() + So(err, ShouldBeNil) + So(s, ShouldNotBeEmpty) + + tm2 := QueryType(1) + s, err = tm2.MarshalHash() + So(err, ShouldBeNil) + So(s, ShouldNotBeEmpty) + }) +} + +func TestQueryTypeStringer(t *testing.T) { + Convey("Query type stringer should return expected string", t, func() { + var cases = [...]struct { + i fmt.Stringer + s string + }{ + { + i: ReadQuery, + s: "read", + }, { + i: WriteQuery, + s: "write", + }, { + i: QueryType(0xffff), + s: "unknown", + }, + } + for _, v := range cases { + So(v.s, ShouldEqual, fmt.Sprintf("%v", v.i)) + } + }) +} diff --git a/types/update_service_type.go b/types/update_service_type.go new file mode 100644 index 000000000..7eca089bf --- /dev/null +++ b/types/update_service_type.go @@ -0,0 +1,79 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +//go:generate hsp + +// UpdateType defines service update type. +type UpdateType int32 + +const ( + // CreateDB indicates create database operation. + CreateDB UpdateType = iota + // UpdateDB indicates database peers update operation. + UpdateDB + // DropDB indicates drop database operation. + DropDB +) + +// UpdateServiceHeader defines service update header. +type UpdateServiceHeader struct { + Op UpdateType + Instance ServiceInstance +} + +// SignedUpdateServiceHeader defines signed service update header. +type SignedUpdateServiceHeader struct { + UpdateServiceHeader + verifier.DefaultHashSignVerifierImpl +} + +// UpdateService defines service update type. 
+type UpdateService struct { + proto.Envelope + Header SignedUpdateServiceHeader +} + +// UpdateServiceResponse defines empty response entity. +type UpdateServiceResponse struct{} + +// Verify checks hash and signature in update service header. +func (sh *SignedUpdateServiceHeader) Verify() (err error) { + return sh.DefaultHashSignVerifierImpl.Verify(&sh.UpdateServiceHeader) +} + +// Sign the request. +func (sh *SignedUpdateServiceHeader) Sign(signer *asymmetric.PrivateKey) (err error) { + return sh.DefaultHashSignVerifierImpl.Sign(&sh.UpdateServiceHeader, signer) +} + +// Verify checks hash and signature in update service. +func (s *UpdateService) Verify() error { + return s.Header.Verify() +} + +// Sign the request. +func (s *UpdateService) Sign(signer *asymmetric.PrivateKey) (err error) { + // sign + return s.Header.Sign(signer) +} diff --git a/types/update_service_type_gen.go b/types/update_service_type_gen.go new file mode 100644 index 000000000..c134c6635 --- /dev/null +++ b/types/update_service_type_gen.go @@ -0,0 +1,113 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *SignedUpdateServiceHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + // map header, size 2 + o = append(o, 0x82, 0x82, 0x82, 0x82) + o = hsp.AppendInt32(o, int32(z.UpdateServiceHeader.Op)) + o = append(o, 0x82) + if oTemp, err := z.UpdateServiceHeader.Instance.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedUpdateServiceHeader) Msgsize() (s int) { + s = 1 + 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *UpdateService) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *UpdateService) Msgsize() (s int) { + s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *UpdateServiceHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.Instance.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + o = hsp.AppendInt32(o, int32(z.Op)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *UpdateServiceHeader) Msgsize() (s int) { + s = 1 + 9 + z.Instance.Msgsize() + 3 + hsp.Int32Size + return +} + +// MarshalHash marshals for hash +func (z UpdateServiceResponse) 
MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 0 + o = append(o, 0x80) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z UpdateServiceResponse) Msgsize() (s int) { + s = 1 + return +} + +// MarshalHash marshals for hash +func (z UpdateType) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + o = hsp.AppendInt32(o, int32(z)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z UpdateType) Msgsize() (s int) { + s = hsp.Int32Size + return +} diff --git a/worker/types/update_service_type_gen_test.go b/types/update_service_type_gen_test.go similarity index 100% rename from worker/types/update_service_type_gen_test.go rename to types/update_service_type_gen_test.go diff --git a/types/util.go b/types/util.go new file mode 100644 index 000000000..ab09e4310 --- /dev/null +++ b/types/util.go @@ -0,0 +1,48 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/pkg/errors" +) + +type canMarshalHash interface { + MarshalHash() ([]byte, error) +} + +func verifyHash(data canMarshalHash, h *hash.Hash) (err error) { + var newHash hash.Hash + if err = buildHash(data, &newHash); err != nil { + return + } + if !newHash.IsEqual(h) { + return errors.Cause(verifier.ErrHashValueNotMatch) + } + return +} + +func buildHash(data canMarshalHash, h *hash.Hash) (err error) { + var hashBytes []byte + if hashBytes, err = data.MarshalHash(); err != nil { + return + } + newHash := hash.THashH(hashBytes) + copy(h[:], newHash[:]) + return +} diff --git a/types/xxx_test.go b/types/xxx_test.go new file mode 100644 index 000000000..55d87c586 --- /dev/null +++ b/types/xxx_test.go @@ -0,0 +1,136 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package types + +import ( + "io/ioutil" + "math/rand" + "os" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +var ( + genesisHash = hash.Hash{} +) + +func setup() { + rand.Seed(time.Now().UnixNano()) + rand.Read(genesisHash[:]) + f, err := ioutil.TempFile("", "keystore") + + if err != nil { + panic(err) + } + + f.Close() + + if err = kms.InitPublicKeyStore(f.Name(), nil); err != nil { + panic(err) + } + + kms.Unittest = true + + if priv, pub, err := asymmetric.GenSecp256k1KeyPair(); err == nil { + kms.SetLocalKeyPair(priv, pub) + } else { + panic(err) + } + + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) +} + +func createRandomString(offset, length int, s *string) { + buff := make([]byte, rand.Intn(length)+offset) + rand.Read(buff) + *s = string(buff) +} + +func createRandomStrings(offset, length, soffset, slength int) (s []string) { + s = make([]string, rand.Intn(length)+offset) + + for i := range s { + createRandomString(soffset, slength, &s[i]) + } + + return +} + +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *Block, err error) { + // Generate key pair + priv, pub, err := asymmetric.GenSecp256k1KeyPair() + + if err != nil { + return + } + + h := hash.Hash{} + rand.Read(h[:]) + + b = &Block{ + SignedHeader: SignedHeader{ + Header: Header{ + Version: 0x01000000, + Producer: proto.NodeID(h.String()), + GenesisHash: genesisHash, + ParentHash: parent, + Timestamp: time.Now().UTC(), + }, + }, + } + + if isGenesis { + // Compute nonce with public key + nonceCh := make(chan cpuminer.NonceInfo) + quitCh := make(chan struct{}) + miner := cpuminer.NewCPUMiner(quitCh) + go miner.ComputeBlockNonce(cpuminer.MiningBlock{ + Data: pub.Serialize(), + NonceChan: nonceCh, + Stop: nil, + }, cpuminer.Uint256{A: 0, B: 0, C: 0, D: 0}, 4) + nonce := <-nonceCh + close(quitCh) + close(nonceCh) + // Add public key to KMS + id := cpuminer.HashBlock(pub.Serialize(), nonce.Nonce) + b.SignedHeader.Header.Producer = proto.NodeID(id.String()) + + if err = kms.SetPublicKey(proto.NodeID(id.String()), nonce.Nonce, pub); err != nil { + return nil, err + } + + // Set genesis hash as zero value + b.SignedHeader.GenesisHash = hash.Hash{} + } + + err = b.PackAndSignBlock(priv) + return +} + +func TestMain(m *testing.M) { + setup() + os.Exit(m.Run()) +} diff --git a/utils/profiler.go b/utils/profiler.go index 013dd3e7c..01b5f9b67 100644 --- a/utils/profiler.go +++ b/utils/profiler.go @@ -48,7 +48,7 @@ func StartProfile(cpuprofile, memprofile string) error { log.WithField("file", memprofile).WithError(err).Error("failed to create memory profile file") return err } - log.WithField("file", cpuprofile).WithError(err).Info("writing memory profiling to file") + log.WithField("file", memprofile).WithError(err).Info("writing memory profiling to file") prof.mem = f runtime.MemProfileRate = 4096 } @@ -63,7 +63,7 @@ func StopProfile() { log.Info("CPU profiling stopped") } if prof.mem != nil { - pprof.Lookup("heap").WriteTo(prof.mem, 0) + pprof.WriteHeapProfile(prof.mem) prof.mem.Close() log.Info("memory profiling stopped") } diff --git a/vendor/bazil.org/fuse/.gitattributes b/vendor/bazil.org/fuse/.gitattributes new file mode 100644 index 000000000..b65f2a9ff --- /dev/null +++ b/vendor/bazil.org/fuse/.gitattributes @@ -0,0 +1,2 
@@ +*.go filter=gofmt +*.cgo filter=gofmt diff --git a/vendor/bazil.org/fuse/LICENSE b/vendor/bazil.org/fuse/LICENSE new file mode 100644 index 000000000..4ac7cd838 --- /dev/null +++ b/vendor/bazil.org/fuse/LICENSE @@ -0,0 +1,93 @@ +Copyright (c) 2013-2015 Tommi Virtanen. +Copyright (c) 2009, 2011, 2012 The Go Authors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + +The following included software components have additional copyright +notices and license terms that may differ from the above. + + +File fuse.go: + +// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, +// which carries this notice: +// +// The files in this directory are subject to the following license. +// +// The author of this software is Russ Cox. +// +// Copyright (c) 2006 Russ Cox +// +// Permission to use, copy, modify, and distribute this software for any +// purpose without fee is hereby granted, provided that this entire notice +// is included in all copies of any software which is or includes a copy +// or modification of this software and in all copies of the supporting +// documentation for such software. +// +// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY +// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS +// FITNESS FOR ANY PARTICULAR PURPOSE. + + +File fuse_kernel.go: + +// Derived from FUSE's fuse_kernel.h +/* + This file defines the kernel interface of FUSE + Copyright (C) 2001-2007 Miklos Szeredi + + + This -- and only this -- header file may also be distributed under + the terms of the BSD Licence as follows: + + Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. +*/ diff --git a/vendor/bazil.org/fuse/README.md b/vendor/bazil.org/fuse/README.md new file mode 100644 index 000000000..8c6d556ee --- /dev/null +++ b/vendor/bazil.org/fuse/README.md @@ -0,0 +1,23 @@ +bazil.org/fuse -- Filesystems in Go +=================================== + +`bazil.org/fuse` is a Go library for writing FUSE userspace +filesystems. + +It is a from-scratch implementation of the kernel-userspace +communication protocol, and does not use the C library from the +project called FUSE. `bazil.org/fuse` embraces Go fully for safety and +ease of programming. + +Here’s how to get going: + + go get bazil.org/fuse + +Website: http://bazil.org/fuse/ + +Github repository: https://github.com/bazil/fuse + +API docs: http://godoc.org/bazil.org/fuse + +Our thanks to Russ Cox for his fuse library, which this project is +based on. diff --git a/vendor/bazil.org/fuse/buffer.go b/vendor/bazil.org/fuse/buffer.go new file mode 100644 index 000000000..bb1d2b776 --- /dev/null +++ b/vendor/bazil.org/fuse/buffer.go @@ -0,0 +1,35 @@ +package fuse + +import "unsafe" + +// buffer provides a mechanism for constructing a message from +// multiple segments. +type buffer []byte + +// alloc allocates size bytes and returns a pointer to the new +// segment. +func (w *buffer) alloc(size uintptr) unsafe.Pointer { + s := int(size) + if len(*w)+s > cap(*w) { + old := *w + *w = make([]byte, len(*w), 2*cap(*w)+s) + copy(*w, old) + } + l := len(*w) + *w = (*w)[:l+s] + return unsafe.Pointer(&(*w)[l]) +} + +// reset clears out the contents of the buffer. +func (w *buffer) reset() { + for i := range (*w)[:cap(*w)] { + (*w)[i] = 0 + } + *w = (*w)[:0] +} + +func newBuffer(extra uintptr) buffer { + const hdrSize = unsafe.Sizeof(outHeader{}) + buf := make(buffer, hdrSize, hdrSize+extra) + return buf +} diff --git a/vendor/bazil.org/fuse/debug.go b/vendor/bazil.org/fuse/debug.go new file mode 100644 index 000000000..be9f900d5 --- /dev/null +++ b/vendor/bazil.org/fuse/debug.go @@ -0,0 +1,21 @@ +package fuse + +import ( + "runtime" +) + +func stack() string { + buf := make([]byte, 1024) + return string(buf[:runtime.Stack(buf, false)]) +} + +func nop(msg interface{}) {} + +// Debug is called to output debug messages, including protocol +// traces. The default behavior is to do nothing. +// +// The messages have human-friendly string representations and are +// safe to marshal to JSON. +// +// Implementations must not retain msg. 
+var Debug func(msg interface{}) = nop diff --git a/vendor/bazil.org/fuse/error_darwin.go b/vendor/bazil.org/fuse/error_darwin.go new file mode 100644 index 000000000..a3fb89ca2 --- /dev/null +++ b/vendor/bazil.org/fuse/error_darwin.go @@ -0,0 +1,17 @@ +package fuse + +import ( + "syscall" +) + +const ( + ENOATTR = Errno(syscall.ENOATTR) +) + +const ( + errNoXattr = ENOATTR +) + +func init() { + errnoNames[errNoXattr] = "ENOATTR" +} diff --git a/vendor/bazil.org/fuse/error_freebsd.go b/vendor/bazil.org/fuse/error_freebsd.go new file mode 100644 index 000000000..c6ea6d6e7 --- /dev/null +++ b/vendor/bazil.org/fuse/error_freebsd.go @@ -0,0 +1,15 @@ +package fuse + +import "syscall" + +const ( + ENOATTR = Errno(syscall.ENOATTR) +) + +const ( + errNoXattr = ENOATTR +) + +func init() { + errnoNames[errNoXattr] = "ENOATTR" +} diff --git a/vendor/bazil.org/fuse/error_linux.go b/vendor/bazil.org/fuse/error_linux.go new file mode 100644 index 000000000..6f113e71e --- /dev/null +++ b/vendor/bazil.org/fuse/error_linux.go @@ -0,0 +1,17 @@ +package fuse + +import ( + "syscall" +) + +const ( + ENODATA = Errno(syscall.ENODATA) +) + +const ( + errNoXattr = ENODATA +) + +func init() { + errnoNames[errNoXattr] = "ENODATA" +} diff --git a/vendor/bazil.org/fuse/error_std.go b/vendor/bazil.org/fuse/error_std.go new file mode 100644 index 000000000..398f43fbf --- /dev/null +++ b/vendor/bazil.org/fuse/error_std.go @@ -0,0 +1,31 @@ +package fuse + +// There is very little commonality in extended attribute errors +// across platforms. +// +// getxattr return value for "extended attribute does not exist" is +// ENOATTR on OS X, and ENODATA on Linux and apparently at least +// NetBSD. There may be a #define ENOATTR on Linux too, but the value +// is ENODATA in the actual syscalls. FreeBSD and OpenBSD have no +// ENODATA, only ENOATTR. ENOATTR is not in any of the standards, +// ENODATA exists but is only used for STREAMs. +// +// Each platform will define it a errNoXattr constant, and this file +// will enforce that it implements the right interfaces and hide the +// implementation. +// +// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/getxattr.2.html +// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013090.html +// http://mail-index.netbsd.org/tech-kern/2012/04/30/msg013097.html +// http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html +// http://www.freebsd.org/cgi/man.cgi?query=extattr_get_file&sektion=2 +// http://nixdoc.net/man-pages/openbsd/man2/extattr_get_file.2.html + +// ErrNoXattr is a platform-independent error value meaning the +// extended attribute was not found. It can be used to respond to +// GetxattrRequest and such. +const ErrNoXattr = errNoXattr + +var _ error = ErrNoXattr +var _ Errno = ErrNoXattr +var _ ErrorNumber = ErrNoXattr diff --git a/vendor/bazil.org/fuse/fs/fstestutil/checkdir.go b/vendor/bazil.org/fuse/fs/fstestutil/checkdir.go new file mode 100644 index 000000000..74e5899e9 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/checkdir.go @@ -0,0 +1,70 @@ +package fstestutil + +import ( + "fmt" + "io/ioutil" + "os" +) + +// FileInfoCheck is a function that validates an os.FileInfo according +// to some criteria. 
+type FileInfoCheck func(fi os.FileInfo) error + +type checkDirError struct { + missing map[string]struct{} + extra map[string]os.FileMode +} + +func (e *checkDirError) Error() string { + return fmt.Sprintf("wrong directory contents: missing %v, extra %v", e.missing, e.extra) +} + +// CheckDir checks the contents of the directory at path, making sure +// every directory entry listed in want is present. If the check is +// not nil, it must also pass. +// +// If want contains the impossible filename "", unexpected files are +// checked with that. If the key is not in want, unexpected files are +// an error. +// +// Missing entries, that are listed in want but not seen, are an +// error. +func CheckDir(path string, want map[string]FileInfoCheck) error { + problems := &checkDirError{ + missing: make(map[string]struct{}, len(want)), + extra: make(map[string]os.FileMode), + } + for k := range want { + if k == "" { + continue + } + problems.missing[k] = struct{}{} + } + + fis, err := ioutil.ReadDir(path) + if err != nil { + return fmt.Errorf("cannot read directory: %v", err) + } + + for _, fi := range fis { + check, ok := want[fi.Name()] + if !ok { + check, ok = want[""] + } + if !ok { + problems.extra[fi.Name()] = fi.Mode() + continue + } + delete(problems.missing, fi.Name()) + if check != nil { + if err := check(fi); err != nil { + return fmt.Errorf("check failed: %v: %v", fi.Name(), err) + } + } + } + + if len(problems.missing) > 0 || len(problems.extra) > 0 { + return problems + } + return nil +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/debug.go b/vendor/bazil.org/fuse/fs/fstestutil/debug.go new file mode 100644 index 000000000..df44a0c65 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/debug.go @@ -0,0 +1,65 @@ +package fstestutil + +import ( + "flag" + "log" + "strconv" + + "bazil.org/fuse" +) + +type flagDebug bool + +var debug flagDebug + +var _ = flag.Value(&debug) + +func (f *flagDebug) IsBoolFlag() bool { + return true +} + +func nop(msg interface{}) {} + +func (f *flagDebug) Set(s string) error { + v, err := strconv.ParseBool(s) + if err != nil { + return err + } + *f = flagDebug(v) + if v { + fuse.Debug = logMsg + } else { + fuse.Debug = nop + } + return nil +} + +func (f *flagDebug) String() string { + return strconv.FormatBool(bool(*f)) +} + +func logMsg(msg interface{}) { + log.Printf("FUSE: %s\n", msg) +} + +func init() { + flag.Var(&debug, "fuse.debug", "log FUSE processing details") +} + +// DebugByDefault changes the default of the `-fuse.debug` flag to +// true. +// +// This package registers a command line flag `-fuse.debug` and when +// run with that flag (and activated inside the tests), logs FUSE +// debug messages. +// +// This is disabled by default, as most callers probably won't care +// about FUSE details. Use DebugByDefault for tests where you'd +// normally be passing `-fuse.debug` all the time anyway. +// +// Call from an init function. 
+func DebugByDefault() { + f := flag.Lookup("fuse.debug") + f.DefValue = "true" + f.Value.Set(f.DefValue) +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/doc.go b/vendor/bazil.org/fuse/fs/fstestutil/doc.go new file mode 100644 index 000000000..3f729dddc --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/doc.go @@ -0,0 +1 @@ +package fstestutil // import "bazil.org/fuse/fs/fstestutil" diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mounted.go b/vendor/bazil.org/fuse/fs/fstestutil/mounted.go new file mode 100644 index 000000000..2fae1588a --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mounted.go @@ -0,0 +1,141 @@ +package fstestutil + +import ( + "errors" + "io/ioutil" + "log" + "os" + "testing" + "time" + + "bazil.org/fuse" + "bazil.org/fuse/fs" +) + +// Mount contains information about the mount for the test to use. +type Mount struct { + // Dir is the temporary directory where the filesystem is mounted. + Dir string + + Conn *fuse.Conn + Server *fs.Server + + // Error will receive the return value of Serve. + Error <-chan error + + done <-chan struct{} + closed bool +} + +// Close unmounts the filesystem and waits for fs.Serve to return. Any +// returned error will be stored in Err. It is safe to call Close +// multiple times. +func (mnt *Mount) Close() { + if mnt.closed { + return + } + mnt.closed = true + for tries := 0; tries < 1000; tries++ { + err := fuse.Unmount(mnt.Dir) + if err != nil { + // TODO do more than log? + log.Printf("unmount error: %v", err) + time.Sleep(10 * time.Millisecond) + continue + } + break + } + <-mnt.done + mnt.Conn.Close() + os.Remove(mnt.Dir) +} + +// MountedFunc mounts a filesystem at a temporary directory. The +// filesystem used is constructed by calling a function, to allow +// storing fuse.Conn and fs.Server in the FS. +// +// It also waits until the filesystem is known to be visible (OS X +// workaround). +// +// After successful return, caller must clean up by calling Close. +func MountedFunc(fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) { + dir, err := ioutil.TempDir("", "fusetest") + if err != nil { + return nil, err + } + c, err := fuse.Mount(dir, options...) + if err != nil { + return nil, err + } + server := fs.New(c, conf) + done := make(chan struct{}) + serveErr := make(chan error, 1) + mnt := &Mount{ + Dir: dir, + Conn: c, + Server: server, + Error: serveErr, + done: done, + } + filesys := fn(mnt) + go func() { + defer close(done) + serveErr <- server.Serve(filesys) + }() + + select { + case <-mnt.Conn.Ready: + if err := mnt.Conn.MountError; err != nil { + return nil, err + } + return mnt, nil + case err = <-mnt.Error: + // Serve quit early + if err != nil { + return nil, err + } + return nil, errors.New("Serve exited early") + } +} + +// Mounted mounts the fuse.Server at a temporary directory. +// +// It also waits until the filesystem is known to be visible (OS X +// workaround). +// +// After successful return, caller must clean up by calling Close. +func Mounted(filesys fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) { + fn := func(*Mount) fs.FS { return filesys } + return MountedFunc(fn, conf, options...) +} + +// MountedFuncT mounts a filesystem at a temporary directory, +// directing it's debug log to the testing logger. +// +// See MountedFunc for usage. +// +// The debug log is not enabled by default. Use `-fuse.debug` or call +// DebugByDefault to enable. 
+func MountedFuncT(t testing.TB, fn func(*Mount) fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) { + if conf == nil { + conf = &fs.Config{} + } + if debug && conf.Debug == nil { + conf.Debug = func(msg interface{}) { + t.Logf("FUSE: %s", msg) + } + } + return MountedFunc(fn, conf, options...) +} + +// MountedT mounts the filesystem at a temporary directory, +// directing it's debug log to the testing logger. +// +// See Mounted for usage. +// +// The debug log is not enabled by default. Use `-fuse.debug` or call +// DebugByDefault to enable. +func MountedT(t testing.TB, filesys fs.FS, conf *fs.Config, options ...fuse.MountOption) (*Mount, error) { + fn := func(*Mount) fs.FS { return filesys } + return MountedFuncT(t, fn, conf, options...) +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mountinfo.go b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo.go new file mode 100644 index 000000000..654417bc4 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo.go @@ -0,0 +1,26 @@ +package fstestutil + +// MountInfo describes a mounted file system. +type MountInfo struct { + FSName string + Type string +} + +// GetMountInfo finds information about the mount at mnt. It is +// intended for use by tests only, and only fetches information +// relevant to the current tests. +func GetMountInfo(mnt string) (*MountInfo, error) { + return getMountInfo(mnt) +} + +// cstr converts a nil-terminated C string into a Go string +func cstr(ca []int8) string { + s := make([]byte, 0, len(ca)) + for _, c := range ca { + if c == 0x00 { + break + } + s = append(s, byte(c)) + } + return string(s) +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go new file mode 100644 index 000000000..f987bd8e7 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_darwin.go @@ -0,0 +1,29 @@ +package fstestutil + +import ( + "regexp" + "syscall" +) + +var re = regexp.MustCompile(`\\(.)`) + +// unescape removes backslash-escaping. The escaped characters are not +// mapped in any way; that is, unescape(`\n` ) == `n`. +func unescape(s string) string { + return re.ReplaceAllString(s, `$1`) +} + +func getMountInfo(mnt string) (*MountInfo, error) { + var st syscall.Statfs_t + err := syscall.Statfs(mnt, &st) + if err != nil { + return nil, err + } + i := &MountInfo{ + // osx getmntent(3) fails to un-escape the data, so we do it.. + // this might lead to double-unescaping in the future. fun. + // TestMountOptionFSNameEvilBackslashDouble checks for that. + FSName: unescape(cstr(st.Mntfromname[:])), + } + return i, nil +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_freebsd.go b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_freebsd.go new file mode 100644 index 000000000..f70e9975e --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_freebsd.go @@ -0,0 +1,7 @@ +package fstestutil + +import "errors" + +func getMountInfo(mnt string) (*MountInfo, error) { + return nil, errors.New("FreeBSD has no useful mount information") +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go new file mode 100644 index 000000000..c502cf59b --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/mountinfo_linux.go @@ -0,0 +1,51 @@ +package fstestutil + +import ( + "errors" + "io/ioutil" + "strings" +) + +// Linux /proc/mounts shows current mounts. +// Same format as /etc/fstab. 
Quoting getmntent(3): +// +// Since fields in the mtab and fstab files are separated by whitespace, +// octal escapes are used to represent the four characters space (\040), +// tab (\011), newline (\012) and backslash (\134) in those files when +// they occur in one of the four strings in a mntent structure. +// +// http://linux.die.net/man/3/getmntent + +var fstabUnescape = strings.NewReplacer( + `\040`, "\040", + `\011`, "\011", + `\012`, "\012", + `\134`, "\134", +) + +var errNotFound = errors.New("mount not found") + +func getMountInfo(mnt string) (*MountInfo, error) { + data, err := ioutil.ReadFile("/proc/mounts") + if err != nil { + return nil, err + } + for _, line := range strings.Split(string(data), "\n") { + fields := strings.Fields(line) + if len(fields) < 3 { + continue + } + // Fields are: fsname dir type opts freq passno + fsname := fstabUnescape.Replace(fields[0]) + dir := fstabUnescape.Replace(fields[1]) + fstype := fstabUnescape.Replace(fields[2]) + if mnt == dir { + info := &MountInfo{ + FSName: fsname, + Type: fstype, + } + return info, nil + } + } + return nil, errNotFound +} diff --git a/vendor/bazil.org/fuse/fs/fstestutil/testfs.go b/vendor/bazil.org/fuse/fs/fstestutil/testfs.go new file mode 100644 index 000000000..c1988bf70 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/fstestutil/testfs.go @@ -0,0 +1,55 @@ +package fstestutil + +import ( + "os" + + "bazil.org/fuse" + "bazil.org/fuse/fs" + "golang.org/x/net/context" +) + +// SimpleFS is a trivial FS that just implements the Root method. +type SimpleFS struct { + Node fs.Node +} + +var _ = fs.FS(SimpleFS{}) + +func (f SimpleFS) Root() (fs.Node, error) { + return f.Node, nil +} + +// File can be embedded in a struct to make it look like a file. +type File struct{} + +func (f File) Attr(ctx context.Context, a *fuse.Attr) error { + a.Mode = 0666 + return nil +} + +// Dir can be embedded in a struct to make it look like a directory. +type Dir struct{} + +func (f Dir) Attr(ctx context.Context, a *fuse.Attr) error { + a.Mode = os.ModeDir | 0777 + return nil +} + +// ChildMap is a directory with child nodes looked up from a map. +type ChildMap map[string]fs.Node + +var _ = fs.Node(&ChildMap{}) +var _ = fs.NodeStringLookuper(&ChildMap{}) + +func (f *ChildMap) Attr(ctx context.Context, a *fuse.Attr) error { + a.Mode = os.ModeDir | 0777 + return nil +} + +func (f *ChildMap) Lookup(ctx context.Context, name string) (fs.Node, error) { + child, ok := (*f)[name] + if !ok { + return nil, fuse.ENOENT + } + return child, nil +} diff --git a/vendor/bazil.org/fuse/fs/serve.go b/vendor/bazil.org/fuse/fs/serve.go new file mode 100644 index 000000000..e9fc56590 --- /dev/null +++ b/vendor/bazil.org/fuse/fs/serve.go @@ -0,0 +1,1568 @@ +// FUSE service loop, for servers that wish to use it. + +package fs // import "bazil.org/fuse/fs" + +import ( + "encoding/binary" + "fmt" + "hash/fnv" + "io" + "log" + "reflect" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" +) + +import ( + "bytes" + + "bazil.org/fuse" + "bazil.org/fuse/fuseutil" +) + +const ( + attrValidTime = 1 * time.Minute + entryValidTime = 1 * time.Minute +) + +// TODO: FINISH DOCS + +// An FS is the interface required of a file system. +// +// Other FUSE requests can be handled by implementing methods from the +// FS* interfaces, for example FSStatfser. +type FS interface { + // Root is called to obtain the Node for the file system root. + Root() (Node, error) +} + +type FSStatfser interface { + // Statfs is called to obtain file system metadata. 
+ // It should write that data to resp. + Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error +} + +type FSDestroyer interface { + // Destroy is called when the file system is shutting down. + // + // Linux only sends this request for block device backed (fuseblk) + // filesystems, to allow them to flush writes to disk before the + // unmount completes. + Destroy() +} + +type FSInodeGenerator interface { + // GenerateInode is called to pick a dynamic inode number when it + // would otherwise be 0. + // + // Not all filesystems bother tracking inodes, but FUSE requires + // the inode to be set, and fewer duplicates in general makes UNIX + // tools work better. + // + // Operations where the nodes may return 0 inodes include Getattr, + // Setattr and ReadDir. + // + // If FS does not implement FSInodeGenerator, GenerateDynamicInode + // is used. + // + // Implementing this is useful to e.g. constrain the range of + // inode values used for dynamic inodes. + GenerateInode(parentInode uint64, name string) uint64 +} + +// A Node is the interface required of a file or directory. +// See the documentation for type FS for general information +// pertaining to all methods. +// +// A Node must be usable as a map key, that is, it cannot be a +// function, map or slice. +// +// Other FUSE requests can be handled by implementing methods from the +// Node* interfaces, for example NodeOpener. +// +// Methods returning Node should take care to return the same Node +// when the result is logically the same instance. Without this, each +// Node will get a new NodeID, causing spurious cache invalidations, +// extra lookups and aliasing anomalies. This may not matter for a +// simple, read-only filesystem. +type Node interface { + // Attr fills attr with the standard metadata for the node. + // + // Fields with reasonable defaults are prepopulated. For example, + // all times are set to a fixed moment when the program started. + // + // If Inode is left as 0, a dynamic inode number is chosen. + // + // The result may be cached for the duration set in Valid. + Attr(ctx context.Context, attr *fuse.Attr) error +} + +type NodeGetattrer interface { + // Getattr obtains the standard metadata for the receiver. + // It should store that metadata in resp. + // + // If this method is not implemented, the attributes will be + // generated based on Attr(), with zero values filled in. + Getattr(ctx context.Context, req *fuse.GetattrRequest, resp *fuse.GetattrResponse) error +} + +type NodeSetattrer interface { + // Setattr sets the standard metadata for the receiver. + // + // Note, this is also used to communicate changes in the size of + // the file, outside of Writes. + // + // req.Valid is a bitmask of what fields are actually being set. + // For example, the method should not change the mode of the file + // unless req.Valid.Mode() is true. + Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error +} + +type NodeSymlinker interface { + // Symlink creates a new symbolic link in the receiver, which must be a directory. + // + // TODO is the above true about directories? + Symlink(ctx context.Context, req *fuse.SymlinkRequest) (Node, error) +} + +// This optional request will be called only for symbolic link nodes. +type NodeReadlinker interface { + // Readlink reads a symbolic link. 
+ Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) +} + +type NodeLinker interface { + // Link creates a new directory entry in the receiver based on an + // existing Node. Receiver must be a directory. + Link(ctx context.Context, req *fuse.LinkRequest, old Node) (Node, error) +} + +type NodeRemover interface { + // Remove removes the entry with the given name from + // the receiver, which must be a directory. The entry to be removed + // may correspond to a file (unlink) or to a directory (rmdir). + Remove(ctx context.Context, req *fuse.RemoveRequest) error +} + +type NodeAccesser interface { + // Access checks whether the calling context has permission for + // the given operations on the receiver. If so, Access should + // return nil. If not, Access should return EPERM. + // + // Note that this call affects the result of the access(2) system + // call but not the open(2) system call. If Access is not + // implemented, the Node behaves as if it always returns nil + // (permission granted), relying on checks in Open instead. + Access(ctx context.Context, req *fuse.AccessRequest) error +} + +type NodeStringLookuper interface { + // Lookup looks up a specific entry in the receiver, + // which must be a directory. Lookup should return a Node + // corresponding to the entry. If the name does not exist in + // the directory, Lookup should return ENOENT. + // + // Lookup need not to handle the names "." and "..". + Lookup(ctx context.Context, name string) (Node, error) +} + +type NodeRequestLookuper interface { + // Lookup looks up a specific entry in the receiver. + // See NodeStringLookuper for more. + Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (Node, error) +} + +type NodeMkdirer interface { + Mkdir(ctx context.Context, req *fuse.MkdirRequest) (Node, error) +} + +type NodeOpener interface { + // Open opens the receiver. After a successful open, a client + // process has a file descriptor referring to this Handle. + // + // Open can also be also called on non-files. For example, + // directories are Opened for ReadDir or fchdir(2). + // + // If this method is not implemented, the open will always + // succeed, and the Node itself will be used as the Handle. + // + // XXX note about access. XXX OpenFlags. + Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (Handle, error) +} + +type NodeCreater interface { + // Create creates a new directory entry in the receiver, which + // must be a directory. + Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (Node, Handle, error) +} + +type NodeForgetter interface { + // Forget about this node. This node will not receive further + // method calls. + // + // Forget is not necessarily seen on unmount, as all nodes are + // implicitly forgotten as part part of the unmount. + Forget() +} + +type NodeRenamer interface { + Rename(ctx context.Context, req *fuse.RenameRequest, newDir Node) error +} + +type NodeMknoder interface { + Mknod(ctx context.Context, req *fuse.MknodRequest) (Node, error) +} + +// TODO this should be on Handle not Node +type NodeFsyncer interface { + Fsync(ctx context.Context, req *fuse.FsyncRequest) error +} + +type NodeGetxattrer interface { + // Getxattr gets an extended attribute by the given name from the + // node. + // + // If there is no xattr by that name, returns fuse.ErrNoXattr. 
+ Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error +} + +type NodeListxattrer interface { + // Listxattr lists the extended attributes recorded for the node. + Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error +} + +type NodeSetxattrer interface { + // Setxattr sets an extended attribute with the given name and + // value for the node. + Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error +} + +type NodeRemovexattrer interface { + // Removexattr removes an extended attribute for the name. + // + // If there is no xattr by that name, returns fuse.ErrNoXattr. + Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error +} + +var startTime = time.Now() + +func nodeAttr(ctx context.Context, n Node, attr *fuse.Attr) error { + attr.Valid = attrValidTime + attr.Nlink = 1 + attr.Atime = startTime + attr.Mtime = startTime + attr.Ctime = startTime + attr.Crtime = startTime + if err := n.Attr(ctx, attr); err != nil { + return err + } + return nil +} + +// A Handle is the interface required of an opened file or directory. +// See the documentation for type FS for general information +// pertaining to all methods. +// +// Other FUSE requests can be handled by implementing methods from the +// Handle* interfaces. The most common to implement are HandleReader, +// HandleReadDirer, and HandleWriter. +// +// TODO implement methods: Getlk, Setlk, Setlkw +type Handle interface { +} + +type HandleFlusher interface { + // Flush is called each time the file or directory is closed. + // Because there can be multiple file descriptors referring to a + // single opened file, Flush can be called multiple times. + Flush(ctx context.Context, req *fuse.FlushRequest) error +} + +type HandleReadAller interface { + ReadAll(ctx context.Context) ([]byte, error) +} + +type HandleReadDirAller interface { + ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) +} + +type HandleReader interface { + // Read requests to read data from the handle. + // + // There is a page cache in the kernel that normally submits only + // page-aligned reads spanning one or more pages. However, you + // should not rely on this. To see individual requests as + // submitted by the file system clients, set OpenDirectIO. + // + // Note that reads beyond the size of the file as reported by Attr + // are not even attempted (except in OpenDirectIO mode). + Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error +} + +type HandleWriter interface { + // Write requests to write data into the handle at the given offset. + // Store the amount of data written in resp.Size. + // + // There is a writeback page cache in the kernel that normally submits + // only page-aligned writes spanning one or more pages. However, + // you should not rely on this. To see individual requests as + // submitted by the file system clients, set OpenDirectIO. + // + // Writes that grow the file are expected to update the file size + // (as seen through Attr). Note that file size changes are + // communicated also through Setattr. + Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error +} + +type HandleReleaser interface { + Release(ctx context.Context, req *fuse.ReleaseRequest) error +} + +type Config struct { + // Function to send debug log messages to. If nil, use fuse.Debug. + // Note that changing this or fuse.Debug may not affect existing + // calls to Serve. 
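Putting the Node and Handle interfaces together, a static single-file directory needs little more than Attr, Lookup, ReadDirAll and ReadAll, in the spirit of the hellofs example referenced in the package documentation. A sketch with illustrative names and content:

```go
package example

import (
	"os"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"golang.org/x/net/context"
)

const greeting = "hello, world\n"

// rootDir is a directory containing a single file named "hello".
type rootDir struct{}

func (rootDir) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = os.ModeDir | 0555
	return nil
}

func (rootDir) Lookup(ctx context.Context, name string) (fs.Node, error) {
	if name == "hello" {
		return helloFile{}, nil
	}
	return nil, fuse.ENOENT
}

func (rootDir) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) {
	return []fuse.Dirent{{Name: "hello"}}, nil
}

// helloFile serves a fixed string; because the whole content is known,
// HandleReadAller is enough and no explicit Open or Read is needed.
type helloFile struct{}

func (helloFile) Attr(ctx context.Context, a *fuse.Attr) error {
	a.Mode = 0444
	a.Size = uint64(len(greeting))
	return nil
}

func (helloFile) ReadAll(ctx context.Context) ([]byte, error) {
	return []byte(greeting), nil
}
```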
+ // + // See fuse.Debug for the rules that log functions must follow. + Debug func(msg interface{}) + + // Function to put things into context for processing the request. + // The returned context must have ctx as its parent. + // + // Note that changing this may not affect existing calls to Serve. + // + // Must not retain req. + WithContext func(ctx context.Context, req fuse.Request) context.Context +} + +// New returns a new FUSE server ready to serve this kernel FUSE +// connection. +// +// Config may be nil. +func New(conn *fuse.Conn, config *Config) *Server { + s := &Server{ + conn: conn, + req: map[fuse.RequestID]*serveRequest{}, + nodeRef: map[Node]fuse.NodeID{}, + dynamicInode: GenerateDynamicInode, + } + if config != nil { + s.debug = config.Debug + s.context = config.WithContext + } + if s.debug == nil { + s.debug = fuse.Debug + } + return s +} + +type Server struct { + // set in New + conn *fuse.Conn + debug func(msg interface{}) + context func(ctx context.Context, req fuse.Request) context.Context + + // set once at Serve time + fs FS + dynamicInode func(parent uint64, name string) uint64 + + // state, protected by meta + meta sync.Mutex + req map[fuse.RequestID]*serveRequest + node []*serveNode + nodeRef map[Node]fuse.NodeID + handle []*serveHandle + freeNode []fuse.NodeID + freeHandle []fuse.HandleID + nodeGen uint64 + + // Used to ensure worker goroutines finish before Serve returns + wg sync.WaitGroup +} + +// Serve serves the FUSE connection by making calls to the methods +// of fs and the Nodes and Handles it makes available. It returns only +// when the connection has been closed or an unexpected error occurs. +func (s *Server) Serve(fs FS) error { + defer s.wg.Wait() // Wait for worker goroutines to complete before return + + s.fs = fs + if dyn, ok := fs.(FSInodeGenerator); ok { + s.dynamicInode = dyn.GenerateInode + } + + root, err := fs.Root() + if err != nil { + return fmt.Errorf("cannot obtain root node: %v", err) + } + // Recognize the root node if it's ever returned from Lookup, + // passed to Invalidate, etc. + s.nodeRef[root] = 1 + s.node = append(s.node, nil, &serveNode{ + inode: 1, + generation: s.nodeGen, + node: root, + refs: 1, + }) + s.handle = append(s.handle, nil) + + for { + req, err := s.conn.ReadRequest() + if err != nil { + if err == io.EOF { + break + } + return err + } + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.serve(req) + }() + } + return nil +} + +// Serve serves a FUSE connection with the default settings. See +// Server.Serve. +func Serve(c *fuse.Conn, fs FS) error { + server := New(c, nil) + return server.Serve(fs) +} + +type nothing struct{} + +type serveRequest struct { + Request fuse.Request + cancel func() +} + +type serveNode struct { + inode uint64 + generation uint64 + node Node + refs uint64 + + // Delay freeing the NodeID until waitgroup is done. This allows + // using the NodeID for short periods of time without holding the + // Server.meta lock. + // + // Rules: + // + // - hold Server.meta while calling wg.Add, then unlock + // - do NOT try to reacquire Server.meta + wg sync.WaitGroup +} + +func (sn *serveNode) attr(ctx context.Context, attr *fuse.Attr) error { + err := nodeAttr(ctx, sn.node, attr) + if attr.Inode == 0 { + attr.Inode = sn.inode + } + return err +} + +type serveHandle struct { + handle Handle + readData []byte + nodeID fuse.NodeID +} + +// NodeRef is deprecated. It remains here to decrease code churn on +// FUSE library users. 
You may remove it from your program now; +// returning the same Node values are now recognized automatically, +// without needing NodeRef. +type NodeRef struct{} + +func (c *Server) saveNode(inode uint64, node Node) (id fuse.NodeID, gen uint64) { + c.meta.Lock() + defer c.meta.Unlock() + + if id, ok := c.nodeRef[node]; ok { + sn := c.node[id] + sn.refs++ + return id, sn.generation + } + + sn := &serveNode{inode: inode, node: node, refs: 1} + if n := len(c.freeNode); n > 0 { + id = c.freeNode[n-1] + c.freeNode = c.freeNode[:n-1] + c.node[id] = sn + c.nodeGen++ + } else { + id = fuse.NodeID(len(c.node)) + c.node = append(c.node, sn) + } + sn.generation = c.nodeGen + c.nodeRef[node] = id + return id, sn.generation +} + +func (c *Server) saveHandle(handle Handle, nodeID fuse.NodeID) (id fuse.HandleID) { + c.meta.Lock() + shandle := &serveHandle{handle: handle, nodeID: nodeID} + if n := len(c.freeHandle); n > 0 { + id = c.freeHandle[n-1] + c.freeHandle = c.freeHandle[:n-1] + c.handle[id] = shandle + } else { + id = fuse.HandleID(len(c.handle)) + c.handle = append(c.handle, shandle) + } + c.meta.Unlock() + return +} + +type nodeRefcountDropBug struct { + N uint64 + Refs uint64 + Node fuse.NodeID +} + +func (n *nodeRefcountDropBug) String() string { + return fmt.Sprintf("bug: trying to drop %d of %d references to %v", n.N, n.Refs, n.Node) +} + +func (c *Server) dropNode(id fuse.NodeID, n uint64) (forget bool) { + c.meta.Lock() + defer c.meta.Unlock() + snode := c.node[id] + + if snode == nil { + // this should only happen if refcounts kernel<->us disagree + // *and* two ForgetRequests for the same node race each other; + // this indicates a bug somewhere + c.debug(nodeRefcountDropBug{N: n, Node: id}) + + // we may end up triggering Forget twice, but that's better + // than not even once, and that's the best we can do + return true + } + + if n > snode.refs { + c.debug(nodeRefcountDropBug{N: n, Refs: snode.refs, Node: id}) + n = snode.refs + } + + snode.refs -= n + if snode.refs == 0 { + snode.wg.Wait() + c.node[id] = nil + delete(c.nodeRef, snode.node) + c.freeNode = append(c.freeNode, id) + return true + } + return false +} + +func (c *Server) dropHandle(id fuse.HandleID) { + c.meta.Lock() + c.handle[id] = nil + c.freeHandle = append(c.freeHandle, id) + c.meta.Unlock() +} + +type missingHandle struct { + Handle fuse.HandleID + MaxHandle fuse.HandleID +} + +func (m missingHandle) String() string { + return fmt.Sprint("missing handle: ", m.Handle, m.MaxHandle) +} + +// Returns nil for invalid handles. +func (c *Server) getHandle(id fuse.HandleID) (shandle *serveHandle) { + c.meta.Lock() + defer c.meta.Unlock() + if id < fuse.HandleID(len(c.handle)) { + shandle = c.handle[uint(id)] + } + if shandle == nil { + c.debug(missingHandle{ + Handle: id, + MaxHandle: fuse.HandleID(len(c.handle)), + }) + } + return +} + +type request struct { + Op string + Request *fuse.Header + In interface{} `json:",omitempty"` +} + +func (r request) String() string { + return fmt.Sprintf("<- %s", r.In) +} + +type logResponseHeader struct { + ID fuse.RequestID +} + +func (m logResponseHeader) String() string { + return fmt.Sprintf("ID=%v", m.ID) +} + +type response struct { + Op string + Request logResponseHeader + Out interface{} `json:",omitempty"` + // Errno contains the errno value as a string, for example "EPERM". + Errno string `json:",omitempty"` + // Error may contain a free form error message. 
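Serving with a custom Config simply means constructing the Server explicitly instead of calling the Serve shortcut. A sketch that routes the per-request debug messages to the standard logger (the connection and filesystem are assumed to come from elsewhere):

```go
package example

import (
	"log"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
)

// serveWithDebug serves filesys on an already-mounted connection and logs
// every request/response line produced by the fs package.
func serveWithDebug(c *fuse.Conn, filesys fs.FS) error {
	srv := fs.New(c, &fs.Config{
		Debug: func(msg interface{}) { log.Printf("fuse: %v", msg) },
	})
	return srv.Serve(filesys)
}
```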
+ Error string `json:",omitempty"` +} + +func (r response) errstr() string { + s := r.Errno + if r.Error != "" { + // prefix the errno constant to the long form message + s = s + ": " + r.Error + } + return s +} + +func (r response) String() string { + switch { + case r.Errno != "" && r.Out != nil: + return fmt.Sprintf("-> [%v] %v error=%s", r.Request, r.Out, r.errstr()) + case r.Errno != "": + return fmt.Sprintf("-> [%v] %s error=%s", r.Request, r.Op, r.errstr()) + case r.Out != nil: + // make sure (seemingly) empty values are readable + switch r.Out.(type) { + case string: + return fmt.Sprintf("-> [%v] %s %q", r.Request, r.Op, r.Out) + case []byte: + return fmt.Sprintf("-> [%v] %s [% x]", r.Request, r.Op, r.Out) + default: + return fmt.Sprintf("-> [%v] %v", r.Request, r.Out) + } + default: + return fmt.Sprintf("-> [%v] %s", r.Request, r.Op) + } +} + +type notification struct { + Op string + Node fuse.NodeID + Out interface{} `json:",omitempty"` + Err string `json:",omitempty"` +} + +func (n notification) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "=> %s %v", n.Op, n.Node) + if n.Out != nil { + // make sure (seemingly) empty values are readable + switch n.Out.(type) { + case string: + fmt.Fprintf(&buf, " %q", n.Out) + case []byte: + fmt.Fprintf(&buf, " [% x]", n.Out) + default: + fmt.Fprintf(&buf, " %s", n.Out) + } + } + if n.Err != "" { + fmt.Fprintf(&buf, " Err:%v", n.Err) + } + return buf.String() +} + +type logMissingNode struct { + MaxNode fuse.NodeID +} + +func opName(req fuse.Request) string { + t := reflect.Indirect(reflect.ValueOf(req)).Type() + s := t.Name() + s = strings.TrimSuffix(s, "Request") + return s +} + +type logLinkRequestOldNodeNotFound struct { + Request *fuse.Header + In *fuse.LinkRequest +} + +func (m *logLinkRequestOldNodeNotFound) String() string { + return fmt.Sprintf("In LinkRequest (request %v), node %d not found", m.Request.Hdr().ID, m.In.OldNode) +} + +type renameNewDirNodeNotFound struct { + Request *fuse.Header + In *fuse.RenameRequest +} + +func (m *renameNewDirNodeNotFound) String() string { + return fmt.Sprintf("In RenameRequest (request %v), node %d not found", m.Request.Hdr().ID, m.In.NewDir) +} + +type handlerPanickedError struct { + Request interface{} + Err interface{} +} + +var _ error = handlerPanickedError{} + +func (h handlerPanickedError) Error() string { + return fmt.Sprintf("handler panicked: %v", h.Err) +} + +var _ fuse.ErrorNumber = handlerPanickedError{} + +func (h handlerPanickedError) Errno() fuse.Errno { + if err, ok := h.Err.(fuse.ErrorNumber); ok { + return err.Errno() + } + return fuse.DefaultErrno +} + +// handlerTerminatedError happens when a handler terminates itself +// with runtime.Goexit. This is most commonly because of incorrect use +// of testing.TB.FailNow, typically via t.Fatal. 
+type handlerTerminatedError struct { + Request interface{} +} + +var _ error = handlerTerminatedError{} + +func (h handlerTerminatedError) Error() string { + return fmt.Sprintf("handler terminated (called runtime.Goexit)") +} + +var _ fuse.ErrorNumber = handlerTerminatedError{} + +func (h handlerTerminatedError) Errno() fuse.Errno { + return fuse.DefaultErrno +} + +type handleNotReaderError struct { + handle Handle +} + +var _ error = handleNotReaderError{} + +func (e handleNotReaderError) Error() string { + return fmt.Sprintf("handle has no Read: %T", e.handle) +} + +var _ fuse.ErrorNumber = handleNotReaderError{} + +func (e handleNotReaderError) Errno() fuse.Errno { + return fuse.ENOTSUP +} + +func initLookupResponse(s *fuse.LookupResponse) { + s.EntryValid = entryValidTime +} + +func (c *Server) serve(r fuse.Request) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + parentCtx := ctx + if c.context != nil { + ctx = c.context(ctx, r) + } + + req := &serveRequest{Request: r, cancel: cancel} + + c.debug(request{ + Op: opName(r), + Request: r.Hdr(), + In: r, + }) + var node Node + var snode *serveNode + c.meta.Lock() + hdr := r.Hdr() + if id := hdr.Node; id != 0 { + if id < fuse.NodeID(len(c.node)) { + snode = c.node[uint(id)] + } + if snode == nil { + c.meta.Unlock() + c.debug(response{ + Op: opName(r), + Request: logResponseHeader{ID: hdr.ID}, + Error: fuse.ESTALE.ErrnoName(), + // this is the only place that sets both Error and + // Out; not sure if i want to do that; might get rid + // of len(c.node) things altogether + Out: logMissingNode{ + MaxNode: fuse.NodeID(len(c.node)), + }, + }) + r.RespondError(fuse.ESTALE) + return + } + node = snode.node + } + if c.req[hdr.ID] != nil { + // This happens with OSXFUSE. Assume it's okay and + // that we'll never see an interrupt for this one. + // Otherwise everything wedges. TODO: Report to OSXFUSE? + // + // TODO this might have been because of missing done() calls + } else { + c.req[hdr.ID] = req + } + c.meta.Unlock() + + // Call this before responding. + // After responding is too late: we might get another request + // with the same ID and be very confused. + done := func(resp interface{}) { + msg := response{ + Op: opName(r), + Request: logResponseHeader{ID: hdr.ID}, + } + if err, ok := resp.(error); ok { + msg.Error = err.Error() + if ferr, ok := err.(fuse.ErrorNumber); ok { + errno := ferr.Errno() + msg.Errno = errno.ErrnoName() + if errno == err { + // it's just a fuse.Errno with no extra detail; + // skip the textual message for log readability + msg.Error = "" + } + } else { + msg.Errno = fuse.DefaultErrno.ErrnoName() + } + } else { + msg.Out = resp + } + c.debug(msg) + + c.meta.Lock() + delete(c.req, hdr.ID) + c.meta.Unlock() + } + + var responded bool + defer func() { + if rec := recover(); rec != nil { + const size = 1 << 16 + buf := make([]byte, size) + n := runtime.Stack(buf, false) + buf = buf[:n] + log.Printf("fuse: panic in handler for %v: %v\n%s", r, rec, buf) + err := handlerPanickedError{ + Request: r, + Err: rec, + } + done(err) + r.RespondError(err) + return + } + + if !responded { + err := handlerTerminatedError{ + Request: r, + } + done(err) + r.RespondError(err) + } + }() + + if err := c.handleRequest(ctx, node, snode, r, done); err != nil { + if err == context.Canceled { + select { + case <-parentCtx.Done(): + // We canceled the parent context because of an + // incoming interrupt request, so return EINTR + // to trigger the right behavior in the client app. 
+ // + // Only do this when it's the parent context that was + // canceled, not a context controlled by the program + // using this library, so we don't return EINTR too + // eagerly -- it might cause busy loops. + // + // Decent write-up on role of EINTR: + // http://250bpm.com/blog:12 + err = fuse.EINTR + default: + // nothing + } + } + done(err) + r.RespondError(err) + } + + // disarm runtime.Goexit protection + responded = true +} + +// handleRequest will either a) call done(s) and r.Respond(s) OR b) return an error. +func (c *Server) handleRequest(ctx context.Context, node Node, snode *serveNode, r fuse.Request, done func(resp interface{})) error { + switch r := r.(type) { + default: + // Note: To FUSE, ENOSYS means "this server never implements this request." + // It would be inappropriate to return ENOSYS for other operations in this + // switch that might only be unavailable in some contexts, not all. + return fuse.ENOSYS + + case *fuse.StatfsRequest: + s := &fuse.StatfsResponse{} + if fs, ok := c.fs.(FSStatfser); ok { + if err := fs.Statfs(ctx, r, s); err != nil { + return err + } + } + done(s) + r.Respond(s) + return nil + + // Node operations. + case *fuse.GetattrRequest: + s := &fuse.GetattrResponse{} + if n, ok := node.(NodeGetattrer); ok { + if err := n.Getattr(ctx, r, s); err != nil { + return err + } + } else { + if err := snode.attr(ctx, &s.Attr); err != nil { + return err + } + } + done(s) + r.Respond(s) + return nil + + case *fuse.SetattrRequest: + s := &fuse.SetattrResponse{} + if n, ok := node.(NodeSetattrer); ok { + if err := n.Setattr(ctx, r, s); err != nil { + return err + } + } + + if err := snode.attr(ctx, &s.Attr); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.SymlinkRequest: + s := &fuse.SymlinkResponse{} + initLookupResponse(&s.LookupResponse) + n, ok := node.(NodeSymlinker) + if !ok { + return fuse.EIO // XXX or EPERM like Mkdir? + } + n2, err := n.Symlink(ctx, r) + if err != nil { + return err + } + if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.NewName, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.ReadlinkRequest: + n, ok := node.(NodeReadlinker) + if !ok { + return fuse.EIO /// XXX or EPERM? + } + target, err := n.Readlink(ctx, r) + if err != nil { + return err + } + done(target) + r.Respond(target) + return nil + + case *fuse.LinkRequest: + n, ok := node.(NodeLinker) + if !ok { + return fuse.EIO /// XXX or EPERM? + } + c.meta.Lock() + var oldNode *serveNode + if int(r.OldNode) < len(c.node) { + oldNode = c.node[r.OldNode] + } + c.meta.Unlock() + if oldNode == nil { + c.debug(logLinkRequestOldNodeNotFound{ + Request: r.Hdr(), + In: r, + }) + return fuse.EIO + } + n2, err := n.Link(ctx, r, oldNode.node) + if err != nil { + return err + } + s := &fuse.LookupResponse{} + initLookupResponse(s) + if err := c.saveLookup(ctx, s, snode, r.NewName, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.RemoveRequest: + n, ok := node.(NodeRemover) + if !ok { + return fuse.EIO /// XXX or EPERM? 
+ } + err := n.Remove(ctx, r) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.AccessRequest: + if n, ok := node.(NodeAccesser); ok { + if err := n.Access(ctx, r); err != nil { + return err + } + } + done(nil) + r.Respond() + return nil + + case *fuse.LookupRequest: + var n2 Node + var err error + s := &fuse.LookupResponse{} + initLookupResponse(s) + if n, ok := node.(NodeStringLookuper); ok { + n2, err = n.Lookup(ctx, r.Name) + } else if n, ok := node.(NodeRequestLookuper); ok { + n2, err = n.Lookup(ctx, r, s) + } else { + return fuse.ENOENT + } + if err != nil { + return err + } + if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.MkdirRequest: + s := &fuse.MkdirResponse{} + initLookupResponse(&s.LookupResponse) + n, ok := node.(NodeMkdirer) + if !ok { + return fuse.EPERM + } + n2, err := n.Mkdir(ctx, r) + if err != nil { + return err + } + if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.OpenRequest: + s := &fuse.OpenResponse{} + var h2 Handle + if n, ok := node.(NodeOpener); ok { + hh, err := n.Open(ctx, r, s) + if err != nil { + return err + } + h2 = hh + } else { + h2 = node + } + s.Handle = c.saveHandle(h2, r.Hdr().Node) + done(s) + r.Respond(s) + return nil + + case *fuse.CreateRequest: + n, ok := node.(NodeCreater) + if !ok { + // If we send back ENOSYS, FUSE will try mknod+open. + return fuse.EPERM + } + s := &fuse.CreateResponse{OpenResponse: fuse.OpenResponse{}} + initLookupResponse(&s.LookupResponse) + n2, h2, err := n.Create(ctx, r, s) + if err != nil { + return err + } + if err := c.saveLookup(ctx, &s.LookupResponse, snode, r.Name, n2); err != nil { + return err + } + s.Handle = c.saveHandle(h2, r.Hdr().Node) + done(s) + r.Respond(s) + return nil + + case *fuse.GetxattrRequest: + n, ok := node.(NodeGetxattrer) + if !ok { + return fuse.ENOTSUP + } + s := &fuse.GetxattrResponse{} + err := n.Getxattr(ctx, r, s) + if err != nil { + return err + } + if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { + return fuse.ERANGE + } + done(s) + r.Respond(s) + return nil + + case *fuse.ListxattrRequest: + n, ok := node.(NodeListxattrer) + if !ok { + return fuse.ENOTSUP + } + s := &fuse.ListxattrResponse{} + err := n.Listxattr(ctx, r, s) + if err != nil { + return err + } + if r.Size != 0 && uint64(len(s.Xattr)) > uint64(r.Size) { + return fuse.ERANGE + } + done(s) + r.Respond(s) + return nil + + case *fuse.SetxattrRequest: + n, ok := node.(NodeSetxattrer) + if !ok { + return fuse.ENOTSUP + } + err := n.Setxattr(ctx, r) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.RemovexattrRequest: + n, ok := node.(NodeRemovexattrer) + if !ok { + return fuse.ENOTSUP + } + err := n.Removexattr(ctx, r) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.ForgetRequest: + forget := c.dropNode(r.Hdr().Node, r.N) + if forget { + n, ok := node.(NodeForgetter) + if ok { + n.Forget() + } + } + done(nil) + r.Respond() + return nil + + // Handle operations. 
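Reads that may block indefinitely should watch the request context: when serve cancels it because of an InterruptRequest, returning fuse.EINTR produces the behavior described above. A sketch of such a HandleReader over a hypothetical channel-backed data source (a real implementation would also honor req.Offset and req.Size):

```go
package example

import (
	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"golang.org/x/net/context"
)

// slowHandle reads from a channel that is fed by some other goroutine.
type slowHandle struct {
	data <-chan []byte
}

var _ fs.HandleReader = (*slowHandle)(nil)

func (h *slowHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
	select {
	case b := <-h.data:
		resp.Data = b
		return nil
	case <-ctx.Done():
		// The request was interrupted or otherwise cancelled.
		return fuse.EINTR
	}
}
```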
+ case *fuse.ReadRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + return fuse.ESTALE + } + handle := shandle.handle + + s := &fuse.ReadResponse{Data: make([]byte, 0, r.Size)} + if r.Dir { + if h, ok := handle.(HandleReadDirAller); ok { + // detect rewinddir(3) or similar seek and refresh + // contents + if r.Offset == 0 { + shandle.readData = nil + } + + if shandle.readData == nil { + dirs, err := h.ReadDirAll(ctx) + if err != nil { + return err + } + var data []byte + for _, dir := range dirs { + if dir.Inode == 0 { + dir.Inode = c.dynamicInode(snode.inode, dir.Name) + } + data = fuse.AppendDirent(data, dir) + } + shandle.readData = data + } + fuseutil.HandleRead(r, s, shandle.readData) + done(s) + r.Respond(s) + return nil + } + } else { + if h, ok := handle.(HandleReadAller); ok { + if shandle.readData == nil { + data, err := h.ReadAll(ctx) + if err != nil { + return err + } + if data == nil { + data = []byte{} + } + shandle.readData = data + } + fuseutil.HandleRead(r, s, shandle.readData) + done(s) + r.Respond(s) + return nil + } + h, ok := handle.(HandleReader) + if !ok { + err := handleNotReaderError{handle: handle} + return err + } + if err := h.Read(ctx, r, s); err != nil { + return err + } + } + done(s) + r.Respond(s) + return nil + + case *fuse.WriteRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + return fuse.ESTALE + } + + s := &fuse.WriteResponse{} + if h, ok := shandle.handle.(HandleWriter); ok { + if err := h.Write(ctx, r, s); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + } + return fuse.EIO + + case *fuse.FlushRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + return fuse.ESTALE + } + handle := shandle.handle + + if h, ok := handle.(HandleFlusher); ok { + if err := h.Flush(ctx, r); err != nil { + return err + } + } + done(nil) + r.Respond() + return nil + + case *fuse.ReleaseRequest: + shandle := c.getHandle(r.Handle) + if shandle == nil { + return fuse.ESTALE + } + handle := shandle.handle + + // No matter what, release the handle. + c.dropHandle(r.Handle) + + if h, ok := handle.(HandleReleaser); ok { + if err := h.Release(ctx, r); err != nil { + return err + } + } + done(nil) + r.Respond() + return nil + + case *fuse.DestroyRequest: + if fs, ok := c.fs.(FSDestroyer); ok { + fs.Destroy() + } + done(nil) + r.Respond() + return nil + + case *fuse.RenameRequest: + c.meta.Lock() + var newDirNode *serveNode + if int(r.NewDir) < len(c.node) { + newDirNode = c.node[r.NewDir] + } + c.meta.Unlock() + if newDirNode == nil { + c.debug(renameNewDirNodeNotFound{ + Request: r.Hdr(), + In: r, + }) + return fuse.EIO + } + n, ok := node.(NodeRenamer) + if !ok { + return fuse.EIO // XXX or EPERM like Mkdir? 
+ } + err := n.Rename(ctx, r, newDirNode.node) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.MknodRequest: + n, ok := node.(NodeMknoder) + if !ok { + return fuse.EIO + } + n2, err := n.Mknod(ctx, r) + if err != nil { + return err + } + s := &fuse.LookupResponse{} + initLookupResponse(s) + if err := c.saveLookup(ctx, s, snode, r.Name, n2); err != nil { + return err + } + done(s) + r.Respond(s) + return nil + + case *fuse.FsyncRequest: + n, ok := node.(NodeFsyncer) + if !ok { + return fuse.EIO + } + err := n.Fsync(ctx, r) + if err != nil { + return err + } + done(nil) + r.Respond() + return nil + + case *fuse.InterruptRequest: + c.meta.Lock() + ireq := c.req[r.IntrID] + if ireq != nil && ireq.cancel != nil { + ireq.cancel() + ireq.cancel = nil + } + c.meta.Unlock() + done(nil) + r.Respond() + return nil + + /* case *FsyncdirRequest: + return ENOSYS + + case *GetlkRequest, *SetlkRequest, *SetlkwRequest: + return ENOSYS + + case *BmapRequest: + return ENOSYS + + case *SetvolnameRequest, *GetxtimesRequest, *ExchangeRequest: + return ENOSYS + */ + } + + panic("not reached") +} + +func (c *Server) saveLookup(ctx context.Context, s *fuse.LookupResponse, snode *serveNode, elem string, n2 Node) error { + if err := nodeAttr(ctx, n2, &s.Attr); err != nil { + return err + } + if s.Attr.Inode == 0 { + s.Attr.Inode = c.dynamicInode(snode.inode, elem) + } + + s.Node, s.Generation = c.saveNode(s.Attr.Inode, n2) + return nil +} + +type invalidateNodeDetail struct { + Off int64 + Size int64 +} + +func (i invalidateNodeDetail) String() string { + return fmt.Sprintf("Off:%d Size:%d", i.Off, i.Size) +} + +func errstr(err error) string { + if err == nil { + return "" + } + return err.Error() +} + +func (s *Server) invalidateNode(node Node, off int64, size int64) error { + s.meta.Lock() + id, ok := s.nodeRef[node] + if ok { + snode := s.node[id] + snode.wg.Add(1) + defer snode.wg.Done() + } + s.meta.Unlock() + if !ok { + // This is what the kernel would have said, if we had been + // able to send this message; it's not cached. + return fuse.ErrNotCached + } + // Delay logging until after we can record the error too. We + // consider a /dev/fuse write to be instantaneous enough to not + // need separate before and after messages. + err := s.conn.InvalidateNode(id, off, size) + s.debug(notification{ + Op: "InvalidateNode", + Node: id, + Out: invalidateNodeDetail{ + Off: off, + Size: size, + }, + Err: errstr(err), + }) + return err +} + +// InvalidateNodeAttr invalidates the kernel cache of the attributes +// of node. +// +// Returns fuse.ErrNotCached if the kernel is not currently caching +// the node. +func (s *Server) InvalidateNodeAttr(node Node) error { + return s.invalidateNode(node, 0, 0) +} + +// InvalidateNodeData invalidates the kernel cache of the attributes +// and data of node. +// +// Returns fuse.ErrNotCached if the kernel is not currently caching +// the node. +func (s *Server) InvalidateNodeData(node Node) error { + return s.invalidateNode(node, 0, -1) +} + +// InvalidateNodeDataRange invalidates the kernel cache of the +// attributes and a range of the data of node. +// +// Returns fuse.ErrNotCached if the kernel is not currently caching +// the node. 
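When data changes outside the flow of FUSE requests, for example in a backing store written by another process, the server can push an invalidation to the kernel using the methods above. A sketch; srv is the *fs.Server returned by New and node is the same Node value previously handed to the kernel:

```go
package example

import (
	"log"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
)

// dropCachedData tells the kernel to forget cached attributes and data for node.
// ErrNotCached only means the kernel had nothing cached, which is harmless here.
func dropCachedData(srv *fs.Server, node fs.Node) {
	if err := srv.InvalidateNodeData(node); err != nil && err != fuse.ErrNotCached {
		log.Printf("invalidate node data: %v", err)
	}
}
```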
+func (s *Server) InvalidateNodeDataRange(node Node, off int64, size int64) error { + return s.invalidateNode(node, off, size) +} + +type invalidateEntryDetail struct { + Name string +} + +func (i invalidateEntryDetail) String() string { + return fmt.Sprintf("%q", i.Name) +} + +// InvalidateEntry invalidates the kernel cache of the directory entry +// identified by parent node and entry basename. +// +// Kernel may or may not cache directory listings. To invalidate +// those, use InvalidateNode to invalidate all of the data for a +// directory. (As of 2015-06, Linux FUSE does not cache directory +// listings.) +// +// Returns ErrNotCached if the kernel is not currently caching the +// node. +func (s *Server) InvalidateEntry(parent Node, name string) error { + s.meta.Lock() + id, ok := s.nodeRef[parent] + if ok { + snode := s.node[id] + snode.wg.Add(1) + defer snode.wg.Done() + } + s.meta.Unlock() + if !ok { + // This is what the kernel would have said, if we had been + // able to send this message; it's not cached. + return fuse.ErrNotCached + } + err := s.conn.InvalidateEntry(id, name) + s.debug(notification{ + Op: "InvalidateEntry", + Node: id, + Out: invalidateEntryDetail{ + Name: name, + }, + Err: errstr(err), + }) + return err +} + +// DataHandle returns a read-only Handle that satisfies reads +// using the given data. +func DataHandle(data []byte) Handle { + return &dataHandle{data} +} + +type dataHandle struct { + data []byte +} + +func (d *dataHandle) ReadAll(ctx context.Context) ([]byte, error) { + return d.data, nil +} + +// GenerateDynamicInode returns a dynamic inode. +// +// The parent inode and current entry name are used as the criteria +// for choosing a pseudorandom inode. This makes it likely the same +// entry will get the same inode on multiple runs. +func GenerateDynamicInode(parent uint64, name string) uint64 { + h := fnv.New64a() + var buf [8]byte + binary.LittleEndian.PutUint64(buf[:], parent) + _, _ = h.Write(buf[:]) + _, _ = h.Write([]byte(name)) + var inode uint64 + for { + inode = h.Sum64() + if inode != 0 { + break + } + // there's a tiny probability that result is zero; change the + // input a little and try again + _, _ = h.Write([]byte{'x'}) + } + return inode +} diff --git a/vendor/bazil.org/fuse/fs/tree.go b/vendor/bazil.org/fuse/fs/tree.go new file mode 100644 index 000000000..7e078045a --- /dev/null +++ b/vendor/bazil.org/fuse/fs/tree.go @@ -0,0 +1,99 @@ +// FUSE directory tree, for servers that wish to use it with the service loop. + +package fs + +import ( + "os" + pathpkg "path" + "strings" + + "golang.org/x/net/context" +) + +import ( + "bazil.org/fuse" +) + +// A Tree implements a basic read-only directory tree for FUSE. +// The Nodes contained in it may still be writable. +type Tree struct { + tree +} + +func (t *Tree) Root() (Node, error) { + return &t.tree, nil +} + +// Add adds the path to the tree, resolving to the given node. +// If path or a prefix of path has already been added to the tree, +// Add panics. +// +// Add is only safe to call before starting to serve requests. 
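Tree provides a read-only directory skeleton without hand-writing Lookup or ReadDirAll; paths are registered up front with Add. A sketch with illustrative paths, reusing the fstestutil.File helper as a placeholder node:

```go
package example

import (
	"bazil.org/fuse/fs"
	"bazil.org/fuse/fs/fstestutil"
)

// newStaticTree exposes /docs/readme and /bin/tool as metadata-only files.
// Intermediate directories are created by Add automatically.
func newStaticTree() fs.FS {
	t := &fs.Tree{}
	t.Add("docs/readme", fstestutil.File{})
	t.Add("bin/tool", fstestutil.File{})
	return t
}
```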
+func (t *Tree) Add(path string, node Node) { + path = pathpkg.Clean("/" + path)[1:] + elems := strings.Split(path, "/") + dir := Node(&t.tree) + for i, elem := range elems { + dt, ok := dir.(*tree) + if !ok { + panic("fuse: Tree.Add for " + strings.Join(elems[:i], "/") + " and " + path) + } + n := dt.lookup(elem) + if n != nil { + if i+1 == len(elems) { + panic("fuse: Tree.Add for " + path + " conflicts with " + elem) + } + dir = n + } else { + if i+1 == len(elems) { + dt.add(elem, node) + } else { + dir = &tree{} + dt.add(elem, dir) + } + } + } +} + +type treeDir struct { + name string + node Node +} + +type tree struct { + dir []treeDir +} + +func (t *tree) lookup(name string) Node { + for _, d := range t.dir { + if d.name == name { + return d.node + } + } + return nil +} + +func (t *tree) add(name string, n Node) { + t.dir = append(t.dir, treeDir{name, n}) +} + +func (t *tree) Attr(ctx context.Context, a *fuse.Attr) error { + a.Mode = os.ModeDir | 0555 + return nil +} + +func (t *tree) Lookup(ctx context.Context, name string) (Node, error) { + n := t.lookup(name) + if n != nil { + return n, nil + } + return nil, fuse.ENOENT +} + +func (t *tree) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { + var out []fuse.Dirent + for _, d := range t.dir { + out = append(out, fuse.Dirent{Name: d.name}) + } + return out, nil +} diff --git a/vendor/bazil.org/fuse/fuse.go b/vendor/bazil.org/fuse/fuse.go new file mode 100644 index 000000000..7dc70f9e1 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse.go @@ -0,0 +1,2304 @@ +// See the file LICENSE for copyright and licensing information. + +// Adapted from Plan 9 from User Space's src/cmd/9pfuse/fuse.c, +// which carries this notice: +// +// The files in this directory are subject to the following license. +// +// The author of this software is Russ Cox. +// +// Copyright (c) 2006 Russ Cox +// +// Permission to use, copy, modify, and distribute this software for any +// purpose without fee is hereby granted, provided that this entire notice +// is included in all copies of any software which is or includes a copy +// or modification of this software and in all copies of the supporting +// documentation for such software. +// +// THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED +// WARRANTY. IN PARTICULAR, THE AUTHOR MAKES NO REPRESENTATION OR WARRANTY +// OF ANY KIND CONCERNING THE MERCHANTABILITY OF THIS SOFTWARE OR ITS +// FITNESS FOR ANY PARTICULAR PURPOSE. + +// Package fuse enables writing FUSE file systems on Linux, OS X, and FreeBSD. +// +// On OS X, it requires OSXFUSE (http://osxfuse.github.com/). +// +// There are two approaches to writing a FUSE file system. The first is to speak +// the low-level message protocol, reading from a Conn using ReadRequest and +// writing using the various Respond methods. This approach is closest to +// the actual interaction with the kernel and can be the simplest one in contexts +// such as protocol translators. +// +// Servers of synthesized file systems tend to share common +// bookkeeping abstracted away by the second approach, which is to +// call fs.Serve to serve the FUSE protocol using an implementation of +// the service methods in the interfaces FS* (file system), Node* (file +// or directory), and Handle* (opened file or directory). +// There are a daunting number of such methods that can be written, +// but few are required. +// The specific methods are described in the documentation for those interfaces. 
+// +// The hellofs subdirectory contains a simple illustration of the fs.Serve approach. +// +// Service Methods +// +// The required and optional methods for the FS, Node, and Handle interfaces +// have the general form +// +// Op(ctx context.Context, req *OpRequest, resp *OpResponse) error +// +// where Op is the name of a FUSE operation. Op reads request +// parameters from req and writes results to resp. An operation whose +// only result is the error result omits the resp parameter. +// +// Multiple goroutines may call service methods simultaneously; the +// methods being called are responsible for appropriate +// synchronization. +// +// The operation must not hold on to the request or response, +// including any []byte fields such as WriteRequest.Data or +// SetxattrRequest.Xattr. +// +// Errors +// +// Operations can return errors. The FUSE interface can only +// communicate POSIX errno error numbers to file system clients, the +// message is not visible to file system clients. The returned error +// can implement ErrorNumber to control the errno returned. Without +// ErrorNumber, a generic errno (EIO) is returned. +// +// Error messages will be visible in the debug log as part of the +// response. +// +// Interrupted Operations +// +// In some file systems, some operations +// may take an undetermined amount of time. For example, a Read waiting for +// a network message or a matching Write might wait indefinitely. If the request +// is cancelled and no longer needed, the context will be cancelled. +// Blocking operations should select on a receive from ctx.Done() and attempt to +// abort the operation early if the receive succeeds (meaning the channel is closed). +// To indicate that the operation failed because it was aborted, return fuse.EINTR. +// +// If an operation does not block for an indefinite amount of time, supporting +// cancellation is not necessary. +// +// Authentication +// +// All requests types embed a Header, meaning that the method can +// inspect req.Pid, req.Uid, and req.Gid as necessary to implement +// permission checking. The kernel FUSE layer normally prevents other +// users from accessing the FUSE file system (to change this, see +// AllowOther, AllowRoot), but does not enforce access modes (to +// change this, see DefaultPermissions). +// +// Mount Options +// +// Behavior and metadata of the mounted file system can be changed by +// passing MountOption values to Mount. +// +package fuse // import "bazil.org/fuse" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "sync" + "syscall" + "time" + "unsafe" +) + +// A Conn represents a connection to a mounted FUSE file system. +type Conn struct { + // Ready is closed when the mount is complete or has failed. + Ready <-chan struct{} + + // MountError stores any error from the mount process. Only valid + // after Ready is closed. + MountError error + + // File handle for kernel communication. Only safe to access if + // rio or wio is held. + dev *os.File + wio sync.RWMutex + rio sync.RWMutex + + // Protocol version negotiated with InitRequest/InitResponse. + proto Protocol +} + +// MountpointDoesNotExistError is an error returned when the +// mountpoint does not exist. 
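Tying the Errors section above to code: an error type controls the errno seen by the client by implementing ErrorNumber; otherwise the client sees the generic EIO. A sketch of a custom error for a hypothetical out-of-space condition:

```go
package example

import (
	"fmt"
	"syscall"

	"bazil.org/fuse"
)

// noSpaceError reports a full backing store as ENOSPC instead of the default EIO.
type noSpaceError struct {
	Volume string
}

var _ error = noSpaceError{}
var _ fuse.ErrorNumber = noSpaceError{}

func (e noSpaceError) Error() string {
	return fmt.Sprintf("backing volume %q is full", e.Volume)
}

func (e noSpaceError) Errno() fuse.Errno {
	return fuse.Errno(syscall.ENOSPC)
}
```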
+type MountpointDoesNotExistError struct { + Path string +} + +var _ error = (*MountpointDoesNotExistError)(nil) + +func (e *MountpointDoesNotExistError) Error() string { + return fmt.Sprintf("mountpoint does not exist: %v", e.Path) +} + +// Mount mounts a new FUSE connection on the named directory +// and returns a connection for reading and writing FUSE messages. +// +// After a successful return, caller must call Close to free +// resources. +// +// Even on successful return, the new mount is not guaranteed to be +// visible until after Conn.Ready is closed. See Conn.MountError for +// possible errors. Incoming requests on Conn must be served to make +// progress. +func Mount(dir string, options ...MountOption) (*Conn, error) { + conf := mountConfig{ + options: make(map[string]string), + } + for _, option := range options { + if err := option(&conf); err != nil { + return nil, err + } + } + + ready := make(chan struct{}, 1) + c := &Conn{ + Ready: ready, + } + f, err := mount(dir, &conf, ready, &c.MountError) + if err != nil { + return nil, err + } + c.dev = f + + if err := initMount(c, &conf); err != nil { + c.Close() + if err == ErrClosedWithoutInit { + // see if we can provide a better error + <-c.Ready + if err := c.MountError; err != nil { + return nil, err + } + } + return nil, err + } + + return c, nil +} + +type OldVersionError struct { + Kernel Protocol + LibraryMin Protocol +} + +func (e *OldVersionError) Error() string { + return fmt.Sprintf("kernel FUSE version is too old: %v < %v", e.Kernel, e.LibraryMin) +} + +var ( + ErrClosedWithoutInit = errors.New("fuse connection closed without init") +) + +func initMount(c *Conn, conf *mountConfig) error { + req, err := c.ReadRequest() + if err != nil { + if err == io.EOF { + return ErrClosedWithoutInit + } + return err + } + r, ok := req.(*InitRequest) + if !ok { + return fmt.Errorf("missing init, got: %T", req) + } + + min := Protocol{protoVersionMinMajor, protoVersionMinMinor} + if r.Kernel.LT(min) { + req.RespondError(Errno(syscall.EPROTO)) + c.Close() + return &OldVersionError{ + Kernel: r.Kernel, + LibraryMin: min, + } + } + + proto := Protocol{protoVersionMaxMajor, protoVersionMaxMinor} + if r.Kernel.LT(proto) { + // Kernel doesn't support the latest version we have. + proto = r.Kernel + } + c.proto = proto + + s := &InitResponse{ + Library: proto, + MaxReadahead: conf.maxReadahead, + MaxWrite: maxWrite, + Flags: InitBigWrites | conf.initFlags, + } + r.Respond(s) + return nil +} + +// A Request represents a single FUSE request received from the kernel. +// Use a type switch to determine the specific kind. +// A request of unrecognized type will have concrete type *Header. +type Request interface { + // Hdr returns the Header associated with this request. + Hdr() *Header + + // RespondError responds to the request with the given error. + RespondError(error) + + String() string +} + +// A RequestID identifies an active FUSE request. +type RequestID uint64 + +func (r RequestID) String() string { + return fmt.Sprintf("%#x", uint64(r)) +} + +// A NodeID is a number identifying a directory or file. +// It must be unique among IDs returned in LookupResponses +// that have not yet been forgotten by ForgetRequests. +type NodeID uint64 + +func (n NodeID) String() string { + return fmt.Sprintf("%#x", uint64(n)) +} + +// A HandleID is a number identifying an open directory or file. +// It only needs to be unique while the directory or file is open. 
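A typical mount-and-serve lifecycle using the Conn fields documented above: serve the connection, then consult Ready and MountError to learn whether the mount itself succeeded. A sketch with a hypothetical mountpoint:

```go
package main

import (
	"log"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"bazil.org/fuse/fs/fstestutil"
)

func main() {
	c, err := fuse.Mount("/mnt/example") // hypothetical mountpoint
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// An empty read-only filesystem, just to have something to serve.
	filesys := fstestutil.SimpleFS{Node: &fstestutil.ChildMap{}}
	if err := fs.Serve(c, filesys); err != nil {
		log.Fatal(err)
	}

	// Check whether the mount process itself reported an error;
	// MountError is only valid once Ready has been closed.
	<-c.Ready
	if err := c.MountError; err != nil {
		log.Fatal(err)
	}
}
```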
+type HandleID uint64 + +func (h HandleID) String() string { + return fmt.Sprintf("%#x", uint64(h)) +} + +// The RootID identifies the root directory of a FUSE file system. +const RootID NodeID = rootID + +// A Header describes the basic information sent in every request. +type Header struct { + Conn *Conn `json:"-"` // connection this request was received on + ID RequestID // unique ID for request + Node NodeID // file or directory the request is about + Uid uint32 // user ID of process making request + Gid uint32 // group ID of process making request + Pid uint32 // process ID of process making request + + // for returning to reqPool + msg *message +} + +func (h *Header) String() string { + return fmt.Sprintf("ID=%v Node=%v Uid=%d Gid=%d Pid=%d", h.ID, h.Node, h.Uid, h.Gid, h.Pid) +} + +func (h *Header) Hdr() *Header { + return h +} + +func (h *Header) noResponse() { + putMessage(h.msg) +} + +func (h *Header) respond(msg []byte) { + out := (*outHeader)(unsafe.Pointer(&msg[0])) + out.Unique = uint64(h.ID) + h.Conn.respond(msg) + putMessage(h.msg) +} + +// An ErrorNumber is an error with a specific error number. +// +// Operations may return an error value that implements ErrorNumber to +// control what specific error number (errno) to return. +type ErrorNumber interface { + // Errno returns the the error number (errno) for this error. + Errno() Errno +} + +const ( + // ENOSYS indicates that the call is not supported. + ENOSYS = Errno(syscall.ENOSYS) + + // ESTALE is used by Serve to respond to violations of the FUSE protocol. + ESTALE = Errno(syscall.ESTALE) + + ENOENT = Errno(syscall.ENOENT) + EIO = Errno(syscall.EIO) + EPERM = Errno(syscall.EPERM) + + // EINTR indicates request was interrupted by an InterruptRequest. + // See also fs.Intr. + EINTR = Errno(syscall.EINTR) + + ERANGE = Errno(syscall.ERANGE) + ENOTSUP = Errno(syscall.ENOTSUP) + EEXIST = Errno(syscall.EEXIST) +) + +// DefaultErrno is the errno used when error returned does not +// implement ErrorNumber. +const DefaultErrno = EIO + +var errnoNames = map[Errno]string{ + ENOSYS: "ENOSYS", + ESTALE: "ESTALE", + ENOENT: "ENOENT", + EIO: "EIO", + EPERM: "EPERM", + EINTR: "EINTR", + EEXIST: "EEXIST", +} + +// Errno implements Error and ErrorNumber using a syscall.Errno. +type Errno syscall.Errno + +var _ = ErrorNumber(Errno(0)) +var _ = error(Errno(0)) + +func (e Errno) Errno() Errno { + return e +} + +func (e Errno) String() string { + return syscall.Errno(e).Error() +} + +func (e Errno) Error() string { + return syscall.Errno(e).Error() +} + +// ErrnoName returns the short non-numeric identifier for this errno. +// For example, "EIO". +func (e Errno) ErrnoName() string { + s := errnoNames[e] + if s == "" { + s = fmt.Sprint(e.Errno()) + } + return s +} + +func (e Errno) MarshalText() ([]byte, error) { + s := e.ErrnoName() + return []byte(s), nil +} + +func (h *Header) RespondError(err error) { + errno := DefaultErrno + if ferr, ok := err.(ErrorNumber); ok { + errno = ferr.Errno() + } + // FUSE uses negative errors! + // TODO: File bug report against OSXFUSE: positive error causes kernel panic. + buf := newBuffer(0) + hOut := (*outHeader)(unsafe.Pointer(&buf[0])) + hOut.Error = -int32(errno) + h.respond(buf) +} + +// All requests read from the kernel, without data, are shorter than +// this. +var maxRequestSize = syscall.Getpagesize() +var bufSize = maxRequestSize + maxWrite + +// reqPool is a pool of messages. +// +// Lifetime of a logical message is from getMessage to putMessage. +// getMessage is called by ReadRequest. 
putMessage is called by +// Conn.ReadRequest, Request.Respond, or Request.RespondError. +// +// Messages in the pool are guaranteed to have conn and off zeroed, +// buf allocated and len==bufSize, and hdr set. +var reqPool = sync.Pool{ + New: allocMessage, +} + +func allocMessage() interface{} { + m := &message{buf: make([]byte, bufSize)} + m.hdr = (*inHeader)(unsafe.Pointer(&m.buf[0])) + return m +} + +func getMessage(c *Conn) *message { + m := reqPool.Get().(*message) + m.conn = c + return m +} + +func putMessage(m *message) { + m.buf = m.buf[:bufSize] + m.conn = nil + m.off = 0 + reqPool.Put(m) +} + +// a message represents the bytes of a single FUSE message +type message struct { + conn *Conn + buf []byte // all bytes + hdr *inHeader // header + off int // offset for reading additional fields +} + +func (m *message) len() uintptr { + return uintptr(len(m.buf) - m.off) +} + +func (m *message) data() unsafe.Pointer { + var p unsafe.Pointer + if m.off < len(m.buf) { + p = unsafe.Pointer(&m.buf[m.off]) + } + return p +} + +func (m *message) bytes() []byte { + return m.buf[m.off:] +} + +func (m *message) Header() Header { + h := m.hdr + return Header{ + Conn: m.conn, + ID: RequestID(h.Unique), + Node: NodeID(h.Nodeid), + Uid: h.Uid, + Gid: h.Gid, + Pid: h.Pid, + + msg: m, + } +} + +// fileMode returns a Go os.FileMode from a Unix mode. +func fileMode(unixMode uint32) os.FileMode { + mode := os.FileMode(unixMode & 0777) + switch unixMode & syscall.S_IFMT { + case syscall.S_IFREG: + // nothing + case syscall.S_IFDIR: + mode |= os.ModeDir + case syscall.S_IFCHR: + mode |= os.ModeCharDevice | os.ModeDevice + case syscall.S_IFBLK: + mode |= os.ModeDevice + case syscall.S_IFIFO: + mode |= os.ModeNamedPipe + case syscall.S_IFLNK: + mode |= os.ModeSymlink + case syscall.S_IFSOCK: + mode |= os.ModeSocket + default: + // no idea + mode |= os.ModeDevice + } + if unixMode&syscall.S_ISUID != 0 { + mode |= os.ModeSetuid + } + if unixMode&syscall.S_ISGID != 0 { + mode |= os.ModeSetgid + } + return mode +} + +type noOpcode struct { + Opcode uint32 +} + +func (m noOpcode) String() string { + return fmt.Sprintf("No opcode %v", m.Opcode) +} + +type malformedMessage struct { +} + +func (malformedMessage) String() string { + return "malformed message" +} + +// Close closes the FUSE connection. +func (c *Conn) Close() error { + c.wio.Lock() + defer c.wio.Unlock() + c.rio.Lock() + defer c.rio.Unlock() + return c.dev.Close() +} + +// caller must hold wio or rio +func (c *Conn) fd() int { + return int(c.dev.Fd()) +} + +func (c *Conn) Protocol() Protocol { + return c.proto +} + +// ReadRequest returns the next FUSE request from the kernel. +// +// Caller must call either Request.Respond or Request.RespondError in +// a reasonable time. Caller must not retain Request after that call. +func (c *Conn) ReadRequest() (Request, error) { + m := getMessage(c) +loop: + c.rio.RLock() + n, err := syscall.Read(c.fd(), m.buf) + c.rio.RUnlock() + if err == syscall.EINTR { + // OSXFUSE sends EINTR to userspace when a request interrupt + // completed before it got sent to userspace? + goto loop + } + if err != nil && err != syscall.ENODEV { + putMessage(m) + return nil, err + } + if n <= 0 { + putMessage(m) + return nil, io.EOF + } + m.buf = m.buf[:n] + + if n < inHeaderSize { + putMessage(m) + return nil, errors.New("fuse: message too short") + } + + // FreeBSD FUSE sends a short length in the header + // for FUSE_INIT even though the actual read length is correct. 
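The package documentation describes two approaches; the low-level one works directly with Conn.ReadRequest and the Respond/RespondError methods on each request. A sketch of the smallest possible request loop, which logs and rejects everything after the init handshake that Mount performs internally (purely illustrative; a real protocol translator would switch on the concrete request type):

```go
package example

import (
	"io"
	"log"

	"bazil.org/fuse"
)

// rejectAll reads raw FUSE requests from an already-mounted connection and
// answers each one with ENOSYS until the connection is closed.
func rejectAll(c *fuse.Conn) error {
	for {
		req, err := c.ReadRequest()
		if err != nil {
			if err == io.EOF {
				return nil // connection closed, e.g. by unmount
			}
			return err
		}
		log.Printf("fuse request: %s", req)
		req.RespondError(fuse.ENOSYS)
	}
}
```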
+ if n == inHeaderSize+initInSize && m.hdr.Opcode == opInit && m.hdr.Len < uint32(n) { + m.hdr.Len = uint32(n) + } + + // OSXFUSE sometimes sends the wrong m.hdr.Len in a FUSE_WRITE message. + if m.hdr.Len < uint32(n) && m.hdr.Len >= uint32(unsafe.Sizeof(writeIn{})) && m.hdr.Opcode == opWrite { + m.hdr.Len = uint32(n) + } + + if m.hdr.Len != uint32(n) { + // prepare error message before returning m to pool + err := fmt.Errorf("fuse: read %d opcode %d but expected %d", n, m.hdr.Opcode, m.hdr.Len) + putMessage(m) + return nil, err + } + + m.off = inHeaderSize + + // Convert to data structures. + // Do not trust kernel to hand us well-formed data. + var req Request + switch m.hdr.Opcode { + default: + Debug(noOpcode{Opcode: m.hdr.Opcode}) + goto unrecognized + + case opLookup: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &LookupRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + } + + case opForget: + in := (*forgetIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ForgetRequest{ + Header: m.Header(), + N: in.Nlookup, + } + + case opGetattr: + switch { + case c.proto.LT(Protocol{7, 9}): + req = &GetattrRequest{ + Header: m.Header(), + } + + default: + in := (*getattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &GetattrRequest{ + Header: m.Header(), + Flags: GetattrFlags(in.GetattrFlags), + Handle: HandleID(in.Fh), + } + } + + case opSetattr: + in := (*setattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &SetattrRequest{ + Header: m.Header(), + Valid: SetattrValid(in.Valid), + Handle: HandleID(in.Fh), + Size: in.Size, + Atime: time.Unix(int64(in.Atime), int64(in.AtimeNsec)), + Mtime: time.Unix(int64(in.Mtime), int64(in.MtimeNsec)), + Mode: fileMode(in.Mode), + Uid: in.Uid, + Gid: in.Gid, + Bkuptime: in.BkupTime(), + Chgtime: in.Chgtime(), + Flags: in.Flags(), + } + + case opReadlink: + if len(m.bytes()) > 0 { + goto corrupt + } + req = &ReadlinkRequest{ + Header: m.Header(), + } + + case opSymlink: + // m.bytes() is "newName\0target\0" + names := m.bytes() + if len(names) == 0 || names[len(names)-1] != 0 { + goto corrupt + } + i := bytes.IndexByte(names, '\x00') + if i < 0 { + goto corrupt + } + newName, target := names[0:i], names[i+1:len(names)-1] + req = &SymlinkRequest{ + Header: m.Header(), + NewName: string(newName), + Target: string(target), + } + + case opLink: + in := (*linkIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + newName := m.bytes()[unsafe.Sizeof(*in):] + if len(newName) < 2 || newName[len(newName)-1] != 0 { + goto corrupt + } + newName = newName[:len(newName)-1] + req = &LinkRequest{ + Header: m.Header(), + OldNode: NodeID(in.Oldnodeid), + NewName: string(newName), + } + + case opMknod: + size := mknodInSize(c.proto) + if m.len() < size { + goto corrupt + } + in := (*mknodIn)(m.data()) + name := m.bytes()[size:] + if len(name) < 2 || name[len(name)-1] != '\x00' { + goto corrupt + } + name = name[:len(name)-1] + r := &MknodRequest{ + Header: m.Header(), + Mode: fileMode(in.Mode), + Rdev: in.Rdev, + Name: string(name), + } + if c.proto.GE(Protocol{7, 12}) { + r.Umask = fileMode(in.Umask) & os.ModePerm + } + req = r + + case opMkdir: + size := mkdirInSize(c.proto) + if m.len() < size { + goto corrupt + } + in := (*mkdirIn)(m.data()) + name := m.bytes()[size:] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + r := &MkdirRequest{ + Header: m.Header(), + Name: string(name[:i]), + // 
observed on Linux: mkdirIn.Mode & syscall.S_IFMT == 0, + // and this causes fileMode to go into it's "no idea" + // code branch; enforce type to directory + Mode: fileMode((in.Mode &^ syscall.S_IFMT) | syscall.S_IFDIR), + } + if c.proto.GE(Protocol{7, 12}) { + r.Umask = fileMode(in.Umask) & os.ModePerm + } + req = r + + case opUnlink, opRmdir: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &RemoveRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + Dir: m.hdr.Opcode == opRmdir, + } + + case opRename: + in := (*renameIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + newDirNodeID := NodeID(in.Newdir) + oldNew := m.bytes()[unsafe.Sizeof(*in):] + // oldNew should be "old\x00new\x00" + if len(oldNew) < 4 { + goto corrupt + } + if oldNew[len(oldNew)-1] != '\x00' { + goto corrupt + } + i := bytes.IndexByte(oldNew, '\x00') + if i < 0 { + goto corrupt + } + oldName, newName := string(oldNew[:i]), string(oldNew[i+1:len(oldNew)-1]) + req = &RenameRequest{ + Header: m.Header(), + NewDir: newDirNodeID, + OldName: oldName, + NewName: newName, + } + + case opOpendir, opOpen: + in := (*openIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &OpenRequest{ + Header: m.Header(), + Dir: m.hdr.Opcode == opOpendir, + Flags: openFlags(in.Flags), + } + + case opRead, opReaddir: + in := (*readIn)(m.data()) + if m.len() < readInSize(c.proto) { + goto corrupt + } + r := &ReadRequest{ + Header: m.Header(), + Dir: m.hdr.Opcode == opReaddir, + Handle: HandleID(in.Fh), + Offset: int64(in.Offset), + Size: int(in.Size), + } + if c.proto.GE(Protocol{7, 9}) { + r.Flags = ReadFlags(in.ReadFlags) + r.LockOwner = in.LockOwner + r.FileFlags = openFlags(in.Flags) + } + req = r + + case opWrite: + in := (*writeIn)(m.data()) + if m.len() < writeInSize(c.proto) { + goto corrupt + } + r := &WriteRequest{ + Header: m.Header(), + Handle: HandleID(in.Fh), + Offset: int64(in.Offset), + Flags: WriteFlags(in.WriteFlags), + } + if c.proto.GE(Protocol{7, 9}) { + r.LockOwner = in.LockOwner + r.FileFlags = openFlags(in.Flags) + } + buf := m.bytes()[writeInSize(c.proto):] + if uint32(len(buf)) < in.Size { + goto corrupt + } + r.Data = buf + req = r + + case opStatfs: + req = &StatfsRequest{ + Header: m.Header(), + } + + case opRelease, opReleasedir: + in := (*releaseIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ReleaseRequest{ + Header: m.Header(), + Dir: m.hdr.Opcode == opReleasedir, + Handle: HandleID(in.Fh), + Flags: openFlags(in.Flags), + ReleaseFlags: ReleaseFlags(in.ReleaseFlags), + LockOwner: in.LockOwner, + } + + case opFsync, opFsyncdir: + in := (*fsyncIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &FsyncRequest{ + Dir: m.hdr.Opcode == opFsyncdir, + Header: m.Header(), + Handle: HandleID(in.Fh), + Flags: in.FsyncFlags, + } + + case opSetxattr: + in := (*setxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + m.off += int(unsafe.Sizeof(*in)) + name := m.bytes() + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + xattr := name[i+1:] + if uint32(len(xattr)) < in.Size { + goto corrupt + } + xattr = xattr[:in.Size] + req = &SetxattrRequest{ + Header: m.Header(), + Flags: in.Flags, + Position: in.position(), + Name: string(name[:i]), + Xattr: xattr, + } + + case opGetxattr: + in := (*getxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + name := m.bytes()[unsafe.Sizeof(*in):] + i := bytes.IndexByte(name, '\x00') 
+ if i < 0 { + goto corrupt + } + req = &GetxattrRequest{ + Header: m.Header(), + Name: string(name[:i]), + Size: in.Size, + Position: in.position(), + } + + case opListxattr: + in := (*getxattrIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &ListxattrRequest{ + Header: m.Header(), + Size: in.Size, + Position: in.position(), + } + + case opRemovexattr: + buf := m.bytes() + n := len(buf) + if n == 0 || buf[n-1] != '\x00' { + goto corrupt + } + req = &RemovexattrRequest{ + Header: m.Header(), + Name: string(buf[:n-1]), + } + + case opFlush: + in := (*flushIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &FlushRequest{ + Header: m.Header(), + Handle: HandleID(in.Fh), + Flags: in.FlushFlags, + LockOwner: in.LockOwner, + } + + case opInit: + in := (*initIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &InitRequest{ + Header: m.Header(), + Kernel: Protocol{in.Major, in.Minor}, + MaxReadahead: in.MaxReadahead, + Flags: InitFlags(in.Flags), + } + + case opGetlk: + panic("opGetlk") + case opSetlk: + panic("opSetlk") + case opSetlkw: + panic("opSetlkw") + + case opAccess: + in := (*accessIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &AccessRequest{ + Header: m.Header(), + Mask: in.Mask, + } + + case opCreate: + size := createInSize(c.proto) + if m.len() < size { + goto corrupt + } + in := (*createIn)(m.data()) + name := m.bytes()[size:] + i := bytes.IndexByte(name, '\x00') + if i < 0 { + goto corrupt + } + r := &CreateRequest{ + Header: m.Header(), + Flags: openFlags(in.Flags), + Mode: fileMode(in.Mode), + Name: string(name[:i]), + } + if c.proto.GE(Protocol{7, 12}) { + r.Umask = fileMode(in.Umask) & os.ModePerm + } + req = r + + case opInterrupt: + in := (*interruptIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + req = &InterruptRequest{ + Header: m.Header(), + IntrID: RequestID(in.Unique), + } + + case opBmap: + panic("opBmap") + + case opDestroy: + req = &DestroyRequest{ + Header: m.Header(), + } + + // OS X + case opSetvolname: + panic("opSetvolname") + case opGetxtimes: + panic("opGetxtimes") + case opExchange: + in := (*exchangeIn)(m.data()) + if m.len() < unsafe.Sizeof(*in) { + goto corrupt + } + oldDirNodeID := NodeID(in.Olddir) + newDirNodeID := NodeID(in.Newdir) + oldNew := m.bytes()[unsafe.Sizeof(*in):] + // oldNew should be "oldname\x00newname\x00" + if len(oldNew) < 4 { + goto corrupt + } + if oldNew[len(oldNew)-1] != '\x00' { + goto corrupt + } + i := bytes.IndexByte(oldNew, '\x00') + if i < 0 { + goto corrupt + } + oldName, newName := string(oldNew[:i]), string(oldNew[i+1:len(oldNew)-1]) + req = &ExchangeDataRequest{ + Header: m.Header(), + OldDir: oldDirNodeID, + NewDir: newDirNodeID, + OldName: oldName, + NewName: newName, + // TODO options + } + } + + return req, nil + +corrupt: + Debug(malformedMessage{}) + putMessage(m) + return nil, fmt.Errorf("fuse: malformed message") + +unrecognized: + // Unrecognized message. + // Assume higher-level code will send a "no idea what you mean" error. 
+ h := m.Header() + return &h, nil +} + +type bugShortKernelWrite struct { + Written int64 + Length int64 + Error string + Stack string +} + +func (b bugShortKernelWrite) String() string { + return fmt.Sprintf("short kernel write: written=%d/%d error=%q stack=\n%s", b.Written, b.Length, b.Error, b.Stack) +} + +type bugKernelWriteError struct { + Error string + Stack string +} + +func (b bugKernelWriteError) String() string { + return fmt.Sprintf("kernel write error: error=%q stack=\n%s", b.Error, b.Stack) +} + +// safe to call even with nil error +func errorString(err error) string { + if err == nil { + return "" + } + return err.Error() +} + +func (c *Conn) writeToKernel(msg []byte) error { + out := (*outHeader)(unsafe.Pointer(&msg[0])) + out.Len = uint32(len(msg)) + + c.wio.RLock() + defer c.wio.RUnlock() + nn, err := syscall.Write(c.fd(), msg) + if err == nil && nn != len(msg) { + Debug(bugShortKernelWrite{ + Written: int64(nn), + Length: int64(len(msg)), + Error: errorString(err), + Stack: stack(), + }) + } + return err +} + +func (c *Conn) respond(msg []byte) { + if err := c.writeToKernel(msg); err != nil { + Debug(bugKernelWriteError{ + Error: errorString(err), + Stack: stack(), + }) + } +} + +type notCachedError struct{} + +func (notCachedError) Error() string { + return "node not cached" +} + +var _ ErrorNumber = notCachedError{} + +func (notCachedError) Errno() Errno { + // Behave just like if the original syscall.ENOENT had been passed + // straight through. + return ENOENT +} + +var ( + ErrNotCached = notCachedError{} +) + +// sendInvalidate sends an invalidate notification to kernel. +// +// A returned ENOENT is translated to a friendlier error. +func (c *Conn) sendInvalidate(msg []byte) error { + switch err := c.writeToKernel(msg); err { + case syscall.ENOENT: + return ErrNotCached + default: + return err + } +} + +// InvalidateNode invalidates the kernel cache of the attributes and a +// range of the data of a node. +// +// Giving offset 0 and size -1 means all data. To invalidate just the +// attributes, give offset 0 and size 0. +// +// Returns ErrNotCached if the kernel is not currently caching the +// node. +func (c *Conn) InvalidateNode(nodeID NodeID, off int64, size int64) error { + buf := newBuffer(unsafe.Sizeof(notifyInvalInodeOut{})) + h := (*outHeader)(unsafe.Pointer(&buf[0])) + // h.Unique is 0 + h.Error = notifyCodeInvalInode + out := (*notifyInvalInodeOut)(buf.alloc(unsafe.Sizeof(notifyInvalInodeOut{}))) + out.Ino = uint64(nodeID) + out.Off = off + out.Len = size + return c.sendInvalidate(buf) +} + +// InvalidateEntry invalidates the kernel cache of the directory entry +// identified by parent directory node ID and entry basename. +// +// Kernel may or may not cache directory listings. To invalidate +// those, use InvalidateNode to invalidate all of the data for a +// directory. (As of 2015-06, Linux FUSE does not cache directory +// listings.) +// +// Returns ErrNotCached if the kernel is not currently caching the +// node. 
+func (c *Conn) InvalidateEntry(parent NodeID, name string) error { + const maxUint32 = ^uint32(0) + if uint64(len(name)) > uint64(maxUint32) { + // very unlikely, but we don't want to silently truncate + return syscall.ENAMETOOLONG + } + buf := newBuffer(unsafe.Sizeof(notifyInvalEntryOut{}) + uintptr(len(name)) + 1) + h := (*outHeader)(unsafe.Pointer(&buf[0])) + // h.Unique is 0 + h.Error = notifyCodeInvalEntry + out := (*notifyInvalEntryOut)(buf.alloc(unsafe.Sizeof(notifyInvalEntryOut{}))) + out.Parent = uint64(parent) + out.Namelen = uint32(len(name)) + buf = append(buf, name...) + buf = append(buf, '\x00') + return c.sendInvalidate(buf) +} + +// An InitRequest is the first request sent on a FUSE file system. +type InitRequest struct { + Header `json:"-"` + Kernel Protocol + // Maximum readahead in bytes that the kernel plans to use. + MaxReadahead uint32 + Flags InitFlags +} + +var _ = Request(&InitRequest{}) + +func (r *InitRequest) String() string { + return fmt.Sprintf("Init [%v] %v ra=%d fl=%v", &r.Header, r.Kernel, r.MaxReadahead, r.Flags) +} + +// An InitResponse is the response to an InitRequest. +type InitResponse struct { + Library Protocol + // Maximum readahead in bytes that the kernel can use. Ignored if + // greater than InitRequest.MaxReadahead. + MaxReadahead uint32 + Flags InitFlags + // Maximum size of a single write operation. + // Linux enforces a minimum of 4 KiB. + MaxWrite uint32 +} + +func (r *InitResponse) String() string { + return fmt.Sprintf("Init %v ra=%d fl=%v w=%d", r.Library, r.MaxReadahead, r.Flags, r.MaxWrite) +} + +// Respond replies to the request with the given response. +func (r *InitRequest) Respond(resp *InitResponse) { + buf := newBuffer(unsafe.Sizeof(initOut{})) + out := (*initOut)(buf.alloc(unsafe.Sizeof(initOut{}))) + out.Major = resp.Library.Major + out.Minor = resp.Library.Minor + out.MaxReadahead = resp.MaxReadahead + out.Flags = uint32(resp.Flags) + out.MaxWrite = resp.MaxWrite + + // MaxWrite larger than our receive buffer would just lead to + // errors on large writes. + if out.MaxWrite > maxWrite { + out.MaxWrite = maxWrite + } + r.respond(buf) +} + +// A StatfsRequest requests information about the mounted file system. +type StatfsRequest struct { + Header `json:"-"` +} + +var _ = Request(&StatfsRequest{}) + +func (r *StatfsRequest) String() string { + return fmt.Sprintf("Statfs [%s]", &r.Header) +} + +// Respond replies to the request with the given response. +func (r *StatfsRequest) Respond(resp *StatfsResponse) { + buf := newBuffer(unsafe.Sizeof(statfsOut{})) + out := (*statfsOut)(buf.alloc(unsafe.Sizeof(statfsOut{}))) + out.St = kstatfs{ + Blocks: resp.Blocks, + Bfree: resp.Bfree, + Bavail: resp.Bavail, + Files: resp.Files, + Ffree: resp.Ffree, + Bsize: resp.Bsize, + Namelen: resp.Namelen, + Frsize: resp.Frsize, + } + r.respond(buf) +} + +// A StatfsResponse is the response to a StatfsRequest. +type StatfsResponse struct { + Blocks uint64 // Total data blocks in file system. + Bfree uint64 // Free blocks in file system. + Bavail uint64 // Free blocks in file system if you're not root. + Files uint64 // Total files in file system. + Ffree uint64 // Free files in file system. + Bsize uint32 // Block size + Namelen uint32 // Maximum file name length? + Frsize uint32 // Fragment size, smallest addressable data size in the file system. 
+} + +func (r *StatfsResponse) String() string { + return fmt.Sprintf("Statfs blocks=%d/%d/%d files=%d/%d bsize=%d frsize=%d namelen=%d", + r.Bavail, r.Bfree, r.Blocks, + r.Ffree, r.Files, + r.Bsize, + r.Frsize, + r.Namelen, + ) +} + +// An AccessRequest asks whether the file can be accessed +// for the purpose specified by the mask. +type AccessRequest struct { + Header `json:"-"` + Mask uint32 +} + +var _ = Request(&AccessRequest{}) + +func (r *AccessRequest) String() string { + return fmt.Sprintf("Access [%s] mask=%#x", &r.Header, r.Mask) +} + +// Respond replies to the request indicating that access is allowed. +// To deny access, use RespondError. +func (r *AccessRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// An Attr is the metadata for a single file or directory. +type Attr struct { + Valid time.Duration // how long Attr can be cached + + Inode uint64 // inode number + Size uint64 // size in bytes + Blocks uint64 // size in 512-byte units + Atime time.Time // time of last access + Mtime time.Time // time of last modification + Ctime time.Time // time of last inode change + Crtime time.Time // time of creation (OS X only) + Mode os.FileMode // file mode + Nlink uint32 // number of links (usually 1) + Uid uint32 // owner uid + Gid uint32 // group gid + Rdev uint32 // device numbers + Flags uint32 // chflags(2) flags (OS X only) + BlockSize uint32 // preferred blocksize for filesystem I/O +} + +func (a Attr) String() string { + return fmt.Sprintf("valid=%v ino=%v size=%d mode=%v", a.Valid, a.Inode, a.Size, a.Mode) +} + +func unix(t time.Time) (sec uint64, nsec uint32) { + nano := t.UnixNano() + sec = uint64(nano / 1e9) + nsec = uint32(nano % 1e9) + return +} + +func (a *Attr) attr(out *attr, proto Protocol) { + out.Ino = a.Inode + out.Size = a.Size + out.Blocks = a.Blocks + out.Atime, out.AtimeNsec = unix(a.Atime) + out.Mtime, out.MtimeNsec = unix(a.Mtime) + out.Ctime, out.CtimeNsec = unix(a.Ctime) + out.SetCrtime(unix(a.Crtime)) + out.Mode = uint32(a.Mode) & 0777 + switch { + default: + out.Mode |= syscall.S_IFREG + case a.Mode&os.ModeDir != 0: + out.Mode |= syscall.S_IFDIR + case a.Mode&os.ModeDevice != 0: + if a.Mode&os.ModeCharDevice != 0 { + out.Mode |= syscall.S_IFCHR + } else { + out.Mode |= syscall.S_IFBLK + } + case a.Mode&os.ModeNamedPipe != 0: + out.Mode |= syscall.S_IFIFO + case a.Mode&os.ModeSymlink != 0: + out.Mode |= syscall.S_IFLNK + case a.Mode&os.ModeSocket != 0: + out.Mode |= syscall.S_IFSOCK + } + if a.Mode&os.ModeSetuid != 0 { + out.Mode |= syscall.S_ISUID + } + if a.Mode&os.ModeSetgid != 0 { + out.Mode |= syscall.S_ISGID + } + out.Nlink = a.Nlink + out.Uid = a.Uid + out.Gid = a.Gid + out.Rdev = a.Rdev + out.SetFlags(a.Flags) + if proto.GE(Protocol{7, 9}) { + out.Blksize = a.BlockSize + } + + return +} + +// A GetattrRequest asks for the metadata for the file denoted by r.Node. +type GetattrRequest struct { + Header `json:"-"` + Flags GetattrFlags + Handle HandleID +} + +var _ = Request(&GetattrRequest{}) + +func (r *GetattrRequest) String() string { + return fmt.Sprintf("Getattr [%s] %v fl=%v", &r.Header, r.Handle, r.Flags) +} + +// Respond replies to the request with the given response. 
+func (r *GetattrRequest) Respond(resp *GetattrResponse) { + size := attrOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*attrOut)(buf.alloc(size)) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A GetattrResponse is the response to a GetattrRequest. +type GetattrResponse struct { + Attr Attr // file attributes +} + +func (r *GetattrResponse) String() string { + return fmt.Sprintf("Getattr %v", r.Attr) +} + +// A GetxattrRequest asks for the extended attributes associated with r.Node. +type GetxattrRequest struct { + Header `json:"-"` + + // Maximum size to return. + Size uint32 + + // Name of the attribute requested. + Name string + + // Offset within extended attributes. + // + // Only valid for OS X, and then only with the resource fork + // attribute. + Position uint32 +} + +var _ = Request(&GetxattrRequest{}) + +func (r *GetxattrRequest) String() string { + return fmt.Sprintf("Getxattr [%s] %q %d @%d", &r.Header, r.Name, r.Size, r.Position) +} + +// Respond replies to the request with the given response. +func (r *GetxattrRequest) Respond(resp *GetxattrResponse) { + if r.Size == 0 { + buf := newBuffer(unsafe.Sizeof(getxattrOut{})) + out := (*getxattrOut)(buf.alloc(unsafe.Sizeof(getxattrOut{}))) + out.Size = uint32(len(resp.Xattr)) + r.respond(buf) + } else { + buf := newBuffer(uintptr(len(resp.Xattr))) + buf = append(buf, resp.Xattr...) + r.respond(buf) + } +} + +// A GetxattrResponse is the response to a GetxattrRequest. +type GetxattrResponse struct { + Xattr []byte +} + +func (r *GetxattrResponse) String() string { + return fmt.Sprintf("Getxattr %x", r.Xattr) +} + +// A ListxattrRequest asks to list the extended attributes associated with r.Node. +type ListxattrRequest struct { + Header `json:"-"` + Size uint32 // maximum size to return + Position uint32 // offset within attribute list +} + +var _ = Request(&ListxattrRequest{}) + +func (r *ListxattrRequest) String() string { + return fmt.Sprintf("Listxattr [%s] %d @%d", &r.Header, r.Size, r.Position) +} + +// Respond replies to the request with the given response. +func (r *ListxattrRequest) Respond(resp *ListxattrResponse) { + if r.Size == 0 { + buf := newBuffer(unsafe.Sizeof(getxattrOut{})) + out := (*getxattrOut)(buf.alloc(unsafe.Sizeof(getxattrOut{}))) + out.Size = uint32(len(resp.Xattr)) + r.respond(buf) + } else { + buf := newBuffer(uintptr(len(resp.Xattr))) + buf = append(buf, resp.Xattr...) + r.respond(buf) + } +} + +// A ListxattrResponse is the response to a ListxattrRequest. +type ListxattrResponse struct { + Xattr []byte +} + +func (r *ListxattrResponse) String() string { + return fmt.Sprintf("Listxattr %x", r.Xattr) +} + +// Append adds an extended attribute name to the response. +func (r *ListxattrResponse) Append(names ...string) { + for _, name := range names { + r.Xattr = append(r.Xattr, name...) + r.Xattr = append(r.Xattr, '\x00') + } +} + +// A RemovexattrRequest asks to remove an extended attribute associated with r.Node. +type RemovexattrRequest struct { + Header `json:"-"` + Name string // name of extended attribute +} + +var _ = Request(&RemovexattrRequest{}) + +func (r *RemovexattrRequest) String() string { + return fmt.Sprintf("Removexattr [%s] %q", &r.Header, r.Name) +} + +// Respond replies to the request, indicating that the attribute was removed. 
+func (r *RemovexattrRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A SetxattrRequest asks to set an extended attribute associated with a file. +type SetxattrRequest struct { + Header `json:"-"` + + // Flags can make the request fail if attribute does/not already + // exist. Unfortunately, the constants are platform-specific and + // not exposed by Go1.2. Look for XATTR_CREATE, XATTR_REPLACE. + // + // TODO improve this later + // + // TODO XATTR_CREATE and exist -> EEXIST + // + // TODO XATTR_REPLACE and not exist -> ENODATA + Flags uint32 + + // Offset within extended attributes. + // + // Only valid for OS X, and then only with the resource fork + // attribute. + Position uint32 + + Name string + Xattr []byte +} + +var _ = Request(&SetxattrRequest{}) + +func trunc(b []byte, max int) ([]byte, string) { + if len(b) > max { + return b[:max], "..." + } + return b, "" +} + +func (r *SetxattrRequest) String() string { + xattr, tail := trunc(r.Xattr, 16) + return fmt.Sprintf("Setxattr [%s] %q %x%s fl=%v @%#x", &r.Header, r.Name, xattr, tail, r.Flags, r.Position) +} + +// Respond replies to the request, indicating that the extended attribute was set. +func (r *SetxattrRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A LookupRequest asks to look up the given name in the directory named by r.Node. +type LookupRequest struct { + Header `json:"-"` + Name string +} + +var _ = Request(&LookupRequest{}) + +func (r *LookupRequest) String() string { + return fmt.Sprintf("Lookup [%s] %q", &r.Header, r.Name) +} + +// Respond replies to the request with the given response. +func (r *LookupRequest) Respond(resp *LookupResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A LookupResponse is the response to a LookupRequest. +type LookupResponse struct { + Node NodeID + Generation uint64 + EntryValid time.Duration + Attr Attr +} + +func (r *LookupResponse) string() string { + return fmt.Sprintf("%v gen=%d valid=%v attr={%v}", r.Node, r.Generation, r.EntryValid, r.Attr) +} + +func (r *LookupResponse) String() string { + return fmt.Sprintf("Lookup %s", r.string()) +} + +// An OpenRequest asks to open a file or directory +type OpenRequest struct { + Header `json:"-"` + Dir bool // is this Opendir? + Flags OpenFlags +} + +var _ = Request(&OpenRequest{}) + +func (r *OpenRequest) String() string { + return fmt.Sprintf("Open [%s] dir=%v fl=%v", &r.Header, r.Dir, r.Flags) +} + +// Respond replies to the request with the given response. +func (r *OpenRequest) Respond(resp *OpenResponse) { + buf := newBuffer(unsafe.Sizeof(openOut{})) + out := (*openOut)(buf.alloc(unsafe.Sizeof(openOut{}))) + out.Fh = uint64(resp.Handle) + out.OpenFlags = uint32(resp.Flags) + r.respond(buf) +} + +// A OpenResponse is the response to a OpenRequest. 
+type OpenResponse struct { + Handle HandleID + Flags OpenResponseFlags +} + +func (r *OpenResponse) string() string { + return fmt.Sprintf("%v fl=%v", r.Handle, r.Flags) +} + +func (r *OpenResponse) String() string { + return fmt.Sprintf("Open %s", r.string()) +} + +// A CreateRequest asks to create and open a file (not a directory). +type CreateRequest struct { + Header `json:"-"` + Name string + Flags OpenFlags + Mode os.FileMode + // Umask of the request. Not supported on OS X. + Umask os.FileMode +} + +var _ = Request(&CreateRequest{}) + +func (r *CreateRequest) String() string { + return fmt.Sprintf("Create [%s] %q fl=%v mode=%v umask=%v", &r.Header, r.Name, r.Flags, r.Mode, r.Umask) +} + +// Respond replies to the request with the given response. +func (r *CreateRequest) Respond(resp *CreateResponse) { + eSize := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(eSize + unsafe.Sizeof(openOut{})) + + e := (*entryOut)(buf.alloc(eSize)) + e.Nodeid = uint64(resp.Node) + e.Generation = resp.Generation + e.EntryValid = uint64(resp.EntryValid / time.Second) + e.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + e.AttrValid = uint64(resp.Attr.Valid / time.Second) + e.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&e.Attr, r.Header.Conn.proto) + + o := (*openOut)(buf.alloc(unsafe.Sizeof(openOut{}))) + o.Fh = uint64(resp.Handle) + o.OpenFlags = uint32(resp.Flags) + + r.respond(buf) +} + +// A CreateResponse is the response to a CreateRequest. +// It describes the created node and opened handle. +type CreateResponse struct { + LookupResponse + OpenResponse +} + +func (r *CreateResponse) String() string { + return fmt.Sprintf("Create {%s} {%s}", r.LookupResponse.string(), r.OpenResponse.string()) +} + +// A MkdirRequest asks to create (but not open) a directory. +type MkdirRequest struct { + Header `json:"-"` + Name string + Mode os.FileMode + // Umask of the request. Not supported on OS X. + Umask os.FileMode +} + +var _ = Request(&MkdirRequest{}) + +func (r *MkdirRequest) String() string { + return fmt.Sprintf("Mkdir [%s] %q mode=%v umask=%v", &r.Header, r.Name, r.Mode, r.Umask) +} + +// Respond replies to the request with the given response. +func (r *MkdirRequest) Respond(resp *MkdirResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A MkdirResponse is the response to a MkdirRequest. +type MkdirResponse struct { + LookupResponse +} + +func (r *MkdirResponse) String() string { + return fmt.Sprintf("Mkdir %v", r.LookupResponse.string()) +} + +// A ReadRequest asks to read from an open file. +type ReadRequest struct { + Header `json:"-"` + Dir bool // is this Readdir? 
+ Handle HandleID + Offset int64 + Size int + Flags ReadFlags + LockOwner uint64 + FileFlags OpenFlags +} + +var _ = Request(&ReadRequest{}) + +func (r *ReadRequest) String() string { + return fmt.Sprintf("Read [%s] %v %d @%#x dir=%v fl=%v lock=%d ffl=%v", &r.Header, r.Handle, r.Size, r.Offset, r.Dir, r.Flags, r.LockOwner, r.FileFlags) +} + +// Respond replies to the request with the given response. +func (r *ReadRequest) Respond(resp *ReadResponse) { + buf := newBuffer(uintptr(len(resp.Data))) + buf = append(buf, resp.Data...) + r.respond(buf) +} + +// A ReadResponse is the response to a ReadRequest. +type ReadResponse struct { + Data []byte +} + +func (r *ReadResponse) String() string { + return fmt.Sprintf("Read %d", len(r.Data)) +} + +type jsonReadResponse struct { + Len uint64 +} + +func (r *ReadResponse) MarshalJSON() ([]byte, error) { + j := jsonReadResponse{ + Len: uint64(len(r.Data)), + } + return json.Marshal(j) +} + +// A ReleaseRequest asks to release (close) an open file handle. +type ReleaseRequest struct { + Header `json:"-"` + Dir bool // is this Releasedir? + Handle HandleID + Flags OpenFlags // flags from OpenRequest + ReleaseFlags ReleaseFlags + LockOwner uint32 +} + +var _ = Request(&ReleaseRequest{}) + +func (r *ReleaseRequest) String() string { + return fmt.Sprintf("Release [%s] %v fl=%v rfl=%v owner=%#x", &r.Header, r.Handle, r.Flags, r.ReleaseFlags, r.LockOwner) +} + +// Respond replies to the request, indicating that the handle has been released. +func (r *ReleaseRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A DestroyRequest is sent by the kernel when unmounting the file system. +// No more requests will be received after this one, but it should still be +// responded to. +type DestroyRequest struct { + Header `json:"-"` +} + +var _ = Request(&DestroyRequest{}) + +func (r *DestroyRequest) String() string { + return fmt.Sprintf("Destroy [%s]", &r.Header) +} + +// Respond replies to the request. +func (r *DestroyRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A ForgetRequest is sent by the kernel when forgetting about r.Node +// as returned by r.N lookup requests. +type ForgetRequest struct { + Header `json:"-"` + N uint64 +} + +var _ = Request(&ForgetRequest{}) + +func (r *ForgetRequest) String() string { + return fmt.Sprintf("Forget [%s] %d", &r.Header, r.N) +} + +// Respond replies to the request, indicating that the forgetfulness has been recorded. +func (r *ForgetRequest) Respond() { + // Don't reply to forget messages. + r.noResponse() +} + +// A Dirent represents a single directory entry. +type Dirent struct { + // Inode this entry names. + Inode uint64 + + // Type of the entry, for example DT_File. + // + // Setting this is optional. The zero value (DT_Unknown) means + // callers will just need to do a Getattr when the type is + // needed. Providing a type can speed up operations + // significantly. + Type DirentType + + // Name of the entry + Name string +} + +// Type of an entry in a directory listing. +type DirentType uint32 + +const ( + // These don't quite match os.FileMode; especially there's an + // explicit unknown, instead of zero value meaning file. They + // are also not quite syscall.DT_*; nothing says the FUSE + // protocol follows those, and even if they were, we don't + // want each fs to fiddle with syscall. + + // The shift by 12 is hardcoded in the FUSE userspace + // low-level C library, so it's safe here. 
+ + DT_Unknown DirentType = 0 + DT_Socket DirentType = syscall.S_IFSOCK >> 12 + DT_Link DirentType = syscall.S_IFLNK >> 12 + DT_File DirentType = syscall.S_IFREG >> 12 + DT_Block DirentType = syscall.S_IFBLK >> 12 + DT_Dir DirentType = syscall.S_IFDIR >> 12 + DT_Char DirentType = syscall.S_IFCHR >> 12 + DT_FIFO DirentType = syscall.S_IFIFO >> 12 +) + +func (t DirentType) String() string { + switch t { + case DT_Unknown: + return "unknown" + case DT_Socket: + return "socket" + case DT_Link: + return "link" + case DT_File: + return "file" + case DT_Block: + return "block" + case DT_Dir: + return "dir" + case DT_Char: + return "char" + case DT_FIFO: + return "fifo" + } + return "invalid" +} + +// AppendDirent appends the encoded form of a directory entry to data +// and returns the resulting slice. +func AppendDirent(data []byte, dir Dirent) []byte { + de := dirent{ + Ino: dir.Inode, + Namelen: uint32(len(dir.Name)), + Type: uint32(dir.Type), + } + de.Off = uint64(len(data) + direntSize + (len(dir.Name)+7)&^7) + data = append(data, (*[direntSize]byte)(unsafe.Pointer(&de))[:]...) + data = append(data, dir.Name...) + n := direntSize + uintptr(len(dir.Name)) + if n%8 != 0 { + var pad [8]byte + data = append(data, pad[:8-n%8]...) + } + return data +} + +// A WriteRequest asks to write to an open file. +type WriteRequest struct { + Header + Handle HandleID + Offset int64 + Data []byte + Flags WriteFlags + LockOwner uint64 + FileFlags OpenFlags +} + +var _ = Request(&WriteRequest{}) + +func (r *WriteRequest) String() string { + return fmt.Sprintf("Write [%s] %v %d @%d fl=%v lock=%d ffl=%v", &r.Header, r.Handle, len(r.Data), r.Offset, r.Flags, r.LockOwner, r.FileFlags) +} + +type jsonWriteRequest struct { + Handle HandleID + Offset int64 + Len uint64 + Flags WriteFlags +} + +func (r *WriteRequest) MarshalJSON() ([]byte, error) { + j := jsonWriteRequest{ + Handle: r.Handle, + Offset: r.Offset, + Len: uint64(len(r.Data)), + Flags: r.Flags, + } + return json.Marshal(j) +} + +// Respond replies to the request with the given response. +func (r *WriteRequest) Respond(resp *WriteResponse) { + buf := newBuffer(unsafe.Sizeof(writeOut{})) + out := (*writeOut)(buf.alloc(unsafe.Sizeof(writeOut{}))) + out.Size = uint32(resp.Size) + r.respond(buf) +} + +// A WriteResponse replies to a write indicating how many bytes were written. +type WriteResponse struct { + Size int +} + +func (r *WriteResponse) String() string { + return fmt.Sprintf("Write %d", r.Size) +} + +// A SetattrRequest asks to change one or more attributes associated with a file, +// as indicated by Valid. 
+type SetattrRequest struct { + Header `json:"-"` + Valid SetattrValid + Handle HandleID + Size uint64 + Atime time.Time + Mtime time.Time + Mode os.FileMode + Uid uint32 + Gid uint32 + + // OS X only + Bkuptime time.Time + Chgtime time.Time + Crtime time.Time + Flags uint32 // see chflags(2) +} + +var _ = Request(&SetattrRequest{}) + +func (r *SetattrRequest) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "Setattr [%s]", &r.Header) + if r.Valid.Mode() { + fmt.Fprintf(&buf, " mode=%v", r.Mode) + } + if r.Valid.Uid() { + fmt.Fprintf(&buf, " uid=%d", r.Uid) + } + if r.Valid.Gid() { + fmt.Fprintf(&buf, " gid=%d", r.Gid) + } + if r.Valid.Size() { + fmt.Fprintf(&buf, " size=%d", r.Size) + } + if r.Valid.Atime() { + fmt.Fprintf(&buf, " atime=%v", r.Atime) + } + if r.Valid.AtimeNow() { + fmt.Fprintf(&buf, " atime=now") + } + if r.Valid.Mtime() { + fmt.Fprintf(&buf, " mtime=%v", r.Mtime) + } + if r.Valid.MtimeNow() { + fmt.Fprintf(&buf, " mtime=now") + } + if r.Valid.Handle() { + fmt.Fprintf(&buf, " handle=%v", r.Handle) + } else { + fmt.Fprintf(&buf, " handle=INVALID-%v", r.Handle) + } + if r.Valid.LockOwner() { + fmt.Fprintf(&buf, " lockowner") + } + if r.Valid.Crtime() { + fmt.Fprintf(&buf, " crtime=%v", r.Crtime) + } + if r.Valid.Chgtime() { + fmt.Fprintf(&buf, " chgtime=%v", r.Chgtime) + } + if r.Valid.Bkuptime() { + fmt.Fprintf(&buf, " bkuptime=%v", r.Bkuptime) + } + if r.Valid.Flags() { + fmt.Fprintf(&buf, " flags=%v", r.Flags) + } + return buf.String() +} + +// Respond replies to the request with the given response, +// giving the updated attributes. +func (r *SetattrRequest) Respond(resp *SetattrResponse) { + size := attrOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*attrOut)(buf.alloc(size)) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A SetattrResponse is the response to a SetattrRequest. +type SetattrResponse struct { + Attr Attr // file attributes +} + +func (r *SetattrResponse) String() string { + return fmt.Sprintf("Setattr %v", r.Attr) +} + +// A FlushRequest asks for the current state of an open file to be flushed +// to storage, as when a file descriptor is being closed. A single opened Handle +// may receive multiple FlushRequests over its lifetime. +type FlushRequest struct { + Header `json:"-"` + Handle HandleID + Flags uint32 + LockOwner uint64 +} + +var _ = Request(&FlushRequest{}) + +func (r *FlushRequest) String() string { + return fmt.Sprintf("Flush [%s] %v fl=%#x lk=%#x", &r.Header, r.Handle, r.Flags, r.LockOwner) +} + +// Respond replies to the request, indicating that the flush succeeded. +func (r *FlushRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A RemoveRequest asks to remove a file or directory from the +// directory r.Node. +type RemoveRequest struct { + Header `json:"-"` + Name string // name of the entry to remove + Dir bool // is this rmdir? +} + +var _ = Request(&RemoveRequest{}) + +func (r *RemoveRequest) String() string { + return fmt.Sprintf("Remove [%s] %q dir=%v", &r.Header, r.Name, r.Dir) +} + +// Respond replies to the request, indicating that the file was removed. +func (r *RemoveRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// A SymlinkRequest is a request to create a symlink making NewName point to Target. 
+type SymlinkRequest struct { + Header `json:"-"` + NewName, Target string +} + +var _ = Request(&SymlinkRequest{}) + +func (r *SymlinkRequest) String() string { + return fmt.Sprintf("Symlink [%s] from %q to target %q", &r.Header, r.NewName, r.Target) +} + +// Respond replies to the request, indicating that the symlink was created. +func (r *SymlinkRequest) Respond(resp *SymlinkResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A SymlinkResponse is the response to a SymlinkRequest. +type SymlinkResponse struct { + LookupResponse +} + +func (r *SymlinkResponse) String() string { + return fmt.Sprintf("Symlink %v", r.LookupResponse.string()) +} + +// A ReadlinkRequest is a request to read a symlink's target. +type ReadlinkRequest struct { + Header `json:"-"` +} + +var _ = Request(&ReadlinkRequest{}) + +func (r *ReadlinkRequest) String() string { + return fmt.Sprintf("Readlink [%s]", &r.Header) +} + +func (r *ReadlinkRequest) Respond(target string) { + buf := newBuffer(uintptr(len(target))) + buf = append(buf, target...) + r.respond(buf) +} + +// A LinkRequest is a request to create a hard link. +type LinkRequest struct { + Header `json:"-"` + OldNode NodeID + NewName string +} + +var _ = Request(&LinkRequest{}) + +func (r *LinkRequest) String() string { + return fmt.Sprintf("Link [%s] node %d to %q", &r.Header, r.OldNode, r.NewName) +} + +func (r *LinkRequest) Respond(resp *LookupResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +// A RenameRequest is a request to rename a file. +type RenameRequest struct { + Header `json:"-"` + NewDir NodeID + OldName, NewName string +} + +var _ = Request(&RenameRequest{}) + +func (r *RenameRequest) String() string { + return fmt.Sprintf("Rename [%s] from %q to dirnode %v %q", &r.Header, r.OldName, r.NewDir, r.NewName) +} + +func (r *RenameRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +type MknodRequest struct { + Header `json:"-"` + Name string + Mode os.FileMode + Rdev uint32 + // Umask of the request. Not supported on OS X. 
+ Umask os.FileMode +} + +var _ = Request(&MknodRequest{}) + +func (r *MknodRequest) String() string { + return fmt.Sprintf("Mknod [%s] Name %q mode=%v umask=%v rdev=%d", &r.Header, r.Name, r.Mode, r.Umask, r.Rdev) +} + +func (r *MknodRequest) Respond(resp *LookupResponse) { + size := entryOutSize(r.Header.Conn.proto) + buf := newBuffer(size) + out := (*entryOut)(buf.alloc(size)) + out.Nodeid = uint64(resp.Node) + out.Generation = resp.Generation + out.EntryValid = uint64(resp.EntryValid / time.Second) + out.EntryValidNsec = uint32(resp.EntryValid % time.Second / time.Nanosecond) + out.AttrValid = uint64(resp.Attr.Valid / time.Second) + out.AttrValidNsec = uint32(resp.Attr.Valid % time.Second / time.Nanosecond) + resp.Attr.attr(&out.Attr, r.Header.Conn.proto) + r.respond(buf) +} + +type FsyncRequest struct { + Header `json:"-"` + Handle HandleID + // TODO bit 1 is datasync, not well documented upstream + Flags uint32 + Dir bool +} + +var _ = Request(&FsyncRequest{}) + +func (r *FsyncRequest) String() string { + return fmt.Sprintf("Fsync [%s] Handle %v Flags %v", &r.Header, r.Handle, r.Flags) +} + +func (r *FsyncRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} + +// An InterruptRequest is a request to interrupt another pending request. The +// response to that request should return an error status of EINTR. +type InterruptRequest struct { + Header `json:"-"` + IntrID RequestID // ID of the request to be interrupt. +} + +var _ = Request(&InterruptRequest{}) + +func (r *InterruptRequest) Respond() { + // nothing to do here + r.noResponse() +} + +func (r *InterruptRequest) String() string { + return fmt.Sprintf("Interrupt [%s] ID %v", &r.Header, r.IntrID) +} + +// An ExchangeDataRequest is a request to exchange the contents of two +// files, while leaving most metadata untouched. +// +// This request comes from OS X exchangedata(2) and represents its +// specific semantics. Crucially, it is very different from Linux +// renameat(2) RENAME_EXCHANGE. +// +// https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/exchangedata.2.html +type ExchangeDataRequest struct { + Header `json:"-"` + OldDir, NewDir NodeID + OldName, NewName string + // TODO options +} + +var _ = Request(&ExchangeDataRequest{}) + +func (r *ExchangeDataRequest) String() string { + // TODO options + return fmt.Sprintf("ExchangeData [%s] %v %q and %v %q", &r.Header, r.OldDir, r.OldName, r.NewDir, r.NewName) +} + +func (r *ExchangeDataRequest) Respond() { + buf := newBuffer(0) + r.respond(buf) +} diff --git a/vendor/bazil.org/fuse/fuse_darwin.go b/vendor/bazil.org/fuse/fuse_darwin.go new file mode 100644 index 000000000..b58dca97d --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_darwin.go @@ -0,0 +1,9 @@ +package fuse + +// Maximum file write size we are prepared to receive from the kernel. +// +// This value has to be >=16MB or OSXFUSE (3.4.0 observed) will +// forcibly close the /dev/fuse file descriptor on a Setxattr with a +// 16MB value. See TestSetxattr16MB and +// https://github.com/bazil/fuse/issues/42 +const maxWrite = 16 * 1024 * 1024 diff --git a/vendor/bazil.org/fuse/fuse_freebsd.go b/vendor/bazil.org/fuse/fuse_freebsd.go new file mode 100644 index 000000000..4aa83a0d4 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_freebsd.go @@ -0,0 +1,6 @@ +package fuse + +// Maximum file write size we are prepared to receive from the kernel. +// +// This number is just a guess. 
+const maxWrite = 128 * 1024 diff --git a/vendor/bazil.org/fuse/fuse_kernel.go b/vendor/bazil.org/fuse/fuse_kernel.go new file mode 100644 index 000000000..87c5ca1dc --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel.go @@ -0,0 +1,774 @@ +// See the file LICENSE for copyright and licensing information. + +// Derived from FUSE's fuse_kernel.h, which carries this notice: +/* + This file defines the kernel interface of FUSE + Copyright (C) 2001-2007 Miklos Szeredi + + + This -- and only this -- header file may also be distributed under + the terms of the BSD Licence as follows: + + Copyright (C) 2001-2007 Miklos Szeredi. All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. +*/ + +package fuse + +import ( + "fmt" + "syscall" + "unsafe" +) + +// The FUSE version implemented by the package. +const ( + protoVersionMinMajor = 7 + protoVersionMinMinor = 8 + protoVersionMaxMajor = 7 + protoVersionMaxMinor = 12 +) + +const ( + rootID = 1 +) + +type kstatfs struct { + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Bsize uint32 + Namelen uint32 + Frsize uint32 + _ uint32 + Spare [6]uint32 +} + +type fileLock struct { + Start uint64 + End uint64 + Type uint32 + Pid uint32 +} + +// GetattrFlags are bit flags that can be seen in GetattrRequest. +type GetattrFlags uint32 + +const ( + // Indicates the handle is valid. + GetattrFh GetattrFlags = 1 << 0 +) + +var getattrFlagsNames = []flagName{ + {uint32(GetattrFh), "GetattrFh"}, +} + +func (fl GetattrFlags) String() string { + return flagString(uint32(fl), getattrFlagsNames) +} + +// The SetattrValid are bit flags describing which fields in the SetattrRequest +// are included in the change. +type SetattrValid uint32 + +const ( + SetattrMode SetattrValid = 1 << 0 + SetattrUid SetattrValid = 1 << 1 + SetattrGid SetattrValid = 1 << 2 + SetattrSize SetattrValid = 1 << 3 + SetattrAtime SetattrValid = 1 << 4 + SetattrMtime SetattrValid = 1 << 5 + SetattrHandle SetattrValid = 1 << 6 + + // Linux only(?) 
+ SetattrAtimeNow SetattrValid = 1 << 7 + SetattrMtimeNow SetattrValid = 1 << 8 + SetattrLockOwner SetattrValid = 1 << 9 // http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html + + // OS X only + SetattrCrtime SetattrValid = 1 << 28 + SetattrChgtime SetattrValid = 1 << 29 + SetattrBkuptime SetattrValid = 1 << 30 + SetattrFlags SetattrValid = 1 << 31 +) + +func (fl SetattrValid) Mode() bool { return fl&SetattrMode != 0 } +func (fl SetattrValid) Uid() bool { return fl&SetattrUid != 0 } +func (fl SetattrValid) Gid() bool { return fl&SetattrGid != 0 } +func (fl SetattrValid) Size() bool { return fl&SetattrSize != 0 } +func (fl SetattrValid) Atime() bool { return fl&SetattrAtime != 0 } +func (fl SetattrValid) Mtime() bool { return fl&SetattrMtime != 0 } +func (fl SetattrValid) Handle() bool { return fl&SetattrHandle != 0 } +func (fl SetattrValid) AtimeNow() bool { return fl&SetattrAtimeNow != 0 } +func (fl SetattrValid) MtimeNow() bool { return fl&SetattrMtimeNow != 0 } +func (fl SetattrValid) LockOwner() bool { return fl&SetattrLockOwner != 0 } +func (fl SetattrValid) Crtime() bool { return fl&SetattrCrtime != 0 } +func (fl SetattrValid) Chgtime() bool { return fl&SetattrChgtime != 0 } +func (fl SetattrValid) Bkuptime() bool { return fl&SetattrBkuptime != 0 } +func (fl SetattrValid) Flags() bool { return fl&SetattrFlags != 0 } + +func (fl SetattrValid) String() string { + return flagString(uint32(fl), setattrValidNames) +} + +var setattrValidNames = []flagName{ + {uint32(SetattrMode), "SetattrMode"}, + {uint32(SetattrUid), "SetattrUid"}, + {uint32(SetattrGid), "SetattrGid"}, + {uint32(SetattrSize), "SetattrSize"}, + {uint32(SetattrAtime), "SetattrAtime"}, + {uint32(SetattrMtime), "SetattrMtime"}, + {uint32(SetattrHandle), "SetattrHandle"}, + {uint32(SetattrAtimeNow), "SetattrAtimeNow"}, + {uint32(SetattrMtimeNow), "SetattrMtimeNow"}, + {uint32(SetattrLockOwner), "SetattrLockOwner"}, + {uint32(SetattrCrtime), "SetattrCrtime"}, + {uint32(SetattrChgtime), "SetattrChgtime"}, + {uint32(SetattrBkuptime), "SetattrBkuptime"}, + {uint32(SetattrFlags), "SetattrFlags"}, +} + +// Flags that can be seen in OpenRequest.Flags. +const ( + // Access modes. These are not 1-bit flags, but alternatives where + // only one can be chosen. See the IsReadOnly etc convenience + // methods. + OpenReadOnly OpenFlags = syscall.O_RDONLY + OpenWriteOnly OpenFlags = syscall.O_WRONLY + OpenReadWrite OpenFlags = syscall.O_RDWR + + // File was opened in append-only mode, all writes will go to end + // of file. OS X does not provide this information. + OpenAppend OpenFlags = syscall.O_APPEND + OpenCreate OpenFlags = syscall.O_CREAT + OpenDirectory OpenFlags = syscall.O_DIRECTORY + OpenExclusive OpenFlags = syscall.O_EXCL + OpenNonblock OpenFlags = syscall.O_NONBLOCK + OpenSync OpenFlags = syscall.O_SYNC + OpenTruncate OpenFlags = syscall.O_TRUNC +) + +// OpenAccessModeMask is a bitmask that separates the access mode +// from the other flags in OpenFlags. +const OpenAccessModeMask OpenFlags = syscall.O_ACCMODE + +// OpenFlags are the O_FOO flags passed to open/create/etc calls. For +// example, os.O_WRONLY | os.O_APPEND. +type OpenFlags uint32 + +func (fl OpenFlags) String() string { + // O_RDONLY, O_RWONLY, O_RDWR are not flags + s := accModeName(fl & OpenAccessModeMask) + flags := uint32(fl &^ OpenAccessModeMask) + if flags != 0 { + s = s + "+" + flagString(flags, openFlagNames) + } + return s +} + +// Return true if OpenReadOnly is set. 
+func (fl OpenFlags) IsReadOnly() bool { + return fl&OpenAccessModeMask == OpenReadOnly +} + +// Return true if OpenWriteOnly is set. +func (fl OpenFlags) IsWriteOnly() bool { + return fl&OpenAccessModeMask == OpenWriteOnly +} + +// Return true if OpenReadWrite is set. +func (fl OpenFlags) IsReadWrite() bool { + return fl&OpenAccessModeMask == OpenReadWrite +} + +func accModeName(flags OpenFlags) string { + switch flags { + case OpenReadOnly: + return "OpenReadOnly" + case OpenWriteOnly: + return "OpenWriteOnly" + case OpenReadWrite: + return "OpenReadWrite" + default: + return "" + } +} + +var openFlagNames = []flagName{ + {uint32(OpenAppend), "OpenAppend"}, + {uint32(OpenCreate), "OpenCreate"}, + {uint32(OpenDirectory), "OpenDirectory"}, + {uint32(OpenExclusive), "OpenExclusive"}, + {uint32(OpenNonblock), "OpenNonblock"}, + {uint32(OpenSync), "OpenSync"}, + {uint32(OpenTruncate), "OpenTruncate"}, +} + +// The OpenResponseFlags are returned in the OpenResponse. +type OpenResponseFlags uint32 + +const ( + OpenDirectIO OpenResponseFlags = 1 << 0 // bypass page cache for this open file + OpenKeepCache OpenResponseFlags = 1 << 1 // don't invalidate the data cache on open + OpenNonSeekable OpenResponseFlags = 1 << 2 // mark the file as non-seekable (not supported on OS X) + + OpenPurgeAttr OpenResponseFlags = 1 << 30 // OS X + OpenPurgeUBC OpenResponseFlags = 1 << 31 // OS X +) + +func (fl OpenResponseFlags) String() string { + return flagString(uint32(fl), openResponseFlagNames) +} + +var openResponseFlagNames = []flagName{ + {uint32(OpenDirectIO), "OpenDirectIO"}, + {uint32(OpenKeepCache), "OpenKeepCache"}, + {uint32(OpenNonSeekable), "OpenNonSeekable"}, + {uint32(OpenPurgeAttr), "OpenPurgeAttr"}, + {uint32(OpenPurgeUBC), "OpenPurgeUBC"}, +} + +// The InitFlags are used in the Init exchange. +type InitFlags uint32 + +const ( + InitAsyncRead InitFlags = 1 << 0 + InitPosixLocks InitFlags = 1 << 1 + InitFileOps InitFlags = 1 << 2 + InitAtomicTrunc InitFlags = 1 << 3 + InitExportSupport InitFlags = 1 << 4 + InitBigWrites InitFlags = 1 << 5 + // Do not mask file access modes with umask. Not supported on OS X. 
+ InitDontMask InitFlags = 1 << 6 + InitSpliceWrite InitFlags = 1 << 7 + InitSpliceMove InitFlags = 1 << 8 + InitSpliceRead InitFlags = 1 << 9 + InitFlockLocks InitFlags = 1 << 10 + InitHasIoctlDir InitFlags = 1 << 11 + InitAutoInvalData InitFlags = 1 << 12 + InitDoReaddirplus InitFlags = 1 << 13 + InitReaddirplusAuto InitFlags = 1 << 14 + InitAsyncDIO InitFlags = 1 << 15 + InitWritebackCache InitFlags = 1 << 16 + InitNoOpenSupport InitFlags = 1 << 17 + + InitCaseSensitive InitFlags = 1 << 29 // OS X only + InitVolRename InitFlags = 1 << 30 // OS X only + InitXtimes InitFlags = 1 << 31 // OS X only +) + +type flagName struct { + bit uint32 + name string +} + +var initFlagNames = []flagName{ + {uint32(InitAsyncRead), "InitAsyncRead"}, + {uint32(InitPosixLocks), "InitPosixLocks"}, + {uint32(InitFileOps), "InitFileOps"}, + {uint32(InitAtomicTrunc), "InitAtomicTrunc"}, + {uint32(InitExportSupport), "InitExportSupport"}, + {uint32(InitBigWrites), "InitBigWrites"}, + {uint32(InitDontMask), "InitDontMask"}, + {uint32(InitSpliceWrite), "InitSpliceWrite"}, + {uint32(InitSpliceMove), "InitSpliceMove"}, + {uint32(InitSpliceRead), "InitSpliceRead"}, + {uint32(InitFlockLocks), "InitFlockLocks"}, + {uint32(InitHasIoctlDir), "InitHasIoctlDir"}, + {uint32(InitAutoInvalData), "InitAutoInvalData"}, + {uint32(InitDoReaddirplus), "InitDoReaddirplus"}, + {uint32(InitReaddirplusAuto), "InitReaddirplusAuto"}, + {uint32(InitAsyncDIO), "InitAsyncDIO"}, + {uint32(InitWritebackCache), "InitWritebackCache"}, + {uint32(InitNoOpenSupport), "InitNoOpenSupport"}, + + {uint32(InitCaseSensitive), "InitCaseSensitive"}, + {uint32(InitVolRename), "InitVolRename"}, + {uint32(InitXtimes), "InitXtimes"}, +} + +func (fl InitFlags) String() string { + return flagString(uint32(fl), initFlagNames) +} + +func flagString(f uint32, names []flagName) string { + var s string + + if f == 0 { + return "0" + } + + for _, n := range names { + if f&n.bit != 0 { + s += "+" + n.name + f &^= n.bit + } + } + if f != 0 { + s += fmt.Sprintf("%+#x", f) + } + return s[1:] +} + +// The ReleaseFlags are used in the Release exchange. +type ReleaseFlags uint32 + +const ( + ReleaseFlush ReleaseFlags = 1 << 0 +) + +func (fl ReleaseFlags) String() string { + return flagString(uint32(fl), releaseFlagNames) +} + +var releaseFlagNames = []flagName{ + {uint32(ReleaseFlush), "ReleaseFlush"}, +} + +// Opcodes +const ( + opLookup = 1 + opForget = 2 // no reply + opGetattr = 3 + opSetattr = 4 + opReadlink = 5 + opSymlink = 6 + opMknod = 8 + opMkdir = 9 + opUnlink = 10 + opRmdir = 11 + opRename = 12 + opLink = 13 + opOpen = 14 + opRead = 15 + opWrite = 16 + opStatfs = 17 + opRelease = 18 + opFsync = 20 + opSetxattr = 21 + opGetxattr = 22 + opListxattr = 23 + opRemovexattr = 24 + opFlush = 25 + opInit = 26 + opOpendir = 27 + opReaddir = 28 + opReleasedir = 29 + opFsyncdir = 30 + opGetlk = 31 + opSetlk = 32 + opSetlkw = 33 + opAccess = 34 + opCreate = 35 + opInterrupt = 36 + opBmap = 37 + opDestroy = 38 + opIoctl = 39 // Linux? + opPoll = 40 // Linux? 
+ + // OS X + opSetvolname = 61 + opGetxtimes = 62 + opExchange = 63 +) + +type entryOut struct { + Nodeid uint64 // Inode ID + Generation uint64 // Inode generation + EntryValid uint64 // Cache timeout for the name + AttrValid uint64 // Cache timeout for the attributes + EntryValidNsec uint32 + AttrValidNsec uint32 + Attr attr +} + +func entryOutSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(entryOut{}.Attr) + unsafe.Offsetof(entryOut{}.Attr.Blksize) + default: + return unsafe.Sizeof(entryOut{}) + } +} + +type forgetIn struct { + Nlookup uint64 +} + +type getattrIn struct { + GetattrFlags uint32 + _ uint32 + Fh uint64 +} + +type attrOut struct { + AttrValid uint64 // Cache timeout for the attributes + AttrValidNsec uint32 + _ uint32 + Attr attr +} + +func attrOutSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(attrOut{}.Attr) + unsafe.Offsetof(attrOut{}.Attr.Blksize) + default: + return unsafe.Sizeof(attrOut{}) + } +} + +// OS X +type getxtimesOut struct { + Bkuptime uint64 + Crtime uint64 + BkuptimeNsec uint32 + CrtimeNsec uint32 +} + +type mknodIn struct { + Mode uint32 + Rdev uint32 + Umask uint32 + _ uint32 + // "filename\x00" follows. +} + +func mknodInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 12}): + return unsafe.Offsetof(mknodIn{}.Umask) + default: + return unsafe.Sizeof(mknodIn{}) + } +} + +type mkdirIn struct { + Mode uint32 + Umask uint32 + // filename follows +} + +func mkdirInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 12}): + return unsafe.Offsetof(mkdirIn{}.Umask) + 4 + default: + return unsafe.Sizeof(mkdirIn{}) + } +} + +type renameIn struct { + Newdir uint64 + // "oldname\x00newname\x00" follows +} + +// OS X +type exchangeIn struct { + Olddir uint64 + Newdir uint64 + Options uint64 + // "oldname\x00newname\x00" follows +} + +type linkIn struct { + Oldnodeid uint64 +} + +type setattrInCommon struct { + Valid uint32 + _ uint32 + Fh uint64 + Size uint64 + LockOwner uint64 // unused on OS X? + Atime uint64 + Mtime uint64 + Unused2 uint64 + AtimeNsec uint32 + MtimeNsec uint32 + Unused3 uint32 + Mode uint32 + Unused4 uint32 + Uid uint32 + Gid uint32 + Unused5 uint32 +} + +type openIn struct { + Flags uint32 + Unused uint32 +} + +type openOut struct { + Fh uint64 + OpenFlags uint32 + _ uint32 +} + +type createIn struct { + Flags uint32 + Mode uint32 + Umask uint32 + _ uint32 +} + +func createInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 12}): + return unsafe.Offsetof(createIn{}.Umask) + default: + return unsafe.Sizeof(createIn{}) + } +} + +type releaseIn struct { + Fh uint64 + Flags uint32 + ReleaseFlags uint32 + LockOwner uint32 +} + +type flushIn struct { + Fh uint64 + FlushFlags uint32 + _ uint32 + LockOwner uint64 +} + +type readIn struct { + Fh uint64 + Offset uint64 + Size uint32 + ReadFlags uint32 + LockOwner uint64 + Flags uint32 + _ uint32 +} + +func readInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(readIn{}.ReadFlags) + 4 + default: + return unsafe.Sizeof(readIn{}) + } +} + +// The ReadFlags are passed in ReadRequest. +type ReadFlags uint32 + +const ( + // LockOwner field is valid. 
+ ReadLockOwner ReadFlags = 1 << 1 +) + +var readFlagNames = []flagName{ + {uint32(ReadLockOwner), "ReadLockOwner"}, +} + +func (fl ReadFlags) String() string { + return flagString(uint32(fl), readFlagNames) +} + +type writeIn struct { + Fh uint64 + Offset uint64 + Size uint32 + WriteFlags uint32 + LockOwner uint64 + Flags uint32 + _ uint32 +} + +func writeInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(writeIn{}.LockOwner) + default: + return unsafe.Sizeof(writeIn{}) + } +} + +type writeOut struct { + Size uint32 + _ uint32 +} + +// The WriteFlags are passed in WriteRequest. +type WriteFlags uint32 + +const ( + WriteCache WriteFlags = 1 << 0 + // LockOwner field is valid. + WriteLockOwner WriteFlags = 1 << 1 +) + +var writeFlagNames = []flagName{ + {uint32(WriteCache), "WriteCache"}, + {uint32(WriteLockOwner), "WriteLockOwner"}, +} + +func (fl WriteFlags) String() string { + return flagString(uint32(fl), writeFlagNames) +} + +const compatStatfsSize = 48 + +type statfsOut struct { + St kstatfs +} + +type fsyncIn struct { + Fh uint64 + FsyncFlags uint32 + _ uint32 +} + +type setxattrInCommon struct { + Size uint32 + Flags uint32 +} + +func (setxattrInCommon) position() uint32 { + return 0 +} + +type getxattrInCommon struct { + Size uint32 + _ uint32 +} + +func (getxattrInCommon) position() uint32 { + return 0 +} + +type getxattrOut struct { + Size uint32 + _ uint32 +} + +type lkIn struct { + Fh uint64 + Owner uint64 + Lk fileLock + LkFlags uint32 + _ uint32 +} + +func lkInSize(p Protocol) uintptr { + switch { + case p.LT(Protocol{7, 9}): + return unsafe.Offsetof(lkIn{}.LkFlags) + default: + return unsafe.Sizeof(lkIn{}) + } +} + +type lkOut struct { + Lk fileLock +} + +type accessIn struct { + Mask uint32 + _ uint32 +} + +type initIn struct { + Major uint32 + Minor uint32 + MaxReadahead uint32 + Flags uint32 +} + +const initInSize = int(unsafe.Sizeof(initIn{})) + +type initOut struct { + Major uint32 + Minor uint32 + MaxReadahead uint32 + Flags uint32 + Unused uint32 + MaxWrite uint32 +} + +type interruptIn struct { + Unique uint64 +} + +type bmapIn struct { + Block uint64 + BlockSize uint32 + _ uint32 +} + +type bmapOut struct { + Block uint64 +} + +type inHeader struct { + Len uint32 + Opcode uint32 + Unique uint64 + Nodeid uint64 + Uid uint32 + Gid uint32 + Pid uint32 + _ uint32 +} + +const inHeaderSize = int(unsafe.Sizeof(inHeader{})) + +type outHeader struct { + Len uint32 + Error int32 + Unique uint64 +} + +type dirent struct { + Ino uint64 + Off uint64 + Namelen uint32 + Type uint32 + Name [0]byte +} + +const direntSize = 8 + 8 + 4 + 4 + +const ( + notifyCodePoll int32 = 1 + notifyCodeInvalInode int32 = 2 + notifyCodeInvalEntry int32 = 3 +) + +type notifyInvalInodeOut struct { + Ino uint64 + Off int64 + Len int64 +} + +type notifyInvalEntryOut struct { + Parent uint64 + Namelen uint32 + _ uint32 +} diff --git a/vendor/bazil.org/fuse/fuse_kernel_darwin.go b/vendor/bazil.org/fuse/fuse_kernel_darwin.go new file mode 100644 index 000000000..b9873fdf3 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel_darwin.go @@ -0,0 +1,88 @@ +package fuse + +import ( + "time" +) + +type attr struct { + Ino uint64 + Size uint64 + Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + Crtime_ uint64 // OS X only + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + CrtimeNsec uint32 // OS X only + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Flags_ uint32 // OS X only; see chflags(2) + Blksize uint32 + padding uint32 +} + 
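Editor's note, not part of the vendored diff: the exported flag types added above (OpenFlags, OpenResponseFlags, InitFlags, ReadFlags, WriteFlags) all render through flagString, which prints the access-mode name first and then appends each named single-bit flag in table order, with any unnamed remainder shown in hex. A minimal standalone sketch of that behavior, assuming only the exported identifiers shown in this vendored fuse_kernel.go:

package main

import (
	"fmt"
	"syscall"

	"bazil.org/fuse"
)

func main() {
	// The access mode renders first, then each named single-bit flag in table order.
	fl := fuse.OpenFlags(syscall.O_RDWR | syscall.O_CREAT | syscall.O_TRUNC)
	fmt.Println(fl) // OpenReadWrite+OpenCreate+OpenTruncate

	// Response flags use the same flag-name table mechanism.
	rfl := fuse.OpenDirectIO | fuse.OpenKeepCache
	fmt.Println(rfl) // OpenDirectIO+OpenKeepCache
}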
+func (a *attr) SetCrtime(s uint64, ns uint32) { + a.Crtime_, a.CrtimeNsec = s, ns +} + +func (a *attr) SetFlags(f uint32) { + a.Flags_ = f +} + +type setattrIn struct { + setattrInCommon + + // OS X only + Bkuptime_ uint64 + Chgtime_ uint64 + Crtime uint64 + BkuptimeNsec uint32 + ChgtimeNsec uint32 + CrtimeNsec uint32 + Flags_ uint32 // see chflags(2) +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Unix(int64(in.Bkuptime_), int64(in.BkuptimeNsec)) +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Unix(int64(in.Chgtime_), int64(in.ChgtimeNsec)) +} + +func (in *setattrIn) Flags() uint32 { + return in.Flags_ +} + +func openFlags(flags uint32) OpenFlags { + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon + + // OS X only + Position uint32 + Padding uint32 +} + +func (g *getxattrIn) position() uint32 { + return g.Position +} + +type setxattrIn struct { + setxattrInCommon + + // OS X only + Position uint32 + Padding uint32 +} + +func (s *setxattrIn) position() uint32 { + return s.Position +} diff --git a/vendor/bazil.org/fuse/fuse_kernel_freebsd.go b/vendor/bazil.org/fuse/fuse_kernel_freebsd.go new file mode 100644 index 000000000..b1141e41d --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel_freebsd.go @@ -0,0 +1,62 @@ +package fuse + +import "time" + +type attr struct { + Ino uint64 + Size uint64 + Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Blksize uint32 + padding uint32 +} + +func (a *attr) Crtime() time.Time { + return time.Time{} +} + +func (a *attr) SetCrtime(s uint64, ns uint32) { + // ignored on freebsd +} + +func (a *attr) SetFlags(f uint32) { + // ignored on freebsd +} + +type setattrIn struct { + setattrInCommon +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Flags() uint32 { + return 0 +} + +func openFlags(flags uint32) OpenFlags { + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon +} + +type setxattrIn struct { + setxattrInCommon +} diff --git a/vendor/bazil.org/fuse/fuse_kernel_linux.go b/vendor/bazil.org/fuse/fuse_kernel_linux.go new file mode 100644 index 000000000..d3ba86617 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel_linux.go @@ -0,0 +1,70 @@ +package fuse + +import "time" + +type attr struct { + Ino uint64 + Size uint64 + Blocks uint64 + Atime uint64 + Mtime uint64 + Ctime uint64 + AtimeNsec uint32 + MtimeNsec uint32 + CtimeNsec uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint32 + Blksize uint32 + padding uint32 +} + +func (a *attr) Crtime() time.Time { + return time.Time{} +} + +func (a *attr) SetCrtime(s uint64, ns uint32) { + // Ignored on Linux. +} + +func (a *attr) SetFlags(f uint32) { + // Ignored on Linux. 
+} + +type setattrIn struct { + setattrInCommon +} + +func (in *setattrIn) BkupTime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Chgtime() time.Time { + return time.Time{} +} + +func (in *setattrIn) Flags() uint32 { + return 0 +} + +func openFlags(flags uint32) OpenFlags { + // on amd64, the 32-bit O_LARGEFILE flag is always seen; + // on i386, the flag probably depends on the app + // requesting, but in any case should be utterly + // uninteresting to us here; our kernel protocol messages + // are not directly related to the client app's kernel + // API/ABI + flags &^= 0x8000 + + return OpenFlags(flags) +} + +type getxattrIn struct { + getxattrInCommon +} + +type setxattrIn struct { + setxattrInCommon +} diff --git a/vendor/bazil.org/fuse/fuse_kernel_std.go b/vendor/bazil.org/fuse/fuse_kernel_std.go new file mode 100644 index 000000000..074cfd322 --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_kernel_std.go @@ -0,0 +1 @@ +package fuse diff --git a/vendor/bazil.org/fuse/fuse_linux.go b/vendor/bazil.org/fuse/fuse_linux.go new file mode 100644 index 000000000..5fb96f9ae --- /dev/null +++ b/vendor/bazil.org/fuse/fuse_linux.go @@ -0,0 +1,7 @@ +package fuse + +// Maximum file write size we are prepared to receive from the kernel. +// +// Linux 4.2.0 has been observed to cap this value at 128kB +// (FUSE_MAX_PAGES_PER_REQ=32, 4kB pages). +const maxWrite = 128 * 1024 diff --git a/vendor/bazil.org/fuse/fuseutil/fuseutil.go b/vendor/bazil.org/fuse/fuseutil/fuseutil.go new file mode 100644 index 000000000..b3f52b73b --- /dev/null +++ b/vendor/bazil.org/fuse/fuseutil/fuseutil.go @@ -0,0 +1,20 @@ +package fuseutil // import "bazil.org/fuse/fuseutil" + +import ( + "bazil.org/fuse" +) + +// HandleRead handles a read request assuming that data is the entire file content. +// It adjusts the amount returned in resp according to req.Offset and req.Size. +func HandleRead(req *fuse.ReadRequest, resp *fuse.ReadResponse, data []byte) { + if req.Offset >= int64(len(data)) { + data = nil + } else { + data = data[req.Offset:] + } + if len(data) > req.Size { + data = data[:req.Size] + } + n := copy(resp.Data[:req.Size], data) + resp.Data = resp.Data[:n] +} diff --git a/vendor/bazil.org/fuse/mount.go b/vendor/bazil.org/fuse/mount.go new file mode 100644 index 000000000..8054e9021 --- /dev/null +++ b/vendor/bazil.org/fuse/mount.go @@ -0,0 +1,38 @@ +package fuse + +import ( + "bufio" + "errors" + "io" + "log" + "sync" +) + +var ( + // ErrOSXFUSENotFound is returned from Mount when the OSXFUSE + // installation is not detected. + // + // Only happens on OS X. Make sure OSXFUSE is installed, or see + // OSXFUSELocations for customization. 
+ ErrOSXFUSENotFound = errors.New("cannot locate OSXFUSE") +) + +func neverIgnoreLine(line string) bool { + return false +} + +func lineLogger(wg *sync.WaitGroup, prefix string, ignore func(line string) bool, r io.ReadCloser) { + defer wg.Done() + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + if ignore(line) { + continue + } + log.Printf("%s: %s", prefix, line) + } + if err := scanner.Err(); err != nil { + log.Printf("%s, error reading: %v", prefix, err) + } +} diff --git a/vendor/bazil.org/fuse/mount_darwin.go b/vendor/bazil.org/fuse/mount_darwin.go new file mode 100644 index 000000000..c1c36e62b --- /dev/null +++ b/vendor/bazil.org/fuse/mount_darwin.go @@ -0,0 +1,208 @@ +package fuse + +import ( + "errors" + "fmt" + "log" + "os" + "os/exec" + "path" + "strconv" + "strings" + "sync" + "syscall" +) + +var ( + errNoAvail = errors.New("no available fuse devices") + errNotLoaded = errors.New("osxfuse is not loaded") +) + +func loadOSXFUSE(bin string) error { + cmd := exec.Command(bin) + cmd.Dir = "/" + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + return err +} + +func openOSXFUSEDev(devPrefix string) (*os.File, error) { + var f *os.File + var err error + for i := uint64(0); ; i++ { + path := devPrefix + strconv.FormatUint(i, 10) + f, err = os.OpenFile(path, os.O_RDWR, 0000) + if os.IsNotExist(err) { + if i == 0 { + // not even the first device was found -> fuse is not loaded + return nil, errNotLoaded + } + + // we've run out of kernel-provided devices + return nil, errNoAvail + } + + if err2, ok := err.(*os.PathError); ok && err2.Err == syscall.EBUSY { + // try the next one + continue + } + + if err != nil { + return nil, err + } + return f, nil + } +} + +func handleMountOSXFUSE(helperName string, errCh chan<- error) func(line string) (ignore bool) { + var noMountpointPrefix = helperName + `: ` + const noMountpointSuffix = `: No such file or directory` + return func(line string) (ignore bool) { + if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) { + // re-extract it from the error message in case some layer + // changed the path + mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)] + err := &MountpointDoesNotExistError{ + Path: mountpoint, + } + select { + case errCh <- err: + return true + default: + // not the first error; fall back to logging it + return false + } + } + + return false + } +} + +// isBoringMountOSXFUSEError returns whether the Wait error is +// uninteresting; exit status 64 is. +func isBoringMountOSXFUSEError(err error) bool { + if err, ok := err.(*exec.ExitError); ok && err.Exited() { + if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 64 { + return true + } + } + return false +} + +func callMount(bin string, daemonVar string, dir string, conf *mountConfig, f *os.File, ready chan<- struct{}, errp *error) error { + for k, v := range conf.options { + if strings.Contains(k, ",") || strings.Contains(v, ",") { + // Silly limitation but the mount helper does not + // understand any escaping. See TestMountOptionCommaError. + return fmt.Errorf("mount options cannot contain commas on darwin: %q=%q", k, v) + } + } + cmd := exec.Command( + bin, + "-o", conf.getOptions(), + // Tell osxfuse-kext how large our buffer is. It must split + // writes larger than this into multiple writes. + // + // OSXFUSE seems to ignore InitResponse.MaxWrite, and uses + // this instead. 
+ "-o", "iosize="+strconv.FormatUint(maxWrite, 10), + // refers to fd passed in cmd.ExtraFiles + "3", + dir, + ) + cmd.ExtraFiles = []*os.File{f} + cmd.Env = os.Environ() + // OSXFUSE <3.3.0 + cmd.Env = append(cmd.Env, "MOUNT_FUSEFS_CALL_BY_LIB=") + // OSXFUSE >=3.3.0 + cmd.Env = append(cmd.Env, "MOUNT_OSXFUSE_CALL_BY_LIB=") + + daemon := os.Args[0] + if daemonVar != "" { + cmd.Env = append(cmd.Env, daemonVar+"="+daemon) + } + + stdout, err := cmd.StdoutPipe() + if err != nil { + return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err) + } + stderr, err := cmd.StderrPipe() + if err != nil { + return fmt.Errorf("setting up mount_osxfusefs stderr: %v", err) + } + + if err := cmd.Start(); err != nil { + return fmt.Errorf("mount_osxfusefs: %v", err) + } + helperErrCh := make(chan error, 1) + go func() { + var wg sync.WaitGroup + wg.Add(2) + go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout) + helperName := path.Base(bin) + go lineLogger(&wg, "mount helper error", handleMountOSXFUSE(helperName, helperErrCh), stderr) + wg.Wait() + if err := cmd.Wait(); err != nil { + // see if we have a better error to report + select { + case helperErr := <-helperErrCh: + // log the Wait error if it's not what we expected + if !isBoringMountOSXFUSEError(err) { + log.Printf("mount helper failed: %v", err) + } + // and now return what we grabbed from stderr as the real + // error + *errp = helperErr + close(ready) + return + default: + // nope, fall back to generic message + } + + *errp = fmt.Errorf("mount_osxfusefs: %v", err) + close(ready) + return + } + + *errp = nil + close(ready) + }() + return nil +} + +func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) { + locations := conf.osxfuseLocations + if locations == nil { + locations = []OSXFUSEPaths{ + OSXFUSELocationV3, + OSXFUSELocationV2, + } + } + for _, loc := range locations { + if _, err := os.Stat(loc.Mount); os.IsNotExist(err) { + // try the other locations + continue + } + + f, err := openOSXFUSEDev(loc.DevicePrefix) + if err == errNotLoaded { + err = loadOSXFUSE(loc.Load) + if err != nil { + return nil, err + } + // try again + f, err = openOSXFUSEDev(loc.DevicePrefix) + } + if err != nil { + return nil, err + } + err = callMount(loc.Mount, loc.DaemonVar, dir, conf, f, ready, errp) + if err != nil { + f.Close() + return nil, err + } + return f, nil + } + return nil, ErrOSXFUSENotFound +} diff --git a/vendor/bazil.org/fuse/mount_freebsd.go b/vendor/bazil.org/fuse/mount_freebsd.go new file mode 100644 index 000000000..70bb41024 --- /dev/null +++ b/vendor/bazil.org/fuse/mount_freebsd.go @@ -0,0 +1,111 @@ +package fuse + +import ( + "fmt" + "log" + "os" + "os/exec" + "strings" + "sync" + "syscall" +) + +func handleMountFusefsStderr(errCh chan<- error) func(line string) (ignore bool) { + return func(line string) (ignore bool) { + const ( + noMountpointPrefix = `mount_fusefs: ` + noMountpointSuffix = `: No such file or directory` + ) + if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) { + // re-extract it from the error message in case some layer + // changed the path + mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)] + err := &MountpointDoesNotExistError{ + Path: mountpoint, + } + select { + case errCh <- err: + return true + default: + // not the first error; fall back to logging it + return false + } + } + + return false + } +} + +// isBoringMountFusefsError returns whether the Wait error is +// uninteresting; exit 
status 1 is. +func isBoringMountFusefsError(err error) bool { + if err, ok := err.(*exec.ExitError); ok && err.Exited() { + if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 { + return true + } + } + return false +} + +func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (*os.File, error) { + for k, v := range conf.options { + if strings.Contains(k, ",") || strings.Contains(v, ",") { + // Silly limitation but the mount helper does not + // understand any escaping. See TestMountOptionCommaError. + return nil, fmt.Errorf("mount options cannot contain commas on FreeBSD: %q=%q", k, v) + } + } + + f, err := os.OpenFile("/dev/fuse", os.O_RDWR, 0000) + if err != nil { + *errp = err + return nil, err + } + + cmd := exec.Command( + "/sbin/mount_fusefs", + "--safe", + "-o", conf.getOptions(), + "3", + dir, + ) + cmd.ExtraFiles = []*os.File{f} + + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err) + } + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("setting up mount_fusefs stderr: %v", err) + } + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("mount_fusefs: %v", err) + } + helperErrCh := make(chan error, 1) + var wg sync.WaitGroup + wg.Add(2) + go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout) + go lineLogger(&wg, "mount helper error", handleMountFusefsStderr(helperErrCh), stderr) + wg.Wait() + if err := cmd.Wait(); err != nil { + // see if we have a better error to report + select { + case helperErr := <-helperErrCh: + // log the Wait error if it's not what we expected + if !isBoringMountFusefsError(err) { + log.Printf("mount helper failed: %v", err) + } + // and now return what we grabbed from stderr as the real + // error + return nil, helperErr + default: + // nope, fall back to generic message + } + return nil, fmt.Errorf("mount_fusefs: %v", err) + } + + close(ready) + return f, nil +} diff --git a/vendor/bazil.org/fuse/mount_linux.go b/vendor/bazil.org/fuse/mount_linux.go new file mode 100644 index 000000000..197d1044e --- /dev/null +++ b/vendor/bazil.org/fuse/mount_linux.go @@ -0,0 +1,150 @@ +package fuse + +import ( + "fmt" + "log" + "net" + "os" + "os/exec" + "strings" + "sync" + "syscall" +) + +func handleFusermountStderr(errCh chan<- error) func(line string) (ignore bool) { + return func(line string) (ignore bool) { + if line == `fusermount: failed to open /etc/fuse.conf: Permission denied` { + // Silence this particular message, it occurs way too + // commonly and isn't very relevant to whether the mount + // succeeds or not. + return true + } + + const ( + noMountpointPrefix = `fusermount: failed to access mountpoint ` + noMountpointSuffix = `: No such file or directory` + ) + if strings.HasPrefix(line, noMountpointPrefix) && strings.HasSuffix(line, noMountpointSuffix) { + // re-extract it from the error message in case some layer + // changed the path + mountpoint := line[len(noMountpointPrefix) : len(line)-len(noMountpointSuffix)] + err := &MountpointDoesNotExistError{ + Path: mountpoint, + } + select { + case errCh <- err: + return true + default: + // not the first error; fall back to logging it + return false + } + } + + return false + } +} + +// isBoringFusermountError returns whether the Wait error is +// uninteresting; exit status 1 is. 
+func isBoringFusermountError(err error) bool { + if err, ok := err.(*exec.ExitError); ok && err.Exited() { + if status, ok := err.Sys().(syscall.WaitStatus); ok && status.ExitStatus() == 1 { + return true + } + } + return false +} + +func mount(dir string, conf *mountConfig, ready chan<- struct{}, errp *error) (fusefd *os.File, err error) { + // linux mount is never delayed + close(ready) + + fds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0) + if err != nil { + return nil, fmt.Errorf("socketpair error: %v", err) + } + + writeFile := os.NewFile(uintptr(fds[0]), "fusermount-child-writes") + defer writeFile.Close() + + readFile := os.NewFile(uintptr(fds[1]), "fusermount-parent-reads") + defer readFile.Close() + + cmd := exec.Command( + "fusermount", + "-o", conf.getOptions(), + "--", + dir, + ) + cmd.Env = append(os.Environ(), "_FUSE_COMMFD=3") + + cmd.ExtraFiles = []*os.File{writeFile} + + var wg sync.WaitGroup + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, fmt.Errorf("setting up fusermount stderr: %v", err) + } + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("setting up fusermount stderr: %v", err) + } + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("fusermount: %v", err) + } + helperErrCh := make(chan error, 1) + wg.Add(2) + go lineLogger(&wg, "mount helper output", neverIgnoreLine, stdout) + go lineLogger(&wg, "mount helper error", handleFusermountStderr(helperErrCh), stderr) + wg.Wait() + if err := cmd.Wait(); err != nil { + // see if we have a better error to report + select { + case helperErr := <-helperErrCh: + // log the Wait error if it's not what we expected + if !isBoringFusermountError(err) { + log.Printf("mount helper failed: %v", err) + } + // and now return what we grabbed from stderr as the real + // error + return nil, helperErr + default: + // nope, fall back to generic message + } + + return nil, fmt.Errorf("fusermount: %v", err) + } + + c, err := net.FileConn(readFile) + if err != nil { + return nil, fmt.Errorf("FileConn from fusermount socket: %v", err) + } + defer c.Close() + + uc, ok := c.(*net.UnixConn) + if !ok { + return nil, fmt.Errorf("unexpected FileConn type; expected UnixConn, got %T", c) + } + + buf := make([]byte, 32) // expect 1 byte + oob := make([]byte, 32) // expect 24 bytes + _, oobn, _, _, err := uc.ReadMsgUnix(buf, oob) + scms, err := syscall.ParseSocketControlMessage(oob[:oobn]) + if err != nil { + return nil, fmt.Errorf("ParseSocketControlMessage: %v", err) + } + if len(scms) != 1 { + return nil, fmt.Errorf("expected 1 SocketControlMessage; got scms = %#v", scms) + } + scm := scms[0] + gotFds, err := syscall.ParseUnixRights(&scm) + if err != nil { + return nil, fmt.Errorf("syscall.ParseUnixRights: %v", err) + } + if len(gotFds) != 1 { + return nil, fmt.Errorf("wanted 1 fd; got %#v", gotFds) + } + f := os.NewFile(uintptr(gotFds[0]), "/dev/fuse") + return f, nil +} diff --git a/vendor/bazil.org/fuse/options.go b/vendor/bazil.org/fuse/options.go new file mode 100644 index 000000000..65ce8a541 --- /dev/null +++ b/vendor/bazil.org/fuse/options.go @@ -0,0 +1,310 @@ +package fuse + +import ( + "errors" + "strings" +) + +func dummyOption(conf *mountConfig) error { + return nil +} + +// mountConfig holds the configuration for a mount operation. +// Use it by passing MountOption values to Mount. 
+type mountConfig struct { + options map[string]string + maxReadahead uint32 + initFlags InitFlags + osxfuseLocations []OSXFUSEPaths +} + +func escapeComma(s string) string { + s = strings.Replace(s, `\`, `\\`, -1) + s = strings.Replace(s, `,`, `\,`, -1) + return s +} + +// getOptions makes a string of options suitable for passing to FUSE +// mount flag `-o`. Returns an empty string if no options were set. +// Any platform specific adjustments should happen before the call. +func (m *mountConfig) getOptions() string { + var opts []string + for k, v := range m.options { + k = escapeComma(k) + if v != "" { + k += "=" + escapeComma(v) + } + opts = append(opts, k) + } + return strings.Join(opts, ",") +} + +type mountOption func(*mountConfig) error + +// MountOption is passed to Mount to change the behavior of the mount. +type MountOption mountOption + +// FSName sets the file system name (also called source) that is +// visible in the list of mounted file systems. +// +// FreeBSD ignores this option. +func FSName(name string) MountOption { + return func(conf *mountConfig) error { + conf.options["fsname"] = name + return nil + } +} + +// Subtype sets the subtype of the mount. The main type is always +// `fuse`. The type in a list of mounted file systems will look like +// `fuse.foo`. +// +// OS X ignores this option. +// FreeBSD ignores this option. +func Subtype(fstype string) MountOption { + return func(conf *mountConfig) error { + conf.options["subtype"] = fstype + return nil + } +} + +// LocalVolume sets the volume to be local (instead of network), +// changing the behavior of Finder, Spotlight, and such. +// +// OS X only. Others ignore this option. +func LocalVolume() MountOption { + return localVolume +} + +// VolumeName sets the volume name shown in Finder. +// +// OS X only. Others ignore this option. +func VolumeName(name string) MountOption { + return volumeName(name) +} + +// NoAppleDouble makes OSXFUSE disallow files with names used by OS X +// to store extended attributes on file systems that do not support +// them natively. +// +// Such file names are: +// +// ._* +// .DS_Store +// +// OS X only. Others ignore this option. +func NoAppleDouble() MountOption { + return noAppleDouble +} + +// NoAppleXattr makes OSXFUSE disallow extended attributes with the +// prefix "com.apple.". This disables persistent Finder state and +// other such information. +// +// OS X only. Others ignore this option. +func NoAppleXattr() MountOption { + return noAppleXattr +} + +// ExclCreate causes O_EXCL flag to be set for only "truly" exclusive creates, +// i.e. create calls for which the initiator explicitly set the O_EXCL flag. +// +// OSXFUSE expects all create calls to return EEXIST in case the file +// already exists, regardless of whether O_EXCL was specified or not. +// To ensure this behavior, it normally sets OpenExclusive for all +// Create calls, regardless of whether the original call had it set. +// For distributed filesystems, that may force every file create to be +// a distributed consensus action, causing undesirable delays. +// +// This option makes the FUSE filesystem see the original flag value, +// and better decide when to ensure global consensus. +// +// Note that returning EEXIST on existing file create is still +// expected with OSXFUSE, regardless of the presence of the +// OpenExclusive flag. +// +// For more information, see +// https://github.com/osxfuse/osxfuse/issues/209 +// +// OS X only. Others ignore this options. +// Requires OSXFUSE 3.4.1 or newer. 
+func ExclCreate() MountOption { + return exclCreate +} + +// DaemonTimeout sets the time in seconds between a request and a reply before +// the FUSE mount is declared dead. +// +// OS X and FreeBSD only. Others ignore this option. +func DaemonTimeout(name string) MountOption { + return daemonTimeout(name) +} + +var ErrCannotCombineAllowOtherAndAllowRoot = errors.New("cannot combine AllowOther and AllowRoot") + +// AllowOther allows other users to access the file system. +// +// Only one of AllowOther or AllowRoot can be used. +func AllowOther() MountOption { + return func(conf *mountConfig) error { + if _, ok := conf.options["allow_root"]; ok { + return ErrCannotCombineAllowOtherAndAllowRoot + } + conf.options["allow_other"] = "" + return nil + } +} + +// AllowRoot allows other users to access the file system. +// +// Only one of AllowOther or AllowRoot can be used. +// +// FreeBSD ignores this option. +func AllowRoot() MountOption { + return func(conf *mountConfig) error { + if _, ok := conf.options["allow_other"]; ok { + return ErrCannotCombineAllowOtherAndAllowRoot + } + conf.options["allow_root"] = "" + return nil + } +} + +// AllowDev enables interpreting character or block special devices on the +// filesystem. +func AllowDev() MountOption { + return func(conf *mountConfig) error { + conf.options["dev"] = "" + return nil + } +} + +// AllowSUID allows set-user-identifier or set-group-identifier bits to take +// effect. +func AllowSUID() MountOption { + return func(conf *mountConfig) error { + conf.options["suid"] = "" + return nil + } +} + +// DefaultPermissions makes the kernel enforce access control based on +// the file mode (as in chmod). +// +// Without this option, the Node itself decides what is and is not +// allowed. This is normally ok because FUSE file systems cannot be +// accessed by other users without AllowOther/AllowRoot. +// +// FreeBSD ignores this option. +func DefaultPermissions() MountOption { + return func(conf *mountConfig) error { + conf.options["default_permissions"] = "" + return nil + } +} + +// ReadOnly makes the mount read-only. +func ReadOnly() MountOption { + return func(conf *mountConfig) error { + conf.options["ro"] = "" + return nil + } +} + +// MaxReadahead sets the number of bytes that can be prefetched for +// sequential reads. The kernel can enforce a maximum value lower than +// this. +// +// This setting makes the kernel perform speculative reads that do not +// originate from any client process. This usually tremendously +// improves read performance. +func MaxReadahead(n uint32) MountOption { + return func(conf *mountConfig) error { + conf.maxReadahead = n + return nil + } +} + +// AsyncRead enables multiple outstanding read requests for the same +// handle. Without this, there is at most one request in flight at a +// time. +func AsyncRead() MountOption { + return func(conf *mountConfig) error { + conf.initFlags |= InitAsyncRead + return nil + } +} + +// WritebackCache enables the kernel to buffer writes before sending +// them to the FUSE server. Without this, writethrough caching is +// used. +func WritebackCache() MountOption { + return func(conf *mountConfig) error { + conf.initFlags |= InitWritebackCache + return nil + } +} + +// OSXFUSEPaths describes the paths used by an installed OSXFUSE +// version. See OSXFUSELocationV3 for typical values. +type OSXFUSEPaths struct { + // Prefix for the device file. At mount time, an incrementing + // number is suffixed until a free FUSE device is found. 
+ DevicePrefix string + // Path of the load helper, used to load the kernel extension if + // no device files are found. + Load string + // Path of the mount helper, used for the actual mount operation. + Mount string + // Environment variable used to pass the path to the executable + // calling the mount helper. + DaemonVar string +} + +// Default paths for OSXFUSE. See OSXFUSELocations. +var ( + OSXFUSELocationV3 = OSXFUSEPaths{ + DevicePrefix: "/dev/osxfuse", + Load: "/Library/Filesystems/osxfuse.fs/Contents/Resources/load_osxfuse", + Mount: "/Library/Filesystems/osxfuse.fs/Contents/Resources/mount_osxfuse", + DaemonVar: "MOUNT_OSXFUSE_DAEMON_PATH", + } + OSXFUSELocationV2 = OSXFUSEPaths{ + DevicePrefix: "/dev/osxfuse", + Load: "/Library/Filesystems/osxfusefs.fs/Support/load_osxfusefs", + Mount: "/Library/Filesystems/osxfusefs.fs/Support/mount_osxfusefs", + DaemonVar: "MOUNT_FUSEFS_DAEMON_PATH", + } +) + +// OSXFUSELocations sets where to look for OSXFUSE files. The +// arguments are all the possible locations. The previous locations +// are replaced. +// +// Without this option, OSXFUSELocationV3 and OSXFUSELocationV2 are +// used. +// +// OS X only. Others ignore this option. +func OSXFUSELocations(paths ...OSXFUSEPaths) MountOption { + return func(conf *mountConfig) error { + if len(paths) == 0 { + return errors.New("must specify at least one location for OSXFUSELocations") + } + // replace previous values, but make a copy so there's no + // worries about caller mutating their slice + conf.osxfuseLocations = append(conf.osxfuseLocations[:0], paths...) + return nil + } +} + +// AllowNonEmptyMount allows the mounting over a non-empty directory. +// +// The files in it will be shadowed by the freshly created mount. By +// default these mounts are rejected to prevent accidental covering up +// of data, which could for example prevent automatic backup. 
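The MountOption helpers in this file are functional options: each one records an entry in mountConfig.options, and getOptions later joins them into the -o argument handed to the mount helper. A hypothetical usage sketch; fuse.Mount itself is defined elsewhere in the package (fuse.go, not part of this hunk), and the mountpoint and names are made up:

```go
package main

import (
	"log"

	"bazil.org/fuse"
)

func main() {
	// Hypothetical mountpoint and file-system names.
	c, err := fuse.Mount(
		"/mnt/cqlfs",
		fuse.FSName("covenantsql"),
		fuse.Subtype("cqlfs"),
		fuse.ReadOnly(),
		fuse.MaxReadahead(1<<17),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	// A real file system would now pass c to fs.Serve.
}
```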
+func AllowNonEmptyMount() MountOption { + return func(conf *mountConfig) error { + conf.options["nonempty"] = "" + return nil + } +} diff --git a/vendor/bazil.org/fuse/options_darwin.go b/vendor/bazil.org/fuse/options_darwin.go new file mode 100644 index 000000000..faa9d78e7 --- /dev/null +++ b/vendor/bazil.org/fuse/options_darwin.go @@ -0,0 +1,35 @@ +package fuse + +func localVolume(conf *mountConfig) error { + conf.options["local"] = "" + return nil +} + +func volumeName(name string) MountOption { + return func(conf *mountConfig) error { + conf.options["volname"] = name + return nil + } +} + +func daemonTimeout(name string) MountOption { + return func(conf *mountConfig) error { + conf.options["daemon_timeout"] = name + return nil + } +} + +func noAppleXattr(conf *mountConfig) error { + conf.options["noapplexattr"] = "" + return nil +} + +func noAppleDouble(conf *mountConfig) error { + conf.options["noappledouble"] = "" + return nil +} + +func exclCreate(conf *mountConfig) error { + conf.options["excl_create"] = "" + return nil +} diff --git a/vendor/bazil.org/fuse/options_freebsd.go b/vendor/bazil.org/fuse/options_freebsd.go new file mode 100644 index 000000000..7c164b136 --- /dev/null +++ b/vendor/bazil.org/fuse/options_freebsd.go @@ -0,0 +1,28 @@ +package fuse + +func localVolume(conf *mountConfig) error { + return nil +} + +func volumeName(name string) MountOption { + return dummyOption +} + +func daemonTimeout(name string) MountOption { + return func(conf *mountConfig) error { + conf.options["timeout"] = name + return nil + } +} + +func noAppleXattr(conf *mountConfig) error { + return nil +} + +func noAppleDouble(conf *mountConfig) error { + return nil +} + +func exclCreate(conf *mountConfig) error { + return nil +} diff --git a/vendor/bazil.org/fuse/options_linux.go b/vendor/bazil.org/fuse/options_linux.go new file mode 100644 index 000000000..13f0896d5 --- /dev/null +++ b/vendor/bazil.org/fuse/options_linux.go @@ -0,0 +1,25 @@ +package fuse + +func localVolume(conf *mountConfig) error { + return nil +} + +func volumeName(name string) MountOption { + return dummyOption +} + +func daemonTimeout(name string) MountOption { + return dummyOption +} + +func noAppleXattr(conf *mountConfig) error { + return nil +} + +func noAppleDouble(conf *mountConfig) error { + return nil +} + +func exclCreate(conf *mountConfig) error { + return nil +} diff --git a/vendor/bazil.org/fuse/protocol.go b/vendor/bazil.org/fuse/protocol.go new file mode 100644 index 000000000..a77bbf72f --- /dev/null +++ b/vendor/bazil.org/fuse/protocol.go @@ -0,0 +1,75 @@ +package fuse + +import ( + "fmt" +) + +// Protocol is a FUSE protocol version number. +type Protocol struct { + Major uint32 + Minor uint32 +} + +func (p Protocol) String() string { + return fmt.Sprintf("%d.%d", p.Major, p.Minor) +} + +// LT returns whether a is less than b. +func (a Protocol) LT(b Protocol) bool { + return a.Major < b.Major || + (a.Major == b.Major && a.Minor < b.Minor) +} + +// GE returns whether a is greater than or equal to b. +func (a Protocol) GE(b Protocol) bool { + return a.Major > b.Major || + (a.Major == b.Major && a.Minor >= b.Minor) +} + +func (a Protocol) is79() bool { + return a.GE(Protocol{7, 9}) +} + +// HasAttrBlockSize returns whether Attr.BlockSize is respected by the +// kernel. +func (a Protocol) HasAttrBlockSize() bool { + return a.is79() +} + +// HasReadWriteFlags returns whether ReadRequest/WriteRequest +// fields Flags and FileFlags are valid. 
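Feature availability is gated on the negotiated kernel protocol version through the Protocol comparisons above (LT, GE and the is79-style helpers). A small illustration of those comparison semantics, using only the exported Protocol API shown in this file:

```go
package main

import (
	"fmt"

	"bazil.org/fuse"
)

func main() {
	old := fuse.Protocol{Major: 7, Minor: 8}
	cur := fuse.Protocol{Major: 7, Minor: 12}

	fmt.Println(old.LT(fuse.Protocol{Major: 7, Minor: 9})) // true: no 7.9 features
	fmt.Println(cur.GE(fuse.Protocol{Major: 7, Minor: 9})) // true
	fmt.Println(cur.HasAttrBlockSize())                    // true, gated on >= 7.9
}
```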
+func (a Protocol) HasReadWriteFlags() bool { + return a.is79() +} + +// HasGetattrFlags returns whether GetattrRequest field Flags is +// valid. +func (a Protocol) HasGetattrFlags() bool { + return a.is79() +} + +func (a Protocol) is710() bool { + return a.GE(Protocol{7, 10}) +} + +// HasOpenNonSeekable returns whether OpenResponse field Flags flag +// OpenNonSeekable is supported. +func (a Protocol) HasOpenNonSeekable() bool { + return a.is710() +} + +func (a Protocol) is712() bool { + return a.GE(Protocol{7, 12}) +} + +// HasUmask returns whether CreateRequest/MkdirRequest/MknodRequest +// field Umask is valid. +func (a Protocol) HasUmask() bool { + return a.is712() +} + +// HasInvalidate returns whether InvalidateNode/InvalidateEntry are +// supported. +func (a Protocol) HasInvalidate() bool { + return a.is712() +} diff --git a/vendor/bazil.org/fuse/unmount.go b/vendor/bazil.org/fuse/unmount.go new file mode 100644 index 000000000..ffe3f155c --- /dev/null +++ b/vendor/bazil.org/fuse/unmount.go @@ -0,0 +1,6 @@ +package fuse + +// Unmount tries to unmount the filesystem mounted at dir. +func Unmount(dir string) error { + return unmount(dir) +} diff --git a/vendor/bazil.org/fuse/unmount_linux.go b/vendor/bazil.org/fuse/unmount_linux.go new file mode 100644 index 000000000..088f0cfee --- /dev/null +++ b/vendor/bazil.org/fuse/unmount_linux.go @@ -0,0 +1,21 @@ +package fuse + +import ( + "bytes" + "errors" + "os/exec" +) + +func unmount(dir string) error { + cmd := exec.Command("fusermount", "-u", dir) + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + output = bytes.TrimRight(output, "\n") + msg := err.Error() + ": " + string(output) + err = errors.New(msg) + } + return err + } + return nil +} diff --git a/vendor/bazil.org/fuse/unmount_std.go b/vendor/bazil.org/fuse/unmount_std.go new file mode 100644 index 000000000..d6efe276f --- /dev/null +++ b/vendor/bazil.org/fuse/unmount_std.go @@ -0,0 +1,17 @@ +// +build !linux + +package fuse + +import ( + "os" + "syscall" +) + +func unmount(dir string) error { + err := syscall.Unmount(dir, 0) + if err != nil { + err = &os.PathError{Op: "unmount", Path: dir, Err: err} + return err + } + return nil +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 000000000..a3c021d3f --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. 
The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 000000000..d20f52b7d --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. 
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 000000000..d88bd1db1 --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 000000000..0f35592df --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. 
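This vendored golang.org/x/net/context package provides the same deadline and cancelation semantics as the standard library context package (delegating to it on Go >= 1.7, reimplementing it below for older toolchains). A brief usage sketch of the WithTimeout/Done flow:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel() // releases the timer even if the deadline never fires

	select {
	case <-time.After(time.Second):
		fmt.Println("work finished")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // context deadline exceeded
	}
}
```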
+var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. 
+ child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. 
+type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 000000000..b105f80be --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. 
+ // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/worker/db.go b/worker/db.go index 1c95d9763..ba2c7289f 100644 --- a/worker/db.go +++ b/worker/db.go @@ -18,27 +18,20 @@ package worker import ( "context" - "database/sql" - "io" "os" "path/filepath" - "runtime/trace" - "strings" + //"runtime/trace" "sync" "time" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" kt "github.com/CovenantSQL/CovenantSQL/kayak/types" kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/sqlchain" - "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" - "github.com/CovenantSQL/sqlparser" + "github.com/CovenantSQL/CovenantSQL/storage" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/pkg/errors" ) @@ -66,7 +59,6 @@ const ( type Database struct { cfg *DBConfig dbID proto.DatabaseID - storage *storage.Storage kayakWal *kl.LevelDBWal kayakRuntime *kayak.Runtime kayakConfig *kt.RuntimeConfig @@ -78,7 +70,7 @@ type Database struct { } // NewDatabase create a single database instance using config. 
-func NewDatabase(cfg *DBConfig, peers *proto.Peers, genesisBlock *ct.Block) (db *Database, err error) { +func NewDatabase(cfg *DBConfig, peers *proto.Peers, genesisBlock *types.Block) (db *Database, err error) { // ensure dir exists if err = os.MkdirAll(cfg.DataDir, 0755); err != nil { return @@ -109,11 +101,6 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, genesisBlock *ct.Block) (db if db.chain != nil { db.chain.Stop() } - - // close storage - if db.storage != nil { - db.storage.Close() - } } }() @@ -128,10 +115,6 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, genesisBlock *ct.Block) (db storageDSN.AddParam("_crypto_key", cfg.EncryptionKey) } - if db.storage, err = storage.New(storageDSN.Format()); err != nil { - return - } - // init chain chainFile := filepath.Join(cfg.DataDir, SQLChainFileName) if db.nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -140,10 +123,11 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, genesisBlock *ct.Block) (db // TODO(xq262144): make sqlchain config use of global config object chainCfg := &sqlchain.Config{ - DatabaseID: cfg.DatabaseID, - DataFile: chainFile, - Genesis: genesisBlock, - Peers: peers, + DatabaseID: cfg.DatabaseID, + ChainFilePrefix: chainFile, + DataFile: storageDSN.Format(), + Genesis: genesisBlock, + Peers: peers, // TODO(xq262144): should refactor server/node definition to conf/proto package // currently sqlchain package only use Server.ID as node id @@ -209,16 +193,16 @@ func (db *Database) UpdatePeers(peers *proto.Peers) (err error) { } // Query defines database query interface. -func (db *Database) Query(request *wt.Request) (response *wt.Response, err error) { +func (db *Database) Query(request *types.Request) (response *types.Response, err error) { // Just need to verify signature in db.saveAck //if err = request.Verify(); err != nil { // return //} switch request.Header.QueryType { - case wt.ReadQuery: - return db.readQuery(request) - case wt.WriteQuery: + case types.ReadQuery: + return db.chain.Query(request) + case types.WriteQuery: return db.writeQuery(request) default: // TODO(xq262144): verbose errors with custom error structure @@ -227,7 +211,7 @@ func (db *Database) Query(request *wt.Request) (response *wt.Response, err error } // Ack defines client response ack interface. 
-func (db *Database) Ack(ack *wt.Ack) (err error) { +func (db *Database) Ack(ack *types.Ack) (err error) { // Just need to verify signature in db.saveAck //if err = ack.Verify(); err != nil { // return @@ -260,13 +244,6 @@ func (db *Database) Shutdown() (err error) { } } - if db.storage != nil { - // stop storage - if err = db.storage.Close(); err != nil { - return - } - } - if db.connSeqEvictCh != nil { // stop connection sequence evictions select { @@ -294,11 +271,11 @@ func (db *Database) Destroy() (err error) { return } -func (db *Database) writeQuery(request *wt.Request) (response *wt.Response, err error) { - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "writeQuery") - defer task.End() - defer trace.StartRegion(ctx, "writeQueryRegion").End() +func (db *Database) writeQuery(request *types.Request) (response *types.Response, err error) { + //ctx := context.Background() + //ctx, task := trace.NewTask(ctx, "writeQuery") + //defer task.End() + //defer trace.StartRegion(ctx, "writeQueryRegion").End() // check database size first, wal/kayak/chain database size is not included if db.cfg.SpaceLimit > 0 { @@ -318,174 +295,25 @@ func (db *Database) writeQuery(request *wt.Request) (response *wt.Response, err } // call kayak runtime Process - var logOffset uint64 var result interface{} - result, logOffset, err = db.kayakRuntime.Apply(context.Background(), request) - - if err != nil { - return - } - - // get affected rows and last insert id - var affectedRows, lastInsertID int64 - - if execResult, ok := result.(storage.ExecResult); ok { - affectedRows = execResult.RowsAffected - lastInsertID = execResult.LastInsertID - } - - return db.buildQueryResponse(request, logOffset, []string{}, []string{}, [][]interface{}{}, lastInsertID, affectedRows) -} - -func (db *Database) readQuery(request *wt.Request) (response *wt.Response, err error) { - // call storage query directly - // TODO(xq262144): add timeout logic basic of client options - var columns, types []string - var data [][]interface{} - var queries []storage.Query - - // sanitize dangerous queries - if queries, err = convertAndSanitizeQuery(request.Payload.Queries); err != nil { - return - } - - columns, types, data, err = db.storage.Query(context.Background(), queries) - if err != nil { - return - } - - return db.buildQueryResponse(request, 0, columns, types, data, 0, 0) -} - -func (db *Database) getLog(index uint64) (data interface{}, err error) { - var l *kt.Log - if l, err = db.kayakWal.Get(index); err != nil || l == nil { - err = errors.Wrap(err, "get log from kayak pool failed") - return - } - - // decode log - data, err = db.DecodePayload(l.Data) - - return -} - -func (db *Database) buildQueryResponse(request *wt.Request, offset uint64, - columns []string, types []string, data [][]interface{}, lastInsertID int64, affectedRows int64) (response *wt.Response, err error) { - // build response - response = new(wt.Response) - response.Header.Request = request.Header - if response.Header.NodeID, err = kms.GetLocalNodeID(); err != nil { + if result, _, err = db.kayakRuntime.Apply(context.Background(), request); err != nil { + err = errors.Wrap(err, "apply failed") return } - response.Header.LogOffset = offset - response.Header.Timestamp = getLocalTime() - response.Header.RowCount = uint64(len(data)) - response.Header.LastInsertID = lastInsertID - response.Header.AffectedRows = affectedRows - - // set payload - response.Payload.Columns = columns - response.Payload.DeclTypes = types - response.Payload.Rows = make([]wt.ResponseRow, 
len(data)) - - for i, d := range data { - response.Payload.Rows[i].Values = d - } - // sign fields - var privateKey *asymmetric.PrivateKey - if privateKey, err = getLocalPrivateKey(); err != nil { - return - } - if err = response.Sign(privateKey); err != nil { + var ok bool + if response, ok = (result).(*types.Response); !ok { + err = errors.Wrap(err, "invalid response type") return } - // record response for future ack process - err = db.saveResponse(&response.Header) return } -func (db *Database) saveResponse(respHeader *wt.SignedResponseHeader) (err error) { - return db.chain.VerifyAndPushResponsedQuery(respHeader) -} - -func (db *Database) saveAck(ackHeader *wt.SignedAckHeader) (err error) { +func (db *Database) saveAck(ackHeader *types.SignedAckHeader) (err error) { return db.chain.VerifyAndPushAckedQuery(ackHeader) } func getLocalTime() time.Time { return time.Now().UTC() } - -func getLocalPrivateKey() (privateKey *asymmetric.PrivateKey, err error) { - return kms.GetLocalPrivateKey() -} - -func convertAndSanitizeQuery(inQuery []wt.Query) (outQuery []storage.Query, err error) { - outQuery = make([]storage.Query, len(inQuery)) - for i, q := range inQuery { - tokenizer := sqlparser.NewStringTokenizer(q.Pattern) - var stmt sqlparser.Statement - var lastPos int - var query string - var originalQueries []string - - for { - stmt, err = sqlparser.ParseNext(tokenizer) - - if err != nil && err != io.EOF { - return - } - - if err == io.EOF { - err = nil - break - } - - query = q.Pattern[lastPos : tokenizer.Position-1] - lastPos = tokenizer.Position + 1 - - // translate show statement - if showStmt, ok := stmt.(*sqlparser.Show); ok { - origQuery := query - - switch showStmt.Type { - case "table": - if showStmt.ShowCreate { - query = "SELECT sql FROM sqlite_master WHERE type = \"table\" AND tbl_name = \"" + - showStmt.OnTable.Name.String() + "\"" - } else { - query = "PRAGMA table_info(" + showStmt.OnTable.Name.String() + ")" - } - case "index": - query = "SELECT name FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"" + - showStmt.OnTable.Name.String() + "\"" - case "tables": - query = "SELECT name FROM sqlite_master WHERE type = \"table\"" - } - - log.WithFields(log.Fields{ - "from": origQuery, - "to": query, - }).Debug("query translated") - } - - originalQueries = append(originalQueries, query) - } - - // covert args - var args []sql.NamedArg - - for _, v := range q.Args { - args = append(args, sql.Named(v.Name, v.Value)) - } - - outQuery[i] = storage.Query{ - Pattern: strings.Join(originalQueries, "; "), - Args: args, - } - } - return -} diff --git a/worker/db_storage.go b/worker/db_storage.go index 6c7ca1ac3..b56f6bbee 100644 --- a/worker/db_storage.go +++ b/worker/db_storage.go @@ -19,11 +19,9 @@ package worker import ( "bytes" "container/list" - "context" - "github.com/CovenantSQL/CovenantSQL/sqlchain/storage" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/pkg/errors" ) @@ -44,7 +42,7 @@ func (db *Database) EncodePayload(request interface{}) (data []byte, err error) // DecodePayload implements kayak.types.Handler.DecodePayload. 
func (db *Database) DecodePayload(data []byte) (request interface{}, err error) { - var req *wt.Request + var req *types.Request if err = utils.DecodeMsgPack(data, &req); err != nil { err = errors.Wrap(err, "decode request failed") @@ -58,9 +56,9 @@ func (db *Database) DecodePayload(data []byte) (request interface{}, err error) // Check implements kayak.types.Handler.Check. func (db *Database) Check(rawReq interface{}) (err error) { - var req *wt.Request + var req *types.Request var ok bool - if req, ok = rawReq.(*wt.Request); !ok || req == nil { + if req, ok = rawReq.(*types.Request); !ok || req == nil { err = errors.Wrap(ErrInvalidRequest, "invalid request payload") return } @@ -91,24 +89,18 @@ func (db *Database) Check(rawReq interface{}) (err error) { return } -// Commit implements kayak.types.Handler.Commmit. +// Commit implements kayak.types.Handler.Commit. func (db *Database) Commit(rawReq interface{}) (result interface{}, err error) { // convert query and check syntax - var req *wt.Request + var req *types.Request var ok bool - if req, ok = rawReq.(*wt.Request); !ok || req == nil { + if req, ok = rawReq.(*types.Request); !ok || req == nil { err = errors.Wrap(ErrInvalidRequest, "invalid request payload") return } - var queries []storage.Query - if queries, err = convertAndSanitizeQuery(req.Payload.Queries); err != nil { - // return original parser error - return - } - // execute - return db.storage.Exec(context.Background(), queries) + return db.chain.Query(req) } func (db *Database) recordSequence(connID uint64, seqNo uint64) { diff --git a/worker/db_test.go b/worker/db_test.go index 782dd1cde..36561d5bc 100644 --- a/worker/db_test.go +++ b/worker/db_test.go @@ -29,7 +29,6 @@ import ( "testing" "time" - bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/consistent" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -40,10 +39,9 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/sqlchain" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/fortytw2/leaktest" . 
"github.com/smartystreets/goconvey/convey" ) @@ -88,7 +86,7 @@ func TestSingleDatabase(t *testing.T) { } // create genesis block - var block *ct.Block + var block *types.Block block, err = createRandomBlock(rootHash, true) So(err, ShouldBeNil) @@ -99,9 +97,9 @@ func TestSingleDatabase(t *testing.T) { Convey("test query rewrite", func() { // test query rewrite - var writeQuery *wt.Request - var res *wt.Response - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request + var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (col1 int, col2 string)", "create index test_index on test (col1)", }) @@ -113,8 +111,8 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) // test show tables query - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 2, []string{ "show tables", }) So(err, ShouldBeNil) @@ -130,7 +128,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows[0].Values[0], ShouldResemble, []byte("test")) // test show full tables query - readQuery, err = buildQuery(wt.ReadQuery, 1, 3, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 3, []string{ "show full tables", }) So(err, ShouldBeNil) @@ -146,7 +144,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows[0].Values[0], ShouldResemble, []byte("test")) // test show create table - readQuery, err = buildQuery(wt.ReadQuery, 1, 4, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 4, []string{ "show create table test", }) So(err, ShouldBeNil) @@ -164,7 +162,7 @@ func TestSingleDatabase(t *testing.T) { So(strings.ToUpper(string(byteStr)), ShouldContainSubstring, "CREATE") // test show table - readQuery, err = buildQuery(wt.ReadQuery, 1, 5, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 5, []string{ "show table test", }) So(err, ShouldBeNil) @@ -182,7 +180,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows[1].Values[1], ShouldResemble, []byte("col2")) // test desc table - readQuery, err = buildQuery(wt.ReadQuery, 1, 6, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 6, []string{ "desc test", }) So(err, ShouldBeNil) @@ -200,7 +198,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows[1].Values[1], ShouldResemble, []byte("col2")) // test show index from table - readQuery, err = buildQuery(wt.ReadQuery, 1, 7, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 7, []string{ "show index from table test", }) So(err, ShouldBeNil) @@ -218,9 +216,9 @@ func TestSingleDatabase(t *testing.T) { Convey("test read write", func() { // test write query - var writeQuery *wt.Request - var res *wt.Response - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request + var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (test int)", "insert into test values(1)", }) @@ -233,8 +231,8 @@ func TestSingleDatabase(t *testing.T) { So(res.Header.RowCount, ShouldEqual, 0) // test select query - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 2, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -256,9 +254,9 @@ func TestSingleDatabase(t *testing.T) { }) Convey("test invalid request", func() { - var writeQuery *wt.Request - var res *wt.Response - writeQuery, 
err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request + var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (test int)", "insert into test values(1)", }) @@ -272,43 +270,43 @@ func TestSingleDatabase(t *testing.T) { So(res.Header.RowCount, ShouldEqual, 0) // request again with same sequence - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "insert into test values(2)", }) res, err = db.Query(writeQuery) So(err, ShouldNotBeNil) // request again with low sequence - writeQuery, err = buildQuery(wt.WriteQuery, 1, 0, []string{ + writeQuery, err = buildQuery(types.WriteQuery, 1, 0, []string{ "insert into test values(3)", }) res, err = db.Query(writeQuery) So(err, ShouldNotBeNil) // request with invalid timestamp - writeQuery, err = buildQueryWithTimeShift(wt.WriteQuery, 1, 2, time.Second*100, []string{ + writeQuery, err = buildQueryWithTimeShift(types.WriteQuery, 1, 2, time.Second*100, []string{ "insert into test values(4)", }) res, err = db.Query(writeQuery) So(err, ShouldNotBeNil) // request with invalid timestamp - writeQuery, err = buildQueryWithTimeShift(wt.WriteQuery, 1, 2, -time.Second*100, []string{ + writeQuery, err = buildQueryWithTimeShift(types.WriteQuery, 1, 2, -time.Second*100, []string{ "insert into test values(5)", }) res, err = db.Query(writeQuery) So(err, ShouldNotBeNil) // request with different connection id - writeQuery, err = buildQuery(wt.WriteQuery, 2, 1, []string{ + writeQuery, err = buildQuery(types.WriteQuery, 2, 1, []string{ "insert into test values(6)", }) res, err = db.Query(writeQuery) So(err, ShouldBeNil) // read query, test records - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 2, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -331,7 +329,7 @@ func TestSingleDatabase(t *testing.T) { }) Convey("corner case", func() { - var req *wt.Request + var req *types.Request var err error req, err = buildQuery(-1, 1, 1, []string{ "create table test (test int)", @@ -340,9 +338,9 @@ func TestSingleDatabase(t *testing.T) { _, err = db.Query(req) So(err, ShouldNotBeNil) - var writeQuery *wt.Request - var res *wt.Response - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request + var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (test int)", }) So(err, ShouldBeNil) @@ -350,8 +348,8 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) // read query, test records - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 2, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -366,7 +364,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows, ShouldBeEmpty) // write query, test failed - writeQuery, err = buildQuery(wt.WriteQuery, 1, 3, []string{ + writeQuery, err = buildQuery(types.WriteQuery, 1, 3, []string{ "insert into test2 values(1)", // table should not exists }) So(err, ShouldBeNil) @@ -374,7 +372,7 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldNotBeNil) // read query, test dynamic fields - readQuery, err = buildQuery(wt.ReadQuery, 1, 4, []string{ + readQuery, err = buildQuery(types.ReadQuery, 1, 4, []string{ "select 1 as test", }) So(err, 
ShouldBeNil) @@ -389,7 +387,7 @@ func TestSingleDatabase(t *testing.T) { So(res.Payload.Rows, ShouldNotBeEmpty) // test ack - var ack *wt.Ack + var ack *types.Ack ack, err = buildAck(res) So(err, ShouldBeNil) @@ -449,7 +447,7 @@ func TestInitFailed(t *testing.T) { } // create genesis block - var block *ct.Block + var block *types.Block block, err = createRandomBlock(rootHash, true) So(err, ShouldBeNil) @@ -502,7 +500,7 @@ func TestDatabaseRecycle(t *testing.T) { } // create genesis block - var block *ct.Block + var block *types.Block block, err = createRandomBlock(rootHash, true) So(err, ShouldBeNil) @@ -512,9 +510,9 @@ func TestDatabaseRecycle(t *testing.T) { So(err, ShouldBeNil) // do some query - var writeQuery *wt.Request - var res *wt.Response - writeQuery, err = buildQuery(wt.WriteQuery, 1, 1, []string{ + var writeQuery *types.Request + var res *types.Response + writeQuery, err = buildQuery(types.WriteQuery, 1, 1, []string{ "create table test (test int)", "insert into test values(1)", }) @@ -527,8 +525,8 @@ func TestDatabaseRecycle(t *testing.T) { So(res.Header.RowCount, ShouldEqual, 0) // test select query - var readQuery *wt.Request - readQuery, err = buildQuery(wt.ReadQuery, 1, 2, []string{ + var readQuery *types.Request + readQuery, err = buildQuery(types.ReadQuery, 1, 2, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -553,7 +551,7 @@ func TestDatabaseRecycle(t *testing.T) { }) } -func buildAck(res *wt.Response) (ack *wt.Ack, err error) { +func buildAck(res *types.Response) (ack *types.Ack, err error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -567,9 +565,9 @@ func buildAck(res *wt.Response) (ack *wt.Ack, err error) { return } - ack = &wt.Ack{ - Header: wt.SignedAckHeader{ - AckHeader: wt.AckHeader{ + ack = &types.Ack{ + Header: types.SignedAckHeader{ + AckHeader: types.AckHeader{ Response: res.Header, NodeID: nodeID, Timestamp: getLocalTime(), @@ -582,19 +580,19 @@ func buildAck(res *wt.Response) (ack *wt.Ack, err error) { return } -func buildQuery(queryType wt.QueryType, connID uint64, seqNo uint64, queries []string) (query *wt.Request, err error) { +func buildQuery(queryType types.QueryType, connID uint64, seqNo uint64, queries []string) (query *types.Request, err error) { return buildQueryEx(queryType, connID, seqNo, time.Duration(0), proto.DatabaseID(""), queries) } -func buildQueryWithDatabaseID(queryType wt.QueryType, connID uint64, seqNo uint64, databaseID proto.DatabaseID, queries []string) (query *wt.Request, err error) { +func buildQueryWithDatabaseID(queryType types.QueryType, connID uint64, seqNo uint64, databaseID proto.DatabaseID, queries []string) (query *types.Request, err error) { return buildQueryEx(queryType, connID, seqNo, time.Duration(0), databaseID, queries) } -func buildQueryWithTimeShift(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift time.Duration, queries []string) (query *wt.Request, err error) { +func buildQueryWithTimeShift(queryType types.QueryType, connID uint64, seqNo uint64, timeShift time.Duration, queries []string) (query *types.Request, err error) { return buildQueryEx(queryType, connID, seqNo, timeShift, proto.DatabaseID(""), queries) } -func buildQueryEx(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift time.Duration, databaseID proto.DatabaseID, queries []string) (query *wt.Request, err error) { +func buildQueryEx(queryType types.QueryType, connID uint64, seqNo uint64, timeShift time.Duration, databaseID proto.DatabaseID, queries []string) (query 
*types.Request, err error) { // get node id var nodeID proto.NodeID if nodeID, err = kms.GetLocalNodeID(); err != nil { @@ -612,15 +610,15 @@ func buildQueryEx(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift tm = tm.Add(-timeShift) // build queries - realQueries := make([]wt.Query, len(queries)) + realQueries := make([]types.Query, len(queries)) for i, v := range queries { realQueries[i].Pattern = v } - query = &wt.Request{ - Header: wt.SignedRequestHeader{ - RequestHeader: wt.RequestHeader{ + query = &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ DatabaseID: databaseID, QueryType: queryType, NodeID: nodeID, @@ -629,7 +627,7 @@ func buildQueryEx(queryType wt.QueryType, connID uint64, seqNo uint64, timeShift Timestamp: tm, }, }, - Payload: wt.RequestPayload{ + Payload: types.RequestPayload{ Queries: realQueries, }, } @@ -742,7 +740,7 @@ func initNode() (cleanupFunc func(), server *rpc.Server, err error) { } // copied from sqlchain.xxx_test. -func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error) { +func createRandomBlock(parent hash.Hash, isGenesis bool) (b *types.Block, err error) { // Generate key pair priv, pub, err := asymmetric.GenSecp256k1KeyPair() @@ -753,9 +751,9 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error h := hash.Hash{} rand.Read(h[:]) - b = &ct.Block{ - SignedHeader: ct.SignedHeader{ - Header: ct.Header{ + b = &types.Block{ + SignedHeader: types.SignedHeader{ + Header: types.Header{ Version: 0x01000000, Producer: proto.NodeID(h.String()), GenesisHash: rootHash, @@ -763,12 +761,6 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error Timestamp: time.Now().UTC(), }, }, - Queries: make([]*hash.Hash, rand.Intn(10)+10), - } - - for i := range b.Queries { - b.Queries[i] = new(hash.Hash) - rand.Read(b.Queries[i][:]) } if isGenesis { @@ -801,7 +793,7 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error // fake BPDB service type stubBPDBService struct{} -func (s *stubBPDBService) CreateDatabase(req *bp.CreateDatabaseRequest, resp *bp.CreateDatabaseResponse) (err error) { +func (s *stubBPDBService) CreateDatabase(req *types.CreateDatabaseRequest, resp *types.CreateDatabaseResponse) (err error) { if resp.Header.InstanceMeta, err = s.getInstanceMeta("db2"); err != nil { return } @@ -816,11 +808,11 @@ func (s *stubBPDBService) CreateDatabase(req *bp.CreateDatabaseRequest, resp *bp return } -func (s *stubBPDBService) DropDatabase(req *bp.DropDatabaseRequest, resp *bp.DropDatabaseRequest) (err error) { +func (s *stubBPDBService) DropDatabase(req *types.DropDatabaseRequest, resp *types.DropDatabaseRequest) (err error) { return } -func (s *stubBPDBService) GetDatabase(req *bp.GetDatabaseRequest, resp *bp.GetDatabaseResponse) (err error) { +func (s *stubBPDBService) GetDatabase(req *types.GetDatabaseRequest, resp *types.GetDatabaseResponse) (err error) { if resp.Header.InstanceMeta, err = s.getInstanceMeta(req.Header.DatabaseID); err != nil { return } @@ -835,8 +827,8 @@ func (s *stubBPDBService) GetDatabase(req *bp.GetDatabaseRequest, resp *bp.GetDa return } -func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitServiceResponse) (err error) { - resp.Header.Instances = make([]wt.ServiceInstance, 1) +func (s *stubBPDBService) GetNodeDatabases(req *types.InitService, resp *types.InitServiceResponse) (err error) { + resp.Header.Instances = make([]types.ServiceInstance, 1) 
resp.Header.Instances[0], err = s.getInstanceMeta("db2") if resp.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { return @@ -851,7 +843,7 @@ func (s *stubBPDBService) GetNodeDatabases(req *wt.InitService, resp *wt.InitSer return } -func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance wt.ServiceInstance, err error) { +func (s *stubBPDBService) getInstanceMeta(dbID proto.DatabaseID) (instance types.ServiceInstance, err error) { var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { return diff --git a/worker/dbms.go b/worker/dbms.go index 769c6d45e..647d4d786 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -23,14 +23,13 @@ import ( "path/filepath" "sync" - kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/sqlchain" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" "github.com/pkg/errors" ) @@ -132,7 +131,7 @@ func (dbms *DBMS) Init() (err error) { } // load current peers info from block producer - var dbMapping []wt.ServiceInstance + var dbMapping []types.ServiceInstance if dbMapping, err = dbms.getMappedInstances(); err != nil { err = errors.Wrap(err, "get mapped instances failed") return @@ -147,7 +146,7 @@ func (dbms *DBMS) Init() (err error) { return } -func (dbms *DBMS) initDatabases(meta *DBMSMeta, conf []wt.ServiceInstance) (err error) { +func (dbms *DBMS) initDatabases(meta *DBMSMeta, conf []types.ServiceInstance) (err error) { currentInstance := make(map[proto.DatabaseID]bool) for _, instanceConf := range conf { @@ -177,7 +176,7 @@ func (dbms *DBMS) initDatabases(meta *DBMSMeta, conf []wt.ServiceInstance) (err } // Create add new database to the miner dbms. -func (dbms *DBMS) Create(instance *wt.ServiceInstance, cleanup bool) (err error) { +func (dbms *DBMS) Create(instance *types.ServiceInstance, cleanup bool) (err error) { if _, alreadyExists := dbms.getMeta(instance.DatabaseID); alreadyExists { return ErrAlreadyExists } @@ -245,7 +244,7 @@ func (dbms *DBMS) Drop(dbID proto.DatabaseID) (err error) { } // Update apply the new peers config to dbms. -func (dbms *DBMS) Update(instance *wt.ServiceInstance) (err error) { +func (dbms *DBMS) Update(instance *types.ServiceInstance) (err error) { var db *Database var exists bool @@ -258,7 +257,7 @@ func (dbms *DBMS) Update(instance *wt.ServiceInstance) (err error) { } // Query handles query request in dbms. -func (dbms *DBMS) Query(req *wt.Request) (res *wt.Response, err error) { +func (dbms *DBMS) Query(req *types.Request) (res *types.Response, err error) { var db *Database var exists bool @@ -273,7 +272,7 @@ func (dbms *DBMS) Query(req *wt.Request) (res *wt.Response, err error) { } // Ack handles ack of previous response. -func (dbms *DBMS) Ack(ack *wt.Ack) (err error) { +func (dbms *DBMS) Ack(ack *types.Ack) (err error) { var db *Database var exists bool @@ -287,32 +286,6 @@ func (dbms *DBMS) Ack(ack *wt.Ack) (err error) { return db.Ack(ack) } -// GetRequest handles fetching original request of previous transactions. 
-func (dbms *DBMS) GetRequest(dbID proto.DatabaseID, offset uint64) (query *wt.Request, err error) { - var db *Database - var exists bool - - if db, exists = dbms.getMeta(dbID); !exists { - err = ErrNotExists - return - } - - var req interface{} - if req, err = db.getLog(offset); err != nil { - err = errors.Wrap(err, "get log failed") - return - } - - // decode requests - var ok bool - if query, ok = req.(*wt.Request); !ok { - err = errors.Wrap(kt.ErrInvalidLog, "convert log to request failed") - return - } - - return -} - func (dbms *DBMS) getMeta(dbID proto.DatabaseID) (db *Database, exists bool) { var rawDB interface{} @@ -338,14 +311,14 @@ func (dbms *DBMS) removeMeta(dbID proto.DatabaseID) (err error) { return dbms.writeMeta() } -func (dbms *DBMS) getMappedInstances() (instances []wt.ServiceInstance, err error) { +func (dbms *DBMS) getMappedInstances() (instances []types.ServiceInstance, err error) { var bpNodeID proto.NodeID if bpNodeID, err = rpc.GetCurrentBP(); err != nil { return } - req := &wt.InitService{} - res := new(wt.InitServiceResponse) + req := &types.InitService{} + res := new(types.InitServiceResponse) if err = rpc.NewCaller().CallNode(bpNodeID, route.BPDBGetNodeDatabases.String(), req, res); err != nil { return diff --git a/worker/dbms_config.go b/worker/dbms_config.go index d4f671659..bc8fcded7 100644 --- a/worker/dbms_config.go +++ b/worker/dbms_config.go @@ -24,7 +24,7 @@ import ( var ( // DefaultMaxReqTimeGap defines max time gap between request and server. - DefaultMaxReqTimeGap = time.Second * 5 + DefaultMaxReqTimeGap = time.Minute ) // DBMSConfig defines the local multi-database management system config. diff --git a/worker/dbms_rpc.go b/worker/dbms_rpc.go index 1daffa148..0bd8a5deb 100644 --- a/worker/dbms_rpc.go +++ b/worker/dbms_rpc.go @@ -17,12 +17,12 @@ package worker import ( - "context" - "runtime/trace" + //"context" + //"runtime/trace" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/pkg/errors" "github.com/rcrowley/go-metrics" ) @@ -53,16 +53,16 @@ func NewDBMSRPCService(serviceName string, server *rpc.Server, dbms *DBMS) (serv } // Query rpc, called by client to issue read/write query. -func (rpc *DBMSRPCService) Query(req *wt.Request, res *wt.Response) (err error) { +func (rpc *DBMSRPCService) Query(req *types.Request, res *types.Response) (err error) { // Just need to verify signature in db.saveAck //if err = req.Verify(); err != nil { // dbQueryFailCounter.Mark(1) // return //} - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "Query") - defer task.End() - defer trace.StartRegion(ctx, "QueryRegion").End() + //ctx := context.Background() + //ctx, task := trace.NewTask(ctx, "Query") + //defer task.End() + //defer trace.StartRegion(ctx, "QueryRegion").End() // verify query is sent from the request node if req.Envelope.NodeID.String() != string(req.Header.NodeID) { // node id mismatch @@ -71,7 +71,7 @@ func (rpc *DBMSRPCService) Query(req *wt.Request, res *wt.Response) (err error) return } - var r *wt.Response + var r *types.Response if r, err = rpc.dbms.Query(req); err != nil { dbQueryFailCounter.Mark(1) return @@ -84,15 +84,15 @@ func (rpc *DBMSRPCService) Query(req *wt.Request, res *wt.Response) (err error) } // Ack rpc, called by client to confirm read request. 
-func (rpc *DBMSRPCService) Ack(ack *wt.Ack, _ *wt.AckResponse) (err error) { +func (rpc *DBMSRPCService) Ack(ack *types.Ack, _ *types.AckResponse) (err error) { // Just need to verify signature in db.saveAck //if err = ack.Verify(); err != nil { // return //} - ctx := context.Background() - ctx, task := trace.NewTask(ctx, "Ack") - defer task.End() - defer trace.StartRegion(ctx, "AckRegion").End() + //ctx := context.Background() + //ctx, task := trace.NewTask(ctx, "Ack") + //defer task.End() + //defer trace.StartRegion(ctx, "AckRegion").End() // verify if ack node is the original ack node if ack.Envelope.NodeID.String() != string(ack.Header.Response.Request.NodeID) { @@ -107,7 +107,7 @@ func (rpc *DBMSRPCService) Ack(ack *wt.Ack, _ *wt.AckResponse) (err error) { } // Deploy rpc, called by BP to create/drop database and update peers. -func (rpc *DBMSRPCService) Deploy(req *wt.UpdateService, _ *wt.UpdateServiceResponse) (err error) { +func (rpc *DBMSRPCService) Deploy(req *types.UpdateService, _ *types.UpdateServiceResponse) (err error) { // verify request node is block producer if !route.IsPermitted(&req.Envelope, route.DBSDeploy) { err = errors.Wrap(ErrInvalidRequest, "node not permitted for deploy request") @@ -121,20 +121,13 @@ func (rpc *DBMSRPCService) Deploy(req *wt.UpdateService, _ *wt.UpdateServiceResp // create/drop/update switch req.Header.Op { - case wt.CreateDB: + case types.CreateDB: err = rpc.dbms.Create(&req.Header.Instance, true) - case wt.UpdateDB: + case types.UpdateDB: err = rpc.dbms.Update(&req.Header.Instance) - case wt.DropDB: + case types.DropDB: err = rpc.dbms.Drop(req.Header.Instance.DatabaseID) } return } - -// GetRequest rpc, called by observer to fetch original request by log offset. -func (rpc *DBMSRPCService) GetRequest(req *wt.GetRequestReq, resp *wt.GetRequestResp) (err error) { - // TODO(xq262144), check permission - resp.Request, err = rpc.dbms.GetRequest(req.DatabaseID, req.LogOffset) - return -} diff --git a/worker/dbms_test.go b/worker/dbms_test.go index 7b8d43fd6..a424828cf 100644 --- a/worker/dbms_test.go +++ b/worker/dbms_test.go @@ -27,8 +27,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" - wt "github.com/CovenantSQL/CovenantSQL/worker/types" + "github.com/CovenantSQL/CovenantSQL/types" . 
"github.com/smartystreets/goconvey/convey" ) @@ -63,10 +62,10 @@ func TestDBMS(t *testing.T) { So(err, ShouldBeNil) // add database - var req *wt.UpdateService - var res wt.UpdateServiceResponse + var req *types.UpdateService + var res types.UpdateServiceResponse var peers *proto.Peers - var block *ct.Block + var block *types.Block dbID := proto.DatabaseID("db") @@ -79,9 +78,9 @@ func TestDBMS(t *testing.T) { So(err, ShouldBeNil) // call with no BP privilege - req = new(wt.UpdateService) - req.Header.Op = wt.CreateDB - req.Header.Instance = wt.ServiceInstance{ + req = new(types.UpdateService) + req.Header.Op = types.CreateDB + req.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, Peers: peers, GenesisBlock: block, @@ -96,9 +95,9 @@ func TestDBMS(t *testing.T) { Convey("queries", func() { // sending write query - var writeQuery *wt.Request - var queryRes *wt.Response - writeQuery, err = buildQueryWithDatabaseID(wt.WriteQuery, 1, 1, dbID, []string{ + var writeQuery *types.Request + var queryRes *types.Response + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 1, dbID, []string{ "create table test (test int)", "insert into test values(1)", }) @@ -109,20 +108,10 @@ func TestDBMS(t *testing.T) { err = queryRes.Verify() So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, 0) - So(queryRes.Header.LogOffset, ShouldEqual, 0) - - var reqGetRequest wt.GetRequestReq - var respGetRequest *wt.GetRequestResp - - reqGetRequest.DatabaseID = dbID - reqGetRequest.LogOffset = queryRes.Header.LogOffset - err = testRequest(route.DBSGetRequest, reqGetRequest, &respGetRequest) - So(err, ShouldBeNil) - So(respGetRequest.Request.Header.Hash, ShouldResemble, writeQuery.Header.Hash) // sending read query - var readQuery *wt.Request - readQuery, err = buildQueryWithDatabaseID(wt.ReadQuery, 1, 2, dbID, []string{ + var readQuery *types.Request + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 2, dbID, []string{ "select * from test", }) So(err, ShouldBeNil) @@ -139,20 +128,20 @@ func TestDBMS(t *testing.T) { So(queryRes.Payload.Rows[0].Values[0], ShouldEqual, 1) // sending read ack - var ack *wt.Ack + var ack *types.Ack ack, err = buildAck(queryRes) So(err, ShouldBeNil) - var ackRes wt.AckResponse + var ackRes types.AckResponse err = testRequest(route.DBSAck, ack, &ackRes) So(err, ShouldBeNil) }) Convey("query non-existent database", func() { // sending write query - var writeQuery *wt.Request - var queryRes *wt.Response - writeQuery, err = buildQueryWithDatabaseID(wt.WriteQuery, 1, 1, + var writeQuery *types.Request + var queryRes *types.Response + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 1, proto.DatabaseID("db_not_exists"), []string{ "create table test (test int)", "insert into test values(1)", @@ -168,9 +157,9 @@ func TestDBMS(t *testing.T) { peers, err = getPeers(2) So(err, ShouldBeNil) - req = new(wt.UpdateService) - req.Header.Op = wt.UpdateDB - req.Header.Instance = wt.ServiceInstance{ + req = new(types.UpdateService) + req.Header.Op = types.UpdateDB + req.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, Peers: peers, } @@ -183,9 +172,9 @@ func TestDBMS(t *testing.T) { Convey("drop database before shutdown", func() { // drop database - req = new(wt.UpdateService) - req.Header.Op = wt.DropDB - req.Header.Instance = wt.ServiceInstance{ + req = new(types.UpdateService) + req.Header.Op = types.DropDB + req.Header.Instance = types.ServiceInstance{ DatabaseID: dbID, } err = req.Sign(privateKey) diff --git a/worker/types/ack_type.go 
b/worker/otypes/ack_type.go similarity index 99% rename from worker/types/ack_type.go rename to worker/otypes/ack_type.go index f386e7556..795c99e1a 100644 --- a/worker/types/ack_type.go +++ b/worker/otypes/ack_type.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "time" diff --git a/worker/types/ack_type_gen.go b/worker/otypes/ack_type_gen.go similarity index 99% rename from worker/types/ack_type_gen.go rename to worker/otypes/ack_type_gen.go index 47511dcee..9c4a845c0 100644 --- a/worker/types/ack_type_gen.go +++ b/worker/otypes/ack_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. diff --git a/worker/otypes/ack_type_gen_test.go b/worker/otypes/ack_type_gen_test.go new file mode 100644 index 000000000..3fc0faf87 --- /dev/null +++ b/worker/otypes/ack_type_gen_test.go @@ -0,0 +1,158 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashAck(t *testing.T) { + v := Ack{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashAck(b *testing.B) { + v := Ack{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgAck(b *testing.B) { + v := Ack{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashAckHeader(t *testing.T) { + v := AckHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashAckHeader(b *testing.B) { + v := AckHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgAckHeader(b *testing.B) { + v := AckHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashAckResponse(t *testing.T) { + v := AckResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashAckResponse(b *testing.B) { + v := AckResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgAckResponse(b *testing.B) { + v := AckResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedAckHeader(t *testing.T) { + v := SignedAckHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != 
nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedAckHeader(b *testing.B) { + v := SignedAckHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedAckHeader(b *testing.B) { + v := SignedAckHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/chain/doc.go b/worker/otypes/doc.go similarity index 89% rename from chain/doc.go rename to worker/otypes/doc.go index 2fa161437..089862dc6 100644 --- a/chain/doc.go +++ b/worker/otypes/doc.go @@ -14,5 +14,7 @@ * limitations under the License. */ -// Package chain defines commonly types for block chain. -package chain +/* +Package otypes defines miner node export types. +*/ +package otypes diff --git a/worker/types/errors.go b/worker/otypes/errors.go similarity index 98% rename from worker/types/errors.go rename to worker/otypes/errors.go index b95037572..5c44a3c9d 100644 --- a/worker/types/errors.go +++ b/worker/otypes/errors.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import "errors" diff --git a/worker/types/get_request.go b/worker/otypes/get_request.go similarity index 98% rename from worker/types/get_request.go rename to worker/otypes/get_request.go index 5f19cef99..cd1bd4567 100644 --- a/worker/types/get_request.go +++ b/worker/otypes/get_request.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import "github.com/CovenantSQL/CovenantSQL/proto" diff --git a/worker/types/init_service_type.go b/worker/otypes/init_service_type.go similarity index 97% rename from worker/types/init_service_type.go rename to worker/otypes/init_service_type.go index 6007729db..54d2af65a 100644 --- a/worker/types/init_service_type.go +++ b/worker/otypes/init_service_type.go @@ -14,13 +14,13 @@ * limitations under the License. */ -package types +package otypes import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" - ct "github.com/CovenantSQL/CovenantSQL/sqlchain/types" + ct "github.com/CovenantSQL/CovenantSQL/sqlchain/otypes" ) //go:generate hsp diff --git a/worker/types/init_service_type_gen.go b/worker/otypes/init_service_type_gen.go similarity index 99% rename from worker/types/init_service_type_gen.go rename to worker/otypes/init_service_type_gen.go index 1fcce43a4..a538613bb 100644 --- a/worker/types/init_service_type_gen.go +++ b/worker/otypes/init_service_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. diff --git a/worker/otypes/init_service_type_gen_test.go b/worker/otypes/init_service_type_gen_test.go new file mode 100644 index 000000000..ee88b0eb6 --- /dev/null +++ b/worker/otypes/init_service_type_gen_test.go @@ -0,0 +1,232 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashInitService(t *testing.T) { + v := InitService{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashInitService(b *testing.B) { + v := InitService{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgInitService(b *testing.B) { + v := InitService{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashInitServiceResponse(t *testing.T) { + v := InitServiceResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashInitServiceResponse(b *testing.B) { + v := InitServiceResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgInitServiceResponse(b *testing.B) { + v := InitServiceResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashInitServiceResponseHeader(t *testing.T) { + v := InitServiceResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashInitServiceResponseHeader(b *testing.B) { + v := InitServiceResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgInitServiceResponseHeader(b *testing.B) { + v := InitServiceResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashResourceMeta(t *testing.T) { + v := ResourceMeta{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResourceMeta(b *testing.B) { + v := ResourceMeta{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResourceMeta(b *testing.B) { + v := ResourceMeta{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashServiceInstance(t *testing.T) { + v := ServiceInstance{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + 
t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashServiceInstance(b *testing.B) { + v := ServiceInstance{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgServiceInstance(b *testing.B) { + v := ServiceInstance{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedInitServiceResponseHeader(t *testing.T) { + v := SignedInitServiceResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedInitServiceResponseHeader(b *testing.B) { + v := SignedInitServiceResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedInitServiceResponseHeader(b *testing.B) { + v := SignedInitServiceResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/types/no_ack_report_type.go b/worker/otypes/no_ack_report_type.go similarity index 99% rename from worker/types/no_ack_report_type.go rename to worker/otypes/no_ack_report_type.go index bda6aee91..24b0fbfc4 100644 --- a/worker/types/no_ack_report_type.go +++ b/worker/otypes/no_ack_report_type.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "time" diff --git a/worker/types/no_ack_report_type_gen.go b/worker/otypes/no_ack_report_type_gen.go similarity index 99% rename from worker/types/no_ack_report_type_gen.go rename to worker/otypes/no_ack_report_type_gen.go index 8b1e57cd9..d2a3408b7 100644 --- a/worker/types/no_ack_report_type_gen.go +++ b/worker/otypes/no_ack_report_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. diff --git a/worker/otypes/no_ack_report_type_gen_test.go b/worker/otypes/no_ack_report_type_gen_test.go new file mode 100644 index 000000000..bf3e1bb8b --- /dev/null +++ b/worker/otypes/no_ack_report_type_gen_test.go @@ -0,0 +1,232 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashAggrNoAckReport(t *testing.T) { + v := AggrNoAckReport{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashAggrNoAckReport(b *testing.B) { + v := AggrNoAckReport{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgAggrNoAckReport(b *testing.B) { + v := AggrNoAckReport{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashAggrNoAckReportHeader(t *testing.T) { + v := AggrNoAckReportHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashAggrNoAckReportHeader(b *testing.B) { + v := AggrNoAckReportHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgAggrNoAckReportHeader(b *testing.B) { + v := AggrNoAckReportHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashNoAckReport(t *testing.T) { + v := NoAckReport{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashNoAckReport(b *testing.B) { + v := NoAckReport{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgNoAckReport(b *testing.B) { + v := NoAckReport{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashNoAckReportHeader(t *testing.T) { + v := NoAckReportHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashNoAckReportHeader(b *testing.B) { + v := NoAckReportHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgNoAckReportHeader(b *testing.B) { + v := NoAckReportHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedAggrNoAckReportHeader(t *testing.T) { + v := SignedAggrNoAckReportHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, 
bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedAggrNoAckReportHeader(b *testing.B) { + v := SignedAggrNoAckReportHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedAggrNoAckReportHeader(b *testing.B) { + v := SignedAggrNoAckReportHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedNoAckReportHeader(t *testing.T) { + v := SignedNoAckReportHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedNoAckReportHeader(b *testing.B) { + v := SignedNoAckReportHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedNoAckReportHeader(b *testing.B) { + v := SignedNoAckReportHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/types/request_type.go b/worker/otypes/request_type.go similarity index 99% rename from worker/types/request_type.go rename to worker/otypes/request_type.go index 01d94cb85..768c3165a 100644 --- a/worker/types/request_type.go +++ b/worker/otypes/request_type.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "time" diff --git a/worker/types/request_type_gen.go b/worker/otypes/request_type_gen.go similarity index 99% rename from worker/types/request_type_gen.go rename to worker/otypes/request_type_gen.go index c20cd58ba..0dd0e6375 100644 --- a/worker/types/request_type_gen.go +++ b/worker/otypes/request_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. diff --git a/worker/otypes/request_type_gen_test.go b/worker/otypes/request_type_gen_test.go new file mode 100644 index 000000000..c1371bf13 --- /dev/null +++ b/worker/otypes/request_type_gen_test.go @@ -0,0 +1,269 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashNamedArg(t *testing.T) { + v := NamedArg{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashNamedArg(b *testing.B) { + v := NamedArg{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgNamedArg(b *testing.B) { + v := NamedArg{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashQuery(t *testing.T) { + v := Query{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashQuery(b *testing.B) { + v := Query{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgQuery(b *testing.B) { + v := Query{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashQueryKey(t *testing.T) { + v := QueryKey{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashQueryKey(b *testing.B) { + v := QueryKey{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgQueryKey(b *testing.B) { + v := QueryKey{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashRequest(t *testing.T) { + v := Request{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequest(b *testing.B) { + v := Request{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequest(b *testing.B) { + v := Request{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashRequestHeader(t *testing.T) { + v := RequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequestHeader(b *testing.B) { + v := RequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func 
BenchmarkAppendMsgRequestHeader(b *testing.B) { + v := RequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashRequestPayload(t *testing.T) { + v := RequestPayload{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRequestPayload(b *testing.B) { + v := RequestPayload{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRequestPayload(b *testing.B) { + v := RequestPayload{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedRequestHeader(t *testing.T) { + v := SignedRequestHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedRequestHeader(b *testing.B) { + v := SignedRequestHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedRequestHeader(b *testing.B) { + v := SignedRequestHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/types/response_type.go b/worker/otypes/response_type.go similarity index 99% rename from worker/types/response_type.go rename to worker/otypes/response_type.go index de55945af..931eaa06d 100644 --- a/worker/types/response_type.go +++ b/worker/otypes/response_type.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "time" diff --git a/worker/types/response_type_gen.go b/worker/otypes/response_type_gen.go similarity index 99% rename from worker/types/response_type_gen.go rename to worker/otypes/response_type_gen.go index 898cbfa6e..b3d6e68ce 100644 --- a/worker/types/response_type_gen.go +++ b/worker/otypes/response_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. diff --git a/worker/otypes/response_type_gen_test.go b/worker/otypes/response_type_gen_test.go new file mode 100644 index 000000000..3b263b1ed --- /dev/null +++ b/worker/otypes/response_type_gen_test.go @@ -0,0 +1,195 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashResponse(t *testing.T) { + v := Response{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResponse(b *testing.B) { + v := Response{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResponse(b *testing.B) { + v := Response{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashResponseHeader(t *testing.T) { + v := ResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResponseHeader(b *testing.B) { + v := ResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResponseHeader(b *testing.B) { + v := ResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashResponsePayload(t *testing.T) { + v := ResponsePayload{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResponsePayload(b *testing.B) { + v := ResponsePayload{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResponsePayload(b *testing.B) { + v := ResponsePayload{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashResponseRow(t *testing.T) { + v := ResponseRow{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashResponseRow(b *testing.B) { + v := ResponseRow{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgResponseRow(b *testing.B) { + v := ResponseRow{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedResponseHeader(t *testing.T) { + v := SignedResponseHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedResponseHeader(b *testing.B) { + v 
:= SignedResponseHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedResponseHeader(b *testing.B) { + v := SignedResponseHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/types/types_test.go b/worker/otypes/types_test.go similarity index 99% rename from worker/types/types_test.go rename to worker/otypes/types_test.go index 89996e21b..aa498a453 100644 --- a/worker/types/types_test.go +++ b/worker/otypes/types_test.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "testing" diff --git a/worker/types/update_service_type.go b/worker/otypes/update_service_type.go similarity index 99% rename from worker/types/update_service_type.go rename to worker/otypes/update_service_type.go index 7d8689afa..24c83c49b 100644 --- a/worker/types/update_service_type.go +++ b/worker/otypes/update_service_type.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" diff --git a/worker/types/update_service_type_gen.go b/worker/otypes/update_service_type_gen.go similarity index 99% rename from worker/types/update_service_type_gen.go rename to worker/otypes/update_service_type_gen.go index c92211de3..e48777622 100644 --- a/worker/types/update_service_type_gen.go +++ b/worker/otypes/update_service_type_gen.go @@ -1,4 +1,4 @@ -package types +package otypes // Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. diff --git a/worker/otypes/update_service_type_gen_test.go b/worker/otypes/update_service_type_gen_test.go new file mode 100644 index 000000000..d78a63158 --- /dev/null +++ b/worker/otypes/update_service_type_gen_test.go @@ -0,0 +1,158 @@ +package otypes + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
+ +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashSignedUpdateServiceHeader(t *testing.T) { + v := SignedUpdateServiceHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedUpdateServiceHeader(b *testing.B) { + v := SignedUpdateServiceHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgSignedUpdateServiceHeader(b *testing.B) { + v := SignedUpdateServiceHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashUpdateService(t *testing.T) { + v := UpdateService{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashUpdateService(b *testing.B) { + v := UpdateService{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgUpdateService(b *testing.B) { + v := UpdateService{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashUpdateServiceHeader(t *testing.T) { + v := UpdateServiceHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashUpdateServiceHeader(b *testing.B) { + v := UpdateServiceHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgUpdateServiceHeader(b *testing.B) { + v := UpdateServiceHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashUpdateServiceResponse(t *testing.T) { + v := UpdateServiceResponse{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashUpdateServiceResponse(b *testing.B) { + v := UpdateServiceResponse{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgUpdateServiceResponse(b *testing.B) { + v := UpdateServiceResponse{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/worker/types/util.go b/worker/otypes/util.go similarity index 98% rename from worker/types/util.go rename to worker/otypes/util.go index a049bc07b..0108aec1e 100644 --- a/worker/types/util.go +++ b/worker/otypes/util.go 
@@ -14,7 +14,7 @@ * limitations under the License. */ -package types +package otypes import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" diff --git a/xenomint/chain.go b/xenomint/chain.go new file mode 100644 index 000000000..5b24c5956 --- /dev/null +++ b/xenomint/chain.go @@ -0,0 +1,101 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" + xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces" + xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite" + xt "github.com/CovenantSQL/CovenantSQL/xenomint/types" +) + +const ( + inCommandBufferLength = 100000 + outCommandBufferLength = 100000 +) + +type applyRequest struct { + request *types.Request + response *types.Response +} + +type blockNode struct { + parent *blockNode + // Cached block fields + hash hash.Hash + count int32 + height int32 + // Cached block object, may be nil + block *xt.Block +} + +// Chain defines the xenomint chain structure. +type Chain struct { + state *State + // Cached fields + priv *ca.PrivateKey +} + +// NewChain returns new chain instance. +func NewChain(filename string) (c *Chain, err error) { + var ( + strg xi.Storage + state *State + priv *ca.PrivateKey + ) + // generate empty nodeId + nodeID := proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000") + + // TODO(leventeliu): add multiple storage engine support. + if strg, err = xs.NewSqlite(filename); err != nil { + return + } + if state, err = NewState(nodeID, strg); err != nil { + return + } + if priv, err = kms.GetLocalPrivateKey(); err != nil { + return + } + c = &Chain{ + state: state, + priv: priv, + } + return +} + +// Query queries req from local chain state and returns the query results in resp. +func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) { + var ref *QueryTracker + if ref, resp, err = c.state.Query(req); err != nil { + return + } + if err = resp.Sign(c.priv); err != nil { + return + } + ref.UpdateResp(resp) + return +} + +// Stop stops chain workers and RPC service. +func (c *Chain) Stop() (err error) { + // Close all opened resources + return c.state.Close(true) +} diff --git a/xenomint/chain_test.go b/xenomint/chain_test.go new file mode 100644 index 000000000..a20fa4fa1 --- /dev/null +++ b/xenomint/chain_test.go @@ -0,0 +1,170 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "database/sql" + "fmt" + "math/rand" + "os" + "path" + "testing" + + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/types" +) + +func setupBenchmarkChain(b *testing.B) (c *Chain, n int, r []*types.Request) { + // Setup chain state + var ( + fl = path.Join(testingDataDir, b.Name()) + err error + stmt *sql.Stmt + ) + if c, err = NewChain(fmt.Sprint("file:", fl)); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if _, err = c.state.strg.Writer().Exec( + `CREATE TABLE "bench" ("k" INT, "v1" TEXT, "v2" TEXT, "v3" TEXT, PRIMARY KEY("k"))`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if stmt, err = c.state.strg.Writer().Prepare( + `INSERT INTO "bench" VALUES (?, ?, ?, ?)`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + for i := 0; i < benchmarkKeySpace; i++ { + var ( + vals [benchmarkVNum][benchmarkVLen]byte + args [benchmarkVNum + 1]interface{} + ) + args[0] = i + for i := range vals { + rand.Read(vals[i][:]) + args[i+1] = string(vals[i][:]) + } + if _, err = stmt.Exec(args[:]...); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + } + n = benchmarkKeySpace + // Setup query requests + var ( + sel = `SELECT "v1", "v2", "v3" FROM "bench" WHERE "k"=?` + ins = `INSERT INTO "bench" VALUES (?, ?, ?, ?) 
+ ON CONFLICT("k") DO UPDATE SET + "v1"="excluded"."v1", + "v2"="excluded"."v2", + "v3"="excluded"."v3" +` + priv *ca.PrivateKey + src = make([][]interface{}, benchmarkKeySpace) + ) + if priv, err = kms.GetLocalPrivateKey(); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + r = make([]*types.Request, 2*benchmarkKeySpace) + // Read query key space [0, n-1] + for i := 0; i < benchmarkKeySpace; i++ { + r[i] = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(sel, i), + }) + if err = r[i].Sign(priv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + } + // Write query key space [n, 2n-1] + for i := range src { + var vals [benchmarkVNum][benchmarkVLen]byte + src[i] = make([]interface{}, benchmarkVNum+1) + src[i][0] = i + benchmarkKeySpace + for j := range vals { + rand.Read(vals[j][:]) + src[i][j+1] = string(vals[j][:]) + } + } + for i := 0; i < benchmarkKeySpace; i++ { + r[benchmarkKeySpace+i] = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(ins, src[i]...), + }) + if err = r[i+benchmarkKeySpace].Sign(priv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + } + + b.ResetTimer() + return +} + +func teardownBenchmarkChain(b *testing.B, c *Chain) { + b.StopTimer() + + var ( + fl = path.Join(testingDataDir, b.Name()) + err error + ) + if err = c.Stop(); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fl); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-shm")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-wal")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } +} + +func BenchmarkChainParallelWrite(b *testing.B) { + var c, n, r = setupBenchmarkChain(b) + b.RunParallel(func(pb *testing.PB) { + var err error + for i := 0; pb.Next(); i++ { + if _, err = c.Query(r[n+rand.Intn(n)]); err != nil { + b.Fatalf("Failed to execute: %v", err) + } + if (i+1)%benchmarkQueriesPerBlock == 0 { + if err = c.state.commit(); err != nil { + b.Fatalf("Failed to commit block: %v", err) + } + } + } + }) + teardownBenchmarkChain(b, c) +} + +func BenchmarkChainParallelMixRW(b *testing.B) { + var c, n, r = setupBenchmarkChain(b) + b.RunParallel(func(pb *testing.PB) { + var err error + for i := 0; pb.Next(); i++ { + if _, err = c.Query(r[rand.Intn(2*n)]); err != nil { + b.Fatalf("Failed to execute: %v", err) + } + if (i+1)%benchmarkQueriesPerBlock == 0 { + if err = c.state.commit(); err != nil { + b.Fatalf("Failed to commit block: %v", err) + } + } + } + }) + teardownBenchmarkChain(b, c) +} diff --git a/xenomint/doc.go b/xenomint/doc.go new file mode 100644 index 000000000..3ee1ded09 --- /dev/null +++ b/xenomint/doc.go @@ -0,0 +1,18 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package xenomint provides an eventual consistency implementation of the blockchain database. +package xenomint diff --git a/xenomint/errors.go b/xenomint/errors.go new file mode 100644 index 000000000..725b140cb --- /dev/null +++ b/xenomint/errors.go @@ -0,0 +1,38 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "errors" +) + +var ( + // ErrMissingParent indicates the parent of the current query attempt is missing. + ErrMissingParent = errors.New("query missing parent") + // ErrInvalidRequest indicates the query is invalid. + ErrInvalidRequest = errors.New("invalid request") + // ErrQueryExists indicates the query already exists in pool. + ErrQueryExists = errors.New("query already exists") + // ErrStateClosed indicates the state is closed. + ErrStateClosed = errors.New("state is closed") + // ErrQueryConflict indicates the there is a conflict on query replay. + ErrQueryConflict = errors.New("query conflict") + // ErrLocalBehindRemote indicates the local state is behind the remote. + ErrLocalBehindRemote = errors.New("local state is behind the remote") + // ErrMuxServiceNotFound indicates that the multiplexing service endpoint is not found. + ErrMuxServiceNotFound = errors.New("mux service not found") +) diff --git a/chain/interfaces/doc.go b/xenomint/interfaces/doc.go similarity index 88% rename from chain/interfaces/doc.go rename to xenomint/interfaces/doc.go index 222335fed..c873b128c 100644 --- a/chain/interfaces/doc.go +++ b/xenomint/interfaces/doc.go @@ -14,5 +14,5 @@ * limitations under the License. */ -// Package interfaces defines commonly used interfaces for block chain. +// Package interfaces defines common used interfaces of the xenomint package. package interfaces diff --git a/xenomint/interfaces/interfaces.go b/xenomint/interfaces/interfaces.go new file mode 100644 index 000000000..17427cde9 --- /dev/null +++ b/xenomint/interfaces/interfaces.go @@ -0,0 +1,30 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package interfaces + +import ( + "database/sql" +) + +// Storage is the interface implemented by an object that returns standard *sql.DB as DirtyReader, +// Reader, or Writer and can be closed by Close. 
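In the sqlite-backed implementation added later in this diff, DirtyReader is a shared-cache connection with read_uncommitted enabled and may therefore observe the Writer's uncommitted changes, while Reader is a private read-only connection that only sees committed data. A minimal usage sketch of the three roles, assuming that implementation (xenomint/sqlite); the database file name is illustrative and most error handling is elided for brevity:

    package main

    import (
        "fmt"

        xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite"
    )

    func main() {
        // NewSqlite derives the dirty-reader/reader/writer connections
        // from the same database file.
        st, err := xs.NewSqlite("file:example.db")
        if err != nil {
            panic(err)
        }
        defer st.Close()
        _, _ = st.Writer().Exec(`CREATE TABLE "kv" ("k" INT PRIMARY KEY, "v" TEXT)`)

        tx, _ := st.Writer().Begin()
        _, _ = tx.Exec(`INSERT INTO "kv" VALUES (?, ?)`, 1, "v1")

        // The dirty reader shares the writer's cache and runs with
        // read_uncommitted=1, so it may already see the row here...
        var v string
        _ = st.DirtyReader().QueryRow(`SELECT "v" FROM "kv" WHERE "k"=?`, 1).Scan(&v)
        // ...whereas Reader keeps returning sql.ErrNoRows for the same key
        // until the transaction commits.
        _ = tx.Commit()
        fmt.Println(v)
    }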
+type Storage interface { + DirtyReader() *sql.DB + Reader() *sql.DB + Writer() *sql.DB + Close() error +} diff --git a/xenomint/mux.go b/xenomint/mux.go new file mode 100644 index 000000000..b1bd308ee --- /dev/null +++ b/xenomint/mux.go @@ -0,0 +1,124 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + //"context" + //"runtime/trace" + "sync" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/types" +) + +// MuxService defines multiplexing service of xenomint chain. +type MuxService struct { + ServiceName string + // serviceMap maps DatabaseID to *Chain. + serviceMap sync.Map +} + +// NewMuxService returns a new MuxService instance and registers it to server. +func NewMuxService(name string, server *rpc.Server) (service *MuxService, err error) { + var s = &MuxService{ + ServiceName: name, + } + if err = server.RegisterService(name, s); err != nil { + return + } + service = s + return +} + +func (s *MuxService) register(id proto.DatabaseID, c *Chain) { + s.serviceMap.Store(id, c) +} + +func (s *MuxService) unregister(id proto.DatabaseID) { + s.serviceMap.Delete(id) +} + +func (s *MuxService) route(id proto.DatabaseID) (c *Chain, err error) { + var ( + i interface{} + ok bool + ) + if i, ok = s.serviceMap.Load(id); !ok { + err = ErrMuxServiceNotFound + return + } + if c, ok = i.(*Chain); !ok { + err = ErrMuxServiceNotFound + return + } + return +} + +// MuxQueryRequest defines a request of the Query RPC method. +type MuxQueryRequest struct { + proto.DatabaseID + proto.Envelope + Request *types.Request +} + +// MuxQueryResponse defines a response of the Query RPC method. +type MuxQueryResponse struct { + proto.DatabaseID + proto.Envelope + Response *types.Response +} + +// Query is the RPC method to process database query on mux service. +func (s *MuxService) Query(req *MuxQueryRequest, resp *MuxQueryResponse) (err error) { + //var ctx, task = trace.NewTask(context.Background(), "MuxService.Query") + //defer task.End() + //defer trace.StartRegion(ctx, "Total").End() + var ( + c *Chain + r *types.Response + ) + if c, err = s.route(req.DatabaseID); err != nil { + return + } + if r, err = c.Query(req.Request); err != nil { + return + } + resp = &MuxQueryResponse{ + Envelope: req.Envelope, + DatabaseID: req.DatabaseID, + Response: r, + } + return +} + +// MuxLeaderCommitRequest a request of the MuxLeaderCommitResponse RPC method. +type MuxLeaderCommitRequest struct { + proto.DatabaseID + proto.Envelope + // Height is the expected block height of this commit. + Height int32 +} + +// MuxLeaderCommitResponse a response of the MuxLeaderCommitResponse RPC method. +type MuxLeaderCommitResponse struct { + proto.DatabaseID + proto.Envelope + // Height is the expected block height of this commit. 
+ Height int32 + Offset uint64 +} diff --git a/xenomint/mux_test.go b/xenomint/mux_test.go new file mode 100644 index 000000000..e4385ac5a --- /dev/null +++ b/xenomint/mux_test.go @@ -0,0 +1,289 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "database/sql" + "fmt" + "math/rand" + "os" + "path" + "strings" + "testing" + + "github.com/CovenantSQL/CovenantSQL/conf" + con "github.com/CovenantSQL/CovenantSQL/consistent" + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/types" +) + +type nodeRPCInfo struct { + node proto.Node + server *rpc.Server +} + +func setupBenchmarkMuxParallel(b *testing.B) ( + bp, miner *nodeRPCInfo, ms *MuxService, r []*MuxQueryRequest, +) { + var ( + priv *ca.PrivateKey + nis []proto.Node + dht *route.DHTService + bpSv, mnSv *rpc.Server + err error + ) + // Use testing private key to create several nodes + if priv, err = kms.GetLocalPrivateKey(); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if nis, err = createNodesWithPublicKey(priv.PubKey(), testingNonceDifficulty, 3); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } else if l := len(nis); l != 3 { + b.Fatalf("Failed to setup bench environment: unexpected length %d", l) + } + // Setup block producer RPC and register server address + bpSv = rpc.NewServer() + if err = bpSv.InitRPCServer( + "localhost:0", testingPrivateKeyFile, testingMasterKey, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + nis[0].Addr = bpSv.Listener.Addr().String() + nis[0].Role = proto.Leader + // Setup miner RPC and register server address + mnSv = rpc.NewServer() + if err = mnSv.InitRPCServer( + "localhost:0", testingPrivateKeyFile, testingMasterKey, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + nis[1].Addr = mnSv.Listener.Addr().String() + nis[1].Role = proto.Miner + // Setup client + nis[2].Role = proto.Client + // Setup global config + conf.GConf = &conf.Config{ + IsTestMode: true, + GenerateKeyPair: false, + MinNodeIDDifficulty: testingNonceDifficulty, + BP: &conf.BPInfo{ + PublicKey: priv.PubKey(), + NodeID: nis[0].ID, + Nonce: nis[0].Nonce, + }, + KnownNodes: nis, + } + // Register DHT service, this will also initialize the public key store + if dht, err = route.NewDHTService( + testingPublicKeyStoreFile, &con.KMSStorage{}, true, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } else if err = bpSv.RegisterService(route.DHTRPCName, dht); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + kms.SetLocalNodeIDNonce(nis[2].ID.ToRawNodeID().CloneBytes(), &nis[2].Nonce) + for i := range nis { + route.SetNodeAddrCache(nis[i].ID.ToRawNodeID(), 
nis[i].Addr) + kms.SetNode(&nis[i]) + } + // Register mux service + if ms, err = NewMuxService(benchmarkRPCName, mnSv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + + // Setup query requests + var ( + sel = `SELECT "v1", "v2", "v3" FROM "bench" WHERE "k"=?` + ins = `INSERT INTO "bench" VALUES (?, ?, ?, ?) + ON CONFLICT("k") DO UPDATE SET + "v1"="excluded"."v1", + "v2"="excluded"."v2", + "v3"="excluded"."v3" +` + src = make([][]interface{}, benchmarkKeySpace) + ) + r = make([]*MuxQueryRequest, 2*benchmarkKeySpace) + // Read query key space [0, n-1] + for i := 0; i < benchmarkKeySpace; i++ { + var req = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(sel, i), + }) + if err = req.Sign(priv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + r[i] = &MuxQueryRequest{ + DatabaseID: benchmarkDatabaseID, + Request: req, + } + } + // Write query key space [n, 2n-1] + for i := range src { + var vals [benchmarkVNum][benchmarkVLen]byte + src[i] = make([]interface{}, benchmarkVNum+1) + src[i][0] = i + benchmarkKeySpace + for j := range vals { + rand.Read(vals[j][:]) + src[i][j+1] = string(vals[j][:]) + } + } + for i := 0; i < benchmarkKeySpace; i++ { + var req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(ins, src[i]...), + }) + if err = req.Sign(priv); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + r[benchmarkKeySpace+i] = &MuxQueryRequest{ + DatabaseID: benchmarkDatabaseID, + Request: req, + } + } + + bp = &nodeRPCInfo{ + node: nis[0], + server: bpSv, + } + miner = &nodeRPCInfo{ + node: nis[1], + server: mnSv, + } + + go bpSv.Serve() + go mnSv.Serve() + //ca.BypassSignature = true + return +} + +func teardownBenchmarkMuxParallel(b *testing.B, bpSv, mnSv *rpc.Server) { + //ca.BypassSignature = false + mnSv.Stop() + bpSv.Stop() +} + +func setupSubBenchmarkMuxParallel(b *testing.B, ms *MuxService) (c *Chain) { + // Setup chain state + var ( + fl = path.Join(testingDataDir, strings.Replace(b.Name(), "/", "-", -1)) + err error + stmt *sql.Stmt + ) + if c, err = NewChain(fmt.Sprint("file:", fl)); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if _, err = c.state.strg.Writer().Exec( + `CREATE TABLE "bench" ("k" INT, "v1" TEXT, "v2" TEXT, "v3" TEXT, PRIMARY KEY("k"))`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if stmt, err = c.state.strg.Writer().Prepare( + `INSERT INTO "bench" VALUES (?, ?, ?, ?)`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + for i := 0; i < benchmarkKeySpace; i++ { + var ( + vals [benchmarkVNum][benchmarkVLen]byte + args [benchmarkVNum + 1]interface{} + ) + args[0] = i + for i := range vals { + rand.Read(vals[i][:]) + args[i+1] = string(vals[i][:]) + } + if _, err = stmt.Exec(args[:]...); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + } + ms.register(benchmarkDatabaseID, c) + + b.ResetTimer() + return +} + +func teardownSubBenchmarkMuxParallel(b *testing.B, ms *MuxService) { + b.StopTimer() + + var ( + fl = path.Join(testingDataDir, strings.Replace(b.Name(), "/", "-", -1)) + err error + c *Chain + ) + // Stop RPC server + if c, err = ms.route(benchmarkDatabaseID); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + ms.unregister(benchmarkDatabaseID) + // Close chain + if err = c.Stop(); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fl); err != nil { + b.Fatalf("Failed to 
teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-shm")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-wal")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } +} + +func BenchmarkMuxParallel(b *testing.B) { + var bp, s, ms, r = setupBenchmarkMuxParallel(b) + defer teardownBenchmarkMuxParallel(b, bp.server, s.server) + var benchmarks = []struct { + name string + randkey func(n int) int // Returns a random key from given key space + }{ + { + name: "Write", + randkey: func(n int) int { return n + rand.Intn(n) }, + }, { + name: "MixRW", + randkey: func(n int) int { return rand.Intn(2 * n) }, + }, + } + for _, bm := range benchmarks { + b.Run(bm.name, func(b *testing.B) { + var c = setupSubBenchmarkMuxParallel(b, ms) + defer teardownSubBenchmarkMuxParallel(b, ms) + b.RunParallel(func(pb *testing.PB) { + var ( + err error + method = fmt.Sprintf("%s.%s", benchmarkRPCName, "Query") + caller = rpc.NewPersistentCaller(s.node.ID) + ) + for i := 0; pb.Next(); i++ { + if err = caller.Call( + method, &r[bm.randkey(benchmarkKeySpace)], &MuxQueryResponse{}, + ); err != nil { + b.Fatalf("Failed to execute: %v", err) + } + if (i+1)%benchmarkQueriesPerBlock == 0 { + if err = c.state.commit(); err != nil { + b.Fatalf("Failed to commit block: %v", err) + } + } + } + }) + }) + } +} diff --git a/xenomint/pool.go b/xenomint/pool.go new file mode 100644 index 000000000..38a1f6511 --- /dev/null +++ b/xenomint/pool.go @@ -0,0 +1,141 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "sync" + "sync/atomic" + + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/types" +) + +// QueryTracker defines an object to track query as a request - response pair. +type QueryTracker struct { + sync.RWMutex + Req *types.Request + Resp *types.Response +} + +// UpdateResp updates response of the QueryTracker within locking scope. +func (q *QueryTracker) UpdateResp(resp *types.Response) { + q.Lock() + defer q.Unlock() + q.Resp = resp +} + +// Ready reports whether the query is ready for block producing. It is assumed that all objects +// should be ready shortly. 
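Chain.Query, shown earlier in this diff, pairs every accepted request with a QueryTracker: the state layer returns the tracker together with the response, the chain signs the response, and UpdateResp records it on the tracker. Ready then reports true once that signed response is in place, which is what the block producing path is expected to wait for before packing the request/response pair into a block.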
+func (q *QueryTracker) Ready() bool { + q.RLock() + defer q.RUnlock() + return q.Resp != nil +} + +type pool struct { + // Failed queries: hash => Request + failed map[hash.Hash]*types.Request + // Succeeded queries and their index + queries []*QueryTracker + index map[uint64]int + // Atomic counters for stats + failedRequestCount int32 + trackerCount int32 +} + +func newPool() *pool { + return &pool{ + failed: make(map[hash.Hash]*types.Request), + queries: make([]*QueryTracker, 0), + index: make(map[uint64]int), + } +} + +func (p *pool) enqueue(sp uint64, q *QueryTracker) { + var pos = len(p.queries) + p.queries = append(p.queries, q) + p.index[sp] = pos + atomic.StoreInt32(&p.trackerCount, int32(len(p.queries))) + return +} + +func (p *pool) setFailed(req *types.Request) { + p.failed[req.Header.Hash()] = req + atomic.StoreInt32(&p.failedRequestCount, int32(len(p.failed))) +} + +func (p *pool) failedList() (reqs []*types.Request) { + reqs = make([]*types.Request, 0, len(p.failed)) + for _, v := range p.failed { + reqs = append(reqs, v) + } + return +} + +func (p *pool) removeFailed(req *types.Request) { + delete(p.failed, req.Header.Hash()) + atomic.StoreInt32(&p.failedRequestCount, int32(len(p.failed))) +} + +func (p *pool) match(sp uint64, req *types.Request) bool { + var ( + pos int + ok bool + ) + if pos, ok = p.index[sp]; !ok { + return false + } + if p.queries[pos].Req.Header.Hash() != req.Header.Hash() { + return false + } + return true +} + +func (p *pool) matchLast(sp uint64) bool { + var ( + pos int + ok bool + ) + if pos, ok = p.index[sp]; !ok { + return false + } + if pos != len(p.queries)-1 { + return false + } + return true +} + +func (p *pool) truncate(sp uint64) { + var ( + pos int + ok bool + ni map[uint64]int + ) + if pos, ok = p.index[sp]; !ok { + return + } + // Rebuild index + ni = make(map[uint64]int) + for k, v := range p.index { + if k > sp { + ni[k] = v - (pos + 1) + } + } + p.index = ni + p.queries = p.queries[pos+1:] + atomic.StoreInt32(&p.trackerCount, int32(len(p.queries))) +} diff --git a/xenomint/pool_test.go b/xenomint/pool_test.go new file mode 100644 index 000000000..f68ede27a --- /dev/null +++ b/xenomint/pool_test.go @@ -0,0 +1,17 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint diff --git a/xenomint/sqlite/doc.go b/xenomint/sqlite/doc.go new file mode 100644 index 000000000..bd513d369 --- /dev/null +++ b/xenomint/sqlite/doc.go @@ -0,0 +1,18 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package sqlite introduces a sqlite3 implementation of the xenomint/interfaces.Storage interface. +package sqlite diff --git a/xenomint/sqlite/sqlite.go b/xenomint/sqlite/sqlite.go new file mode 100644 index 000000000..0ea8bf846 --- /dev/null +++ b/xenomint/sqlite/sqlite.go @@ -0,0 +1,120 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sqlite + +import ( + "database/sql" + + "github.com/CovenantSQL/CovenantSQL/storage" + "github.com/CovenantSQL/go-sqlite3-encrypt" +) + +const ( + serializableDriver = "sqlite3" + dirtyReadDriver = "sqlite3-dirty-reader" +) + +func init() { + sql.Register(dirtyReadDriver, &sqlite3.SQLiteDriver{ + ConnectHook: func(c *sqlite3.SQLiteConn) (err error) { + if _, err = c.Exec("PRAGMA read_uncommitted=1", nil); err != nil { + return + } + return + }, + }) +} + +// SQLite3 is the sqlite3 implementation of the xenomint/interfaces.Storage interface. +type SQLite3 struct { + filename string + dirtyReader *sql.DB + reader *sql.DB + writer *sql.DB +} + +// NewSqlite returns a new SQLite3 instance attached to filename. +func NewSqlite(filename string) (s *SQLite3, err error) { + var ( + instance = &SQLite3{filename: filename} + shmRODSN string + privRODSN string + shmRWDSN string + dsn *storage.DSN + ) + + if dsn, err = storage.NewDSN(filename); err != nil { + return + } + + dsnRO := dsn.Clone() + dsnRO.AddParam("_journal_mode", "WAL") + dsnRO.AddParam("_query_only", "on") + dsnRO.AddParam("cache", "shared") + shmRODSN = dsnRO.Format() + + dsnPrivRO := dsn.Clone() + dsnPrivRO.AddParam("_journal_mode", "WAL") + dsnPrivRO.AddParam("_query_only", "on") + privRODSN = dsnPrivRO.Format() + + dsnSHMRW := dsn.Clone() + dsnSHMRW.AddParam("_journal_mode", "WAL") + dsnSHMRW.AddParam("cache", "shared") + shmRWDSN = dsnSHMRW.Format() + + if instance.dirtyReader, err = sql.Open(dirtyReadDriver, shmRODSN); err != nil { + return + } + if instance.reader, err = sql.Open(serializableDriver, privRODSN); err != nil { + return + } + if instance.writer, err = sql.Open(serializableDriver, shmRWDSN); err != nil { + return + } + s = instance + return +} + +// DirtyReader implements DirtyReader method of the xenomint/interfaces.Storage interface. +func (s *SQLite3) DirtyReader() *sql.DB { + return s.dirtyReader +} + +// Reader implements Reader method of the xenomint/interfaces.Storage interface. +func (s *SQLite3) Reader() *sql.DB { + return s.reader +} + +// Writer implements Writer method of the xenomint/interfaces.Storage interface. +func (s *SQLite3) Writer() *sql.DB { + return s.writer +} + +// Close implements Close method of the xenomint/interfaces.Storage interface. 
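The NewSqlite constructor above derives three DSNs from a single database file, all with WAL journaling enabled: the dirty reader uses cache=shared plus _query_only together with the sqlite3-dirty-reader driver, whose connect hook issues PRAGMA read_uncommitted=1, so it shares the writer's page cache and may observe uncommitted writes; the plain reader is a private read-only connection; and the writer is the shared-cache read-write connection.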
+func (s *SQLite3) Close() (err error) { + if err = s.dirtyReader.Close(); err != nil { + return + } + if err = s.reader.Close(); err != nil { + return + } + if err = s.writer.Close(); err != nil { + return + } + return +} diff --git a/xenomint/sqlite/sqlite_test.go b/xenomint/sqlite/sqlite_test.go new file mode 100644 index 000000000..6288b8f51 --- /dev/null +++ b/xenomint/sqlite/sqlite_test.go @@ -0,0 +1,1072 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sqlite + +import ( + "database/sql" + "fmt" + "math/rand" + "os" + "path" + "sync" + "sync/atomic" + "testing" + "time" + + xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces" + . "github.com/smartystreets/goconvey/convey" +) + +func TestStorage(t *testing.T) { + Convey("Given a sqlite storage implementation", t, func() { + const passes = 1000 + var ( + fl = path.Join(testingDataDir, t.Name()) + st xi.Storage + err error + ) + st, err = NewSqlite(fmt.Sprint("file:", fl)) + So(err, ShouldBeNil) + So(st, ShouldNotBeNil) + Reset(func() { + // Clean database file after each pass + err = st.Close() + So(err, ShouldBeNil) + err = os.Remove(fl) + So(err, ShouldBeNil) + err = os.Remove(fmt.Sprint(fl, "-shm")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + err = os.Remove(fmt.Sprint(fl, "-wal")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + }) + Convey("When a basic KV table is created", func(c C) { + // Create basic table for testing + _, err = st.Writer().Exec(`CREATE TABLE "t1" ("k" INT, "v" TEXT, PRIMARY KEY("k"))`) + So(err, ShouldBeNil) + Convey("When storage is closed", func() { + err = st.Close() + So(err, ShouldBeNil) + Convey("The storage should report error for any incoming query", func() { + err = st.DirtyReader().QueryRow(`SELECT "v" FROM "t1" WHERE "k"=?`, 1).Scan(nil) + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "sql: database is closed") + err = st.Reader().QueryRow(`SELECT "v" FROM "t1" WHERE "k"=?`, 1).Scan(nil) + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "sql: database is closed") + _, err = st.Writer().Exec(`INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, 1, "v1") + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "sql: database is closed") + }) + }) + Convey("The storage should report error when readers attempt to write", func() { + _, err = st.DirtyReader().Exec(`INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, 1, "v1") + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "attempt to write a readonly database") + _, err = st.Reader().Exec(`INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, 1, "v1") + So(err, ShouldNotBeNil) + So(err.Error(), ShouldEqual, "attempt to write a readonly database") + }) + Convey("The storage should work properly under concurrent reading/writing", func(c C) { + var ( + ec = make(chan error, passes) + sc = make(chan struct{}) + wg = &sync.WaitGroup{} + + abortReaders = func() { close(sc) } + ) + for i := 0; i < passes; i++ { + wg.Add(1) + go func(k int) { + var ticker = 
time.NewTicker(1 * time.Millisecond) + defer func() { + ticker.Stop() + wg.Done() + }() + for { + select { + case <-ticker.C: + var ( + err error + v string + ) + if err = st.Reader().QueryRow( + `SELECT "v" FROM "t1" WHERE "k"=?`, k, + ).Scan(&v); err != sql.ErrNoRows { + if err != nil { + ec <- err + } else { + c.Printf("\n Read pair from t1: k=%d v=%s ", k, v) + } + return + } + case <-sc: + return + } + } + }(i) + } + defer func() { + wg.Wait() + close(ec) + var errs = len(ec) + for err = range ec { + Printf("\n Get error from channel: %v ", err) + } + So(errs, ShouldBeZeroValue) + }() + for i := 0; i < passes; i++ { + if _, err = st.Writer().Exec( + `INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, i, fmt.Sprintf("v%d", i), + ); err != nil { + abortReaders() + } + So(err, ShouldBeNil) + c.Printf("\n Write pair to t1: k=%d v=v%d ", i, i) + } + }) + Convey("The storage should see uncommitted changes from dirty reader", func(c C) { + var ( + tx *sql.Tx + ec = make(chan error, passes) + sc = make(chan struct{}) + wg = &sync.WaitGroup{} + + abortReaders = func() { close(sc) } + ) + // Open transaction + tx, err = st.Writer().Begin() + So(err, ShouldBeNil) + So(tx, ShouldNotBeNil) + for i := 0; i < passes; i++ { + wg.Add(1) + go func(k int) { + var ticker = time.NewTicker(1 * time.Millisecond) + defer func() { + ticker.Stop() + wg.Done() + }() + for { + select { + case <-ticker.C: + var ( + err error + v string + ) + if err = st.DirtyReader().QueryRow( + `SELECT "v" FROM "t1" WHERE "k"=?`, k, + ).Scan(&v); err != sql.ErrNoRows { + if err != nil { + ec <- err + } else { + c.Printf("\n Dirty read pair from t1: k=%d v=%s ", + k, v) + } + return + } + case <-sc: + return + } + } + }(i) + } + defer func() { + wg.Wait() + close(ec) + var errs = len(ec) + for err = range ec { + Printf("\n Get error from channel: %v ", err) + } + So(errs, ShouldBeZeroValue) + err = tx.Commit() + So(err, ShouldBeNil) + }() + for i := 0; i < passes; i++ { + var ( + v = fmt.Sprintf("v%d", i) + rv string + ) + if _, err = tx.Exec( + `INSERT INTO "t1" ("k", "v") VALUES (?, ?)`, i, v, + ); err != nil { + abortReaders() + } + So(err, ShouldBeNil) + // No isolation between operations on the same database connection + if err = tx.QueryRow( + `SELECT "v" FROM "t1" WHERE "k"=?`, i, + ).Scan(&rv); err != nil || rv != v { + abortReaders() + } + So(err, ShouldBeNil) + So(rv, ShouldEqual, v) + c.Printf("\n Write pair to t1 in transaction: k=%d v=%s ", i, v) + } + // Reader connection should not see any uncommitted change + for i := 0; i < passes; i++ { + err = st.Reader().QueryRow(`SELECT "v" FROM "t1" WHERE "k"=?`, i).Scan(nil) + So(err, ShouldEqual, sql.ErrNoRows) + } + }) + }) + }) +} + +const ( + benchmarkQueriesPerTx = 100 + benchmarkVNum = 3 + benchmarkVLen = 333 + benchmarkKeySubspaceLength = 1000000 + + benchmarkReservedKeyOffset = iota * benchmarkKeySubspaceLength + benchmarkNewKeyOffset + benchmarkKeySpace +) + +type keygen interface { + next() int + reset() +} + +type randKeygen struct { + offset int + length int +} + +func newRandKeygen(offset, length int) *randKeygen { + return &randKeygen{ + offset: offset, + length: length, + } +} + +func newIndexRandKeygen(length int) *randKeygen { return newRandKeygen(0, length) } + +func (k *randKeygen) next() int { return rand.Intn(k.length) + k.offset } +func (k *randKeygen) reset() {} + +type permKeygen struct { + offset int + length int + perm []int + pos int32 +} + +func newPermKeygen(offset, length int) *permKeygen { + return &permKeygen{ + offset: offset, + length: length, + 
perm: rand.Perm(length), + } +} + +func newIndexPermKeygen(length int) *permKeygen { return newPermKeygen(0, length) } + +func (k *permKeygen) next() int { + var pos = atomic.AddInt32(&k.pos, 1) - 1 + if pos >= int32(k.length) { + panic("permKeygen: keys have been exhausted") + } + return k.perm[pos] + k.offset +} + +func (k *permKeygen) reset() { k.pos = 0 } + +var ( + irkg = newIndexRandKeygen(benchmarkKeySubspaceLength) + ipkg = newIndexPermKeygen(benchmarkKeySubspaceLength) + rrkg = newRandKeygen(benchmarkReservedKeyOffset, benchmarkKeySubspaceLength) + nrkg = newRandKeygen(benchmarkNewKeyOffset, benchmarkKeySubspaceLength) + trkg = newRandKeygen(0, benchmarkKeySpace) +) + +func setupBenchmarkStorage(b *testing.B) ( + st xi.Storage, + q string, makeDest func() []interface{}, + e string, src [][]interface{}, +) { + // Setup storage + var ( + fl = path.Join(testingDataDir, b.Name()) + err error + stmt *sql.Stmt + ) + if st, err = NewSqlite(fmt.Sprint("file:", fl)); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if _, err = st.Writer().Exec( + `CREATE TABLE "t2" ("k" INT, "v1" TEXT, "v2" TEXT, "v3" TEXT, PRIMARY KEY("k"))`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if stmt, err = st.Writer().Prepare( + `INSERT INTO "t2" VALUES (?, ?, ?, ?)`, + ); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + for i := 0; i < benchmarkKeySubspaceLength; i++ { + var ( + vals [benchmarkVNum][benchmarkVLen]byte + args [benchmarkVNum + 1]interface{} + ) + args[0] = benchmarkReservedKeyOffset + i + for i := range vals { + rand.Read(vals[i][:]) + args[i+1] = string(vals[i][:]) + } + if _, err = stmt.Exec(args[:]...); err != nil { + b.Fatalf("Failed to setup bench environment: %v", err) + } + if i%10000 == 0 { + fmt.Printf("Done setup key at %v\n", i) + } + } + // Setup query string and dest slice + q = `SELECT "v1", "v2", "v3" FROM "t2" WHERE "k"=?` + makeDest = func() (dest []interface{}) { + var outv [benchmarkVNum]string + dest = make([]interface{}, benchmarkVNum) + for i := range outv { + dest[i] = &outv[i] + } + return + } + // Setup execute string and src table + // + // NOTE(leventeliu): allowing IGNORE and REPLACE both have impact on benchmark result, + // while UPSERT is the best! + // + // e = `INSERT OR IGNORE INTO "t2" VALUES (?, ?, ?, ?)` + // e = `REPLACE INTO "t2" VALUES (?, ?, ?, ?)` + e = `INSERT INTO "t2" VALUES (?, ?, ?, ?) 
+ ON CONFLICT("k") DO UPDATE SET + "v1"="excluded"."v1", + "v2"="excluded"."v2", + "v3"="excluded"."v3" +` + src = make([][]interface{}, benchmarkKeySubspaceLength) + for i := range src { + var vals [benchmarkVNum][benchmarkVLen]byte + src[i] = make([]interface{}, benchmarkVNum+1) + src[i][0] = benchmarkNewKeyOffset + i + for j := range vals { + rand.Read(vals[j][:]) + src[i][j+1] = string(vals[j][:]) + } + } + + return +} + +func teardownBenchmarkStorage(b *testing.B, st xi.Storage) { + var ( + fl = path.Join(testingDataDir, b.Name()) + err error + ) + if err = st.Close(); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fl); err != nil { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-shm")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } + if err = os.Remove(fmt.Sprint(fl, "-wal")); err != nil && !os.IsNotExist(err) { + b.Fatalf("Failed to teardown bench environment: %v", err) + } +} + +func setupSubBenchmarkStorage(b *testing.B, st xi.Storage) { + // Reset key generators + irkg.reset() + ipkg.reset() + rrkg.reset() + nrkg.reset() + trkg.reset() +} + +func teardownSubBenchmarkStorage(b *testing.B, st xi.Storage) { + var ( + d = `DELETE FROM "t2" WHERE "k">=?` + err error + ) + if _, err = st.Writer().Exec(d, benchmarkNewKeyOffset); err != nil { + b.Fatalf("Failed to teardown sub bench environment: %v", err) + } +} + +type benchmarkProfile struct { + name string + parall bool + proc func(*testing.B, int) + pproc func(*testing.PB) + bg func(*testing.B, *sync.WaitGroup, <-chan struct{}) +} + +func BenchmarkStorage(b *testing.B) { + var ( + st, q, dm, e, src = setupBenchmarkStorage(b) + + tx *sql.Tx + dest = dm() + read = func(b *testing.B, conn *sql.DB, dest []interface{}) { + var err error + if err = conn.QueryRow(q, rrkg.next()).Scan(dest...); err != nil { + b.Fatalf("Failed to query values: %v", err) + } + } + readTx = func(b *testing.B, i int, conn *sql.DB, dest []interface{}) { + var err error + if i%benchmarkQueriesPerTx == 0 { + if tx, err = conn.Begin(); err != nil { + b.Fatalf("Failed to begin transaction: %v", err) + } + } + // Query in [n, 2n-1] key space + if err = tx.QueryRow(q, nrkg.next()).Scan(dest...); err != nil && err != sql.ErrNoRows { + b.Fatalf("Failed to query values: %v", err) + } + if (i+1)%benchmarkQueriesPerTx == 0 || i == b.N-1 { + if err = tx.Rollback(); err != nil { + b.Fatalf("Failed to close transaction: %v", err) + } + } + } + write = func(b *testing.B, conn *sql.DB) { + var err error + if _, err = conn.Exec(e, src[ipkg.next()]...); err != nil { + b.Errorf("Failed to execute: %v", err) + } + } + writeTx = func(b *testing.B, i int, conn *sql.DB) { + var err error + if i%benchmarkQueriesPerTx == 0 { + if tx, err = st.Writer().Begin(); err != nil { + b.Errorf("Failed to begin transaction: %v", err) + } + } + if _, err = tx.Exec(e, src[ipkg.next()]...); err != nil { + b.Errorf("Failed to execute: %v", err) + } + if (i+1)%benchmarkQueriesPerTx == 0 || i == b.N-1 { + if err = tx.Commit(); err != nil { + b.Errorf("Failed to commit transaction: %v", err) + } + } + } + mixRW = func(b *testing.B, rconn, wconn *sql.DB, dest []interface{}) { + if rand.Int()%2 == 0 { + read(b, rconn, dest) + } else { + write(b, wconn) + } + } + + bgw = func(b *testing.B, wg *sync.WaitGroup, sc <-chan struct{}) { + busyWrite(b, wg, sc, st, ipkg, e, src) + } + bgbwtx = func(b *testing.B, wg *sync.WaitGroup, sc <-chan struct{}) { + 
busyWriteTx(b, wg, sc, st, ipkg, e, src) + } + bgiwtx = func(b *testing.B, wg *sync.WaitGroup, sc <-chan struct{}) { + idleWriteTx(b, wg, sc, st, ipkg, e, src) + } + + pproc = func(pb *testing.PB, proc func()) { + for pb.Next() { + proc() + } + } + + profiles = [...]benchmarkProfile{ + { + name: "SequentialDirtyRead", + proc: func(b *testing.B, _ int) { read(b, st.DirtyReader(), dest) }, + }, { + name: "SequentialRead", + proc: func(b *testing.B, _ int) { read(b, st.Reader(), dest) }, + }, { + name: "SequentialWrite", + proc: func(b *testing.B, _ int) { write(b, st.Writer()) }, + }, { + name: "SequentialWriteTx", + proc: func(b *testing.B, i int) { writeTx(b, i, st.Writer()) }, + }, { + name: "SequentialMixDRW", + proc: func(b *testing.B, _ int) { mixRW(b, st.DirtyReader(), st.Writer(), dest) }, + }, { + name: "SequentialMixRW", + proc: func(b *testing.B, _ int) { mixRW(b, st.Reader(), st.Writer(), dest) }, + }, { + name: "SequentialDirtyReadWithBackgroundWriter", + proc: func(b *testing.B, _ int) { read(b, st.DirtyReader(), dest) }, + bg: bgw, + }, { + name: "SequentialReadWithBackgroundWriter", + proc: func(b *testing.B, _ int) { read(b, st.Reader(), dest) }, + bg: bgw, + }, { + name: "SequentialDirtyReadWithBackgroundBusyTxWriter", + proc: func(b *testing.B, _ int) { read(b, st.DirtyReader(), dest) }, + bg: bgbwtx, + }, { + name: "SequentialReadWithBackgroundBusyTxWriter", + proc: func(b *testing.B, _ int) { read(b, st.Reader(), dest) }, + bg: bgbwtx, + }, { + name: "SequentialDirtyReadWithBackgroundIdleTxWriter", + proc: func(b *testing.B, _ int) { read(b, st.DirtyReader(), dest) }, + bg: bgiwtx, + }, { + name: "SequentialReadWithBackgroundIdleTxWriter", + proc: func(b *testing.B, _ int) { read(b, st.Reader(), dest) }, + bg: bgiwtx, + }, { + name: "SequentialDirtyReadTxWithBackgroundWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.DirtyReader(), dest) }, + bg: bgw, + }, { + name: "SequentialReadTxWithBackgroundWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.Reader(), dest) }, + bg: bgw, + }, { + name: "SequentialDirtyReadTxWithBackgroundBusyTxWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.DirtyReader(), dest) }, + bg: bgbwtx, + }, { + name: "SequentialReadTxWithBackgroundBusyTxWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.Reader(), dest) }, + bg: bgbwtx, + }, { + name: "SequentialDirtyReadTxWithBackgroundIdleTxWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.DirtyReader(), dest) }, + bg: bgiwtx, + }, { + name: "SequentialReadTxWithBackgroundIdleTxWriter", + proc: func(b *testing.B, i int) { readTx(b, i, st.Reader(), dest) }, + bg: bgiwtx, + }, { + name: "ParallelDirtyRead", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { read(b, st.DirtyReader(), dm()) }) + }, + }, { + name: "ParallelRead", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { read(b, st.Reader(), dm()) }) + }, + }, { + name: "ParallelWrite", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { write(b, st.Writer()) }) + }, + }, { + name: "ParallelMixDRW", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { mixRW(b, st.DirtyReader(), st.Writer(), dm()) }) + }, + }, { + name: "ParallelMixRW", + parall: true, + pproc: func(pb *testing.PB) { + pproc(pb, func() { mixRW(b, st.Reader(), st.Writer(), dm()) }) + }, + }, + } + ) + defer teardownBenchmarkStorage(b, st) + // Run benchmark profiles + for _, v := range profiles { + b.Run(v.name, func(b *testing.B) { + // Setup environment for 
sub-benchmark + setupSubBenchmarkStorage(b, st) + defer teardownSubBenchmarkStorage(b, st) + // Start background goroutine + var ( + wg = &sync.WaitGroup{} + sc = make(chan struct{}) + ) + if v.bg != nil { + wg.Add(1) + go v.bg(b, wg, sc) + } + defer func() { + close(sc) + wg.Wait() + }() + // Test body + b.ResetTimer() + if v.parall { + // Run parallel + b.RunParallel(v.pproc) + } else { + // Run sequential + for i := 0; i < b.N; i++ { + v.proc(b, i) + } + } + b.StopTimer() + }) + } +} + +//func BenchmarkStorageSequentialDirtyRead(b *testing.B) { +// var ( +// st, q, dm, _, _ = setupBenchmarkStorage(b) +// dest = dm() +// err error +// ) +// for i := 0; i < b.N; i++ { +// if err = st.DirtyReader().QueryRow(q, rrkg.next()).Scan(dest...); err != nil { +// b.Fatalf("Failed to query values: %v", err) +// } +// } +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStoargeSequentialRead(b *testing.B) { +// var ( +// st, q, dm, _, _ = setupBenchmarkStorage(b) +// dest = dm() +// err error +// ) +// for i := 0; i < b.N; i++ { +// if err = st.Reader().QueryRow(q, rrkg.next()).Scan(dest...); err != nil { +// b.Fatalf("Failed to query values: %v", err) +// } +// } +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStoargeSequentialWrite(b *testing.B) { +// var ( +// st, _, _, e, src = setupBenchmarkStorage(b) +// err error +// ) +// b.Run("BenchmarkStoargeSequentialWrite", func(b *testing.B) { +// deleteBenchmarkData(b, st) +// b.ResetTimer() +// for i := 0; i < b.N; i++ { +// if _, err = st.Writer().Exec(e, src[ipkg.next()]...); err != nil { +// b.Errorf("Failed to execute: %v", err) +// } +// } +// }) +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStoargeSequentialWriteTx(b *testing.B) { +// var ( +// st, _, _, e, src = setupBenchmarkStorage(b) +// tx *sql.Tx +// err error +// ) +// for i := 0; i < b.N; i++ { +// if i%benchmarkQueriesPerTx == 0 { +// if tx, err = st.Writer().Begin(); err != nil { +// b.Errorf("Failed to begin transaction: %v", err) +// } +// } +// if _, err = tx.Exec(e, src[ipkg.next()]...); err != nil { +// b.Errorf("Failed to execute: %v", err) +// } +// if (i+1)%benchmarkQueriesPerTx == 0 || i == b.N-1 { +// if err = tx.Commit(); err != nil { +// b.Errorf("Failed to commit transaction: %v", err) +// } +// } +// } +// teardownBenchmarkStorage(b, st) +//} +// +// BW is a background writer function passed to benchmark helper. 
+//type BW func( +// *testing.B, *sync.WaitGroup, <-chan struct{}, xi.Storage, keygen, string, [][]interface{}, +//) + +func busyWrite( + b *testing.B, + wg *sync.WaitGroup, sc <-chan struct{}, + st xi.Storage, kg keygen, e string, src [][]interface{}, +) { + defer wg.Done() + var err error + for { + select { + case <-sc: + return + default: + if _, err = st.Writer().Exec(e, src[kg.next()]...); err != nil { + b.Errorf("Failed to execute: %v", err) + } + } + } +} + +func busyWriteTx( + b *testing.B, + wg *sync.WaitGroup, sc <-chan struct{}, + st xi.Storage, kg keygen, e string, src [][]interface{}, +) { + defer wg.Done() + var ( + tx *sql.Tx + err error + ) + for i := 0; ; i++ { + // Begin + if i%benchmarkQueriesPerTx == 0 { + if tx, err = st.Writer().Begin(); err != nil { + b.Errorf("Failed to begin transaction: %v", err) + } + } + // Exec + select { + case <-sc: + // Also commit on exiting + if tx != nil { + if err = tx.Commit(); err != nil { + b.Errorf("Failed to commit transaction: %v", err) + } + tx = nil + } + return + default: + // Exec + if _, err = tx.Exec(e, src[kg.next()]...); err != nil { + b.Errorf("Failed to execute: %v", err) + } + } + // Commit + if (i+1)%benchmarkQueriesPerTx == 0 { + if err = tx.Commit(); err != nil { + b.Errorf("Failed to commit transaction: %v", err) + } + tx = nil + } + } +} + +func idleWriteTx( + b *testing.B, + wg *sync.WaitGroup, sc <-chan struct{}, + st xi.Storage, kg keygen, e string, src [][]interface{}, +) { + const writeIntlMS = 1 + var ( + tx *sql.Tx + err error + ticker = time.NewTicker(writeIntlMS * time.Millisecond) + ) + defer func() { + ticker.Stop() + wg.Done() + }() + for i := 0; ; i++ { + // Begin + if i%benchmarkQueriesPerTx == 0 { + if tx, err = st.Writer().Begin(); err != nil { + b.Errorf("Failed to begin transaction: %v", err) + } + } + // Exec + select { + case <-ticker.C: + // Exec + if _, err = tx.Exec(e, src[kg.next()]...); err != nil { + b.Errorf("Failed to execute: %v", err) + } + case <-sc: + // Also commit on exiting + if tx != nil { + if err = tx.Commit(); err != nil { + b.Errorf("Failed to commit transaction: %v", err) + } + tx = nil + } + return + } + // Commit + if (i+1)%benchmarkQueriesPerTx == 0 { + if err = tx.Commit(); err != nil { + b.Errorf("Failed to commit transaction: %v", err) + } + tx = nil + } + } +} + +// GR is a get reader function passed to benchmark helper. 
+//type GR func(xi.Storage) *sql.DB +// +//func getDirtyReader(st xi.Storage) *sql.DB { return st.DirtyReader() } +//func getReader(st xi.Storage) *sql.DB { return st.Reader() } +// +//func benchmarkStorageSequentialReadWithBackgroundWriter(b *testing.B, getReader GR, write BW) { +// var ( +// st, q, dm, e, src = setupBenchmarkStorage(b) +// +// dest = dm() +// wg = &sync.WaitGroup{} +// sc = make(chan struct{}) +// +// err error +// ) +// +// // Start background writer +// wg.Add(1) +// go write(b, wg, sc, st, ipkg, e, src) +// +// for i := 0; i < b.N; i++ { +// if err = getReader(st).QueryRow( +// q, trkg.next(), +// ).Scan(dest...); err != nil && err != sql.ErrNoRows { +// b.Fatalf("Failed to query values: %v", err) +// } +// } +// +// // Exit background writer +// close(sc) +// wg.Wait() +// +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStoargeSequentialDirtyReadWithBackgroundWriter(b *testing.B) { +// benchmarkStorageSequentialReadWithBackgroundWriter(b, getDirtyReader, busyWrite) +//} +// +//func BenchmarkStoargeSequentialReadWithBackgroundWriter(b *testing.B) { +// benchmarkStorageSequentialReadWithBackgroundWriter(b, getReader, busyWrite) +//} +// +//func BenchmarkStoargeSequentialDirtyReadWithBackgroundBusyTxWriter(b *testing.B) { +// benchmarkStorageSequentialReadWithBackgroundWriter(b, getDirtyReader, busyWriteTx) +//} +// +//func BenchmarkStoargeSequentialReadWithBackgroundBusyTxWriter(b *testing.B) { +// benchmarkStorageSequentialReadWithBackgroundWriter(b, getReader, busyWriteTx) +//} +// +//func BenchmarkStoargeSequentialDirtyReadWithBackgroundIdleTxWriter(b *testing.B) { +// benchmarkStorageSequentialReadWithBackgroundWriter(b, getDirtyReader, idleWriteTx) +//} +// +//func BenchmarkStoargeSequentialReadWithBackgroundIdleTxWriter(b *testing.B) { +// benchmarkStorageSequentialReadWithBackgroundWriter(b, getReader, idleWriteTx) +//} +// +//func benchmarkStorageSequentialReadTxWithBackgroundWriter(b *testing.B, getReader GR, write BW) { +// var ( +// st, q, dm, e, src = setupBenchmarkStorage(b) +// +// dest = dm() +// wg = &sync.WaitGroup{} +// sc = make(chan struct{}) +// +// err error +// tx *sql.Tx +// ) +// +// // Start background writer +// wg.Add(1) +// go write(b, wg, sc, st, ipkg, e, src) +// +// for i := 0; i < b.N; i++ { +// if i%benchmarkQueriesPerTx == 0 { +// if tx, err = getReader(st).Begin(); err != nil { +// b.Fatalf("Failed to begin transaction: %v", err) +// } +// } +// // Query in [n, 2n-1] key space +// if err = tx.QueryRow(q, nrkg.next()).Scan(dest...); err != nil && err != sql.ErrNoRows { +// b.Fatalf("Failed to query values: %v", err) +// } +// if (i+1)%benchmarkQueriesPerTx == 0 || i == b.N-1 { +// if err = tx.Rollback(); err != nil { +// b.Fatalf("Failed to close transaction: %v", err) +// } +// } +// } +// +// // Exit background writer +// close(sc) +// wg.Wait() +// +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStoargeSequentialDirtyReadTxWithBackgroundWriter(b *testing.B) { +// benchmarkStorageSequentialReadTxWithBackgroundWriter(b, getDirtyReader, busyWrite) +//} +// +//func BenchmarkStoargeSequentialReadTxWithBackgroundWriter(b *testing.B) { +// benchmarkStorageSequentialReadTxWithBackgroundWriter(b, getReader, busyWrite) +//} +// +//func BenchmarkStoargeSequentialDirtyReadTxWithBackgroundBusyTxWriter(b *testing.B) { +// benchmarkStorageSequentialReadTxWithBackgroundWriter(b, getDirtyReader, busyWriteTx) +//} +// +//func BenchmarkStoargeSequentialReadTxWithBackgroundBusyTxWriter(b *testing.B) { +// 
benchmarkStorageSequentialReadTxWithBackgroundWriter(b, getReader, busyWriteTx) +//} +// +//func BenchmarkStoargeSequentialDirtyReadTxWithBackgroundIdleTxWriter(b *testing.B) { +// benchmarkStorageSequentialReadTxWithBackgroundWriter(b, getDirtyReader, idleWriteTx) +//} +// +//func BenchmarkStoargeSequentialReadTxWithBackgroundIdleTxWriter(b *testing.B) { +// benchmarkStorageSequentialReadTxWithBackgroundWriter(b, getReader, idleWriteTx) +//} +// +//func BenchmarkStoargeSequentialMixDRW(b *testing.B) { +// var ( +// st, q, dm, e, src = setupBenchmarkStorage(b) +// dest = dm() +// err error +// ) +// for i := 0; i < b.N; i++ { +// if rand.Int()%2 == 0 { +// if err = st.DirtyReader().QueryRow(q, rrkg.next()).Scan(dest...); err != nil { +// b.Fatalf("Failed to query values: %v", err) +// } +// } else { +// if _, err = st.Writer().Exec(e, src[ipkg.next()]...); err != nil { +// b.Fatalf("Failed to execute: %v", err) +// } +// } +// } +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStoargeSequentialMixRW(b *testing.B) { +// var ( +// st, q, dm, e, src = setupBenchmarkStorage(b) +// dest = dm() +// err error +// ) +// for i := 0; i < b.N; i++ { +// if rand.Int()%2 == 0 { +// if err = st.Reader().QueryRow(q, rrkg.next()).Scan(dest...); err != nil { +// b.Fatalf("Failed to query values: %v", err) +// } +// } else { +// if _, err = st.Writer().Exec(e, src[ipkg.next()]...); err != nil { +// b.Fatalf("Failed to execute: %v", err) +// } +// } +// } +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStorageParallelDirtyRead(b *testing.B) { +// var ( +// st, q, dm, _, _ = setupBenchmarkStorage(b) +// ) +// b.RunParallel(func(pb *testing.PB) { +// var ( +// dest = dm() +// err error +// ) +// for pb.Next() { +// if err = st.DirtyReader().QueryRow(q, rrkg.next()).Scan(dest...); err != nil { +// b.Fatalf("Failed to query values: %v", err) +// } +// } +// }) +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStorageParallelRead(b *testing.B) { +// var ( +// st, q, dm, _, _ = setupBenchmarkStorage(b) +// ) +// b.RunParallel(func(pb *testing.PB) { +// var ( +// dest = dm() +// err error +// ) +// for pb.Next() { +// if err = st.DirtyReader().QueryRow(q, rrkg.next()).Scan(dest...); err != nil { +// b.Fatalf("Failed to query values: %v", err) +// } +// } +// }) +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStoargeParallelWrite(b *testing.B) { +// var st, _, _, e, src = setupBenchmarkStorage(b) +// b.RunParallel(func(pb *testing.PB) { +// var err error +// for pb.Next() { +// if _, err = st.Writer().Exec(e, src[ipkg.next()]...); err != nil { +// b.Fatalf("Failed to execute: %v", err) +// } +// } +// }) +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStorageParallelMixDRW(b *testing.B) { +// var st, q, dm, e, src = setupBenchmarkStorage(b) +// b.RunParallel(func(pb *testing.PB) { +// var ( +// dest = dm() +// err error +// ) +// for pb.Next() { +// if rand.Int()%2 == 0 { +// if err = st.DirtyReader().QueryRow(q, rrkg.next()).Scan(dest...); err != nil { +// b.Fatalf("Failed to query values: %v", err) +// } +// } else { +// if _, err = st.Writer().Exec(e, src[ipkg.next()]...); err != nil { +// b.Fatalf("Failed to execute: %v", err) +// } +// } +// } +// }) +// teardownBenchmarkStorage(b, st) +//} +// +//func BenchmarkStorageParallelMixRW(b *testing.B) { +// var st, q, dm, e, src = setupBenchmarkStorage(b) +// b.RunParallel(func(pb *testing.PB) { +// var ( +// dest = dm() +// err error +// ) +// for pb.Next() { +// if rand.Int()%2 == 0 { +// if err = 
st.Reader().QueryRow(q, rrkg.next()).Scan(dest...); err != nil { +// b.Fatalf("Failed to query values: %v", err) +// } +// } else { +// if _, err = st.Writer().Exec(e, src[ipkg.next()]...); err != nil { +// b.Fatalf("Failed to execute: %v", err) +// } +// } +// } +// }) +// teardownBenchmarkStorage(b, st) +//} diff --git a/xenomint/sqlite/xxx_test.go b/xenomint/sqlite/xxx_test.go new file mode 100644 index 000000000..3c211b975 --- /dev/null +++ b/xenomint/sqlite/xxx_test.go @@ -0,0 +1,74 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sqlite + +import ( + "io/ioutil" + "math/rand" + "os" + "syscall" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +var ( + testingDataDir string +) + +func setup() { + const minNoFile uint64 = 4096 + var ( + err error + lmt syscall.Rlimit + ) + + if testingDataDir, err = ioutil.TempDir("", "CovenantSQL"); err != nil { + panic(err) + } + + rand.Seed(time.Now().UnixNano()) + + if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { + panic(err) + } + if lmt.Max < minNoFile { + panic("insufficient max RLIMIT_NOFILE") + } + lmt.Cur = lmt.Max + if err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { + panic(err) + } + + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) +} + +func teardown() { + if err := os.RemoveAll(testingDataDir); err != nil { + panic(err) + } +} + +func TestMain(m *testing.M) { + os.Exit(func() int { + setup() + defer teardown() + return m.Run() + }()) +} diff --git a/xenomint/state.go b/xenomint/state.go new file mode 100644 index 000000000..8599cf012 --- /dev/null +++ b/xenomint/state.go @@ -0,0 +1,635 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "database/sql" + "io" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" + xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces" + "github.com/CovenantSQL/sqlparser" + "github.com/pkg/errors" +) + +// State defines a xenomint state which is bound to a underlying storage. +type State struct { + sync.RWMutex + strg xi.Storage + pool *pool + closed bool + nodeID proto.NodeID + + // TODO(leventeliu): Reload savepoint from last block on chain initialization, and rollback + // any ongoing transaction on exit. 
+ // + // unc is the uncommitted transaction. + unc *sql.Tx + origin uint64 // origin is the original savepoint of the current transaction + cmpoint uint64 // cmpoint is the last commit point of the current transaction + current uint64 // current is the current savepoint of the current transaction + hasSchemaChange uint32 // indicates schema change happens in this uncommitted transaction +} + +// NewState returns a new State bound to strg. +func NewState(nodeID proto.NodeID, strg xi.Storage) (s *State, err error) { + var t = &State{ + nodeID: nodeID, + strg: strg, + pool: newPool(), + } + if t.unc, err = t.strg.Writer().Begin(); err != nil { + return + } + t.setSavepoint() + s = t + return +} + +func (s *State) incSeq() { + s.current++ +} + +func (s *State) setNextTxID() { + s.origin = s.current + s.cmpoint = s.current +} + +func (s *State) setCommitPoint() { + s.cmpoint = s.current +} + +func (s *State) rollbackID(id uint64) { + s.current = id +} + +// InitTx sets the initial id of the current transaction. This method is not safe for concurrency +// and should only be called at initialization. +func (s *State) InitTx(id uint64) { + s.origin = id + s.cmpoint = id + s.current = id + s.setSavepoint() +} + +func (s *State) getID() uint64 { + return atomic.LoadUint64(&s.current) +} + +// Close commits any ongoing transaction if needed and closes the underlying storage. +func (s *State) Close(commit bool) (err error) { + if s.closed { + return + } + if s.unc != nil { + if commit { + if err = s.uncCommit(); err != nil { + return + } + } else { + // Only rollback to last commmit point + if err = s.rollback(); err != nil { + return + } + if err = s.uncCommit(); err != nil { + return + } + } + } + if err = s.strg.Close(); err != nil { + return + } + s.closed = true + return +} + +func convertQueryAndBuildArgs(pattern string, args []types.NamedArg) (containsDDL bool, p string, ifs []interface{}, err error) { + var ( + tokenizer = sqlparser.NewStringTokenizer(pattern) + stmt sqlparser.Statement + lastPos int + query string + queryParts []string + ) + + for { + stmt, err = sqlparser.ParseNext(tokenizer) + + if err != nil && err != io.EOF { + return + } + + if err == io.EOF { + err = nil + break + } + + query = pattern[lastPos : tokenizer.Position-1] + lastPos = tokenizer.Position + 1 + + // translate show statement + if showStmt, ok := stmt.(*sqlparser.Show); ok { + origQuery := query + + switch showStmt.Type { + case "table": + if showStmt.ShowCreate { + query = "SELECT sql FROM sqlite_master WHERE type = \"table\" AND tbl_name = \"" + + showStmt.OnTable.Name.String() + "\"" + } else { + query = "PRAGMA table_info(" + showStmt.OnTable.Name.String() + ")" + } + case "index": + query = "SELECT name FROM sqlite_master WHERE type = \"index\" AND tbl_name = \"" + + showStmt.OnTable.Name.String() + "\"" + case "tables": + query = "SELECT name FROM sqlite_master WHERE type = \"table\"" + } + + log.WithFields(log.Fields{ + "from": origQuery, + "to": query, + }).Debug("query translated") + } else if _, ok := stmt.(*sqlparser.DDL); ok { + containsDDL = true + } + + queryParts = append(queryParts, query) + } + + p = strings.Join(queryParts, "; ") + + ifs = make([]interface{}, len(args)) + for i, v := range args { + ifs[i] = sql.NamedArg{ + Name: v.Name, + Value: v.Value, + } + } + return +} + +func buildTypeNamesFromSQLColumnTypes(types []*sql.ColumnType) (names []string) { + names = make([]string, len(types)) + for i, v := range types { + names[i] = v.DatabaseTypeName() + } + return +} + +type sqlQuerier 
interface { + Query(query string, args ...interface{}) (*sql.Rows, error) +} + +func readSingle( + qer sqlQuerier, q *types.Query) (names []string, types []string, data [][]interface{}, err error, +) { + var ( + rows *sql.Rows + cols []*sql.ColumnType + pattern string + args []interface{} + ) + + if _, pattern, args, err = convertQueryAndBuildArgs(q.Pattern, q.Args); err != nil { + return + } + if rows, err = qer.Query(pattern, args...); err != nil { + return + } + defer rows.Close() + // Fetch column names and types + if names, err = rows.Columns(); err != nil { + return + } + if cols, err = rows.ColumnTypes(); err != nil { + return + } + types = buildTypeNamesFromSQLColumnTypes(cols) + // Scan data row by row + data = make([][]interface{}, 0) + for rows.Next() { + var ( + row = make([]interface{}, len(cols)) + dest = make([]interface{}, len(cols)) + ) + for i := range row { + dest[i] = &row[i] + } + if err = rows.Scan(dest...); err != nil { + return + } + data = append(data, row) + } + return +} + +func buildRowsFromNativeData(data [][]interface{}) (rows []types.ResponseRow) { + rows = make([]types.ResponseRow, len(data)) + for i, v := range data { + rows[i].Values = v + } + return +} + +func (s *State) read(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) { + var ( + ierr error + cnames, ctypes []string + data [][]interface{} + ) + // TODO(leventeliu): no need to run every read query here. + for i, v := range req.Payload.Queries { + if cnames, ctypes, data, ierr = readSingle(s.strg.DirtyReader(), &v); ierr != nil { + err = errors.Wrapf(ierr, "query at #%d failed", i) + // Add to failed pool list + s.pool.setFailed(req) + return + } + } + // Build query response + ref = &QueryTracker{Req: req} + resp = &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: req.Header, + NodeID: s.nodeID, + Timestamp: s.getLocalTime(), + RowCount: uint64(len(data)), + LogOffset: s.getID(), + }, + }, + Payload: types.ResponsePayload{ + Columns: cnames, + DeclTypes: ctypes, + Rows: buildRowsFromNativeData(data), + }, + } + return +} + +func (s *State) readTx(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) { + var ( + tx *sql.Tx + id uint64 + ierr error + cnames, ctypes []string + data [][]interface{} + querier sqlQuerier + ) + id = s.getID() + if atomic.LoadUint32(&s.hasSchemaChange) == 1 { + // lock transaction + s.Lock() + defer s.Unlock() + s.setSavepoint() + querier = s.unc + defer s.rollbackTo(id) + } else { + if tx, ierr = s.strg.DirtyReader().Begin(); ierr != nil { + err = errors.Wrap(ierr, "open tx failed") + return + } + querier = tx + defer tx.Rollback() + } + + for i, v := range req.Payload.Queries { + if cnames, ctypes, data, ierr = readSingle(querier, &v); ierr != nil { + err = errors.Wrapf(ierr, "query at #%d failed", i) + // Add to failed pool list + s.pool.setFailed(req) + return + } + } + // Build query response + ref = &QueryTracker{Req: req} + resp = &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: req.Header, + NodeID: s.nodeID, + Timestamp: s.getLocalTime(), + RowCount: uint64(len(data)), + LogOffset: id, + }, + }, + Payload: types.ResponsePayload{ + Columns: cnames, + DeclTypes: ctypes, + Rows: buildRowsFromNativeData(data), + }, + } + return +} + +func (s *State) writeSingle(q *types.Query) (res sql.Result, err error) { + var ( + containsDDL bool + pattern string + args []interface{} + ) + + if containsDDL, pattern, args, err = 
convertQueryAndBuildArgs(q.Pattern, q.Args); err != nil { + return + } + if res, err = s.unc.Exec(pattern, args...); err == nil { + if containsDDL { + atomic.StoreUint32(&s.hasSchemaChange, 1) + } + s.incSeq() + } + return +} + +func (s *State) setSavepoint() (savepoint uint64) { + savepoint = s.getID() + s.unc.Exec("SAVEPOINT \"?\"", savepoint) + return +} + +func (s *State) rollbackTo(savepoint uint64) { + s.rollbackID(savepoint) + s.unc.Exec("ROLLBACK TO \"?\"", savepoint) +} + +func (s *State) write(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) { + var ( + savepoint uint64 + query = &QueryTracker{Req: req} + totalAffectedRows int64 + curAffectedRows int64 + lastInsertID int64 + ) + + // TODO(leventeliu): savepoint is a sqlite-specified solution for nested transaction. + if err = func() (err error) { + var ierr error + s.Lock() + defer s.Unlock() + savepoint = s.getID() + for i, v := range req.Payload.Queries { + var res sql.Result + if res, ierr = s.writeSingle(&v); ierr != nil { + err = errors.Wrapf(ierr, "execute at #%d failed", i) + // Add to failed pool list + s.pool.setFailed(req) + s.rollbackTo(savepoint) + return + } + + curAffectedRows, _ = res.RowsAffected() + lastInsertID, _ = res.LastInsertId() + totalAffectedRows += curAffectedRows + } + s.setSavepoint() + s.pool.enqueue(savepoint, query) + return + }(); err != nil { + return + } + // Build query response + ref = query + resp = &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: req.Header, + NodeID: s.nodeID, + Timestamp: s.getLocalTime(), + RowCount: 0, + LogOffset: savepoint, + AffectedRows: totalAffectedRows, + LastInsertID: lastInsertID, + }, + }, + } + return +} + +func (s *State) replay(req *types.Request, resp *types.Response) (err error) { + var ( + ierr error + savepoint uint64 + query = &QueryTracker{Req: req, Resp: resp} + ) + s.Lock() + defer s.Unlock() + savepoint = s.getID() + if resp.Header.ResponseHeader.LogOffset != savepoint { + err = errors.Wrapf( + ErrQueryConflict, + "local id %d vs replaying id %d", savepoint, resp.Header.ResponseHeader.LogOffset, + ) + return + } + for i, v := range req.Payload.Queries { + if _, ierr = s.writeSingle(&v); ierr != nil { + err = errors.Wrapf(ierr, "execute at #%d failed", i) + s.rollbackTo(savepoint) + return + } + } + s.setSavepoint() + s.pool.enqueue(savepoint, query) + return +} + +// ReplayBlock replays the queries from block. It also checks and skips some preceding pooled +// queries. 
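write and replay above emulate nested transactions by keeping one long-lived write transaction open and using SQLite SAVEPOINT / ROLLBACK TO to undo a failed query batch without losing earlier writes; the same mechanism is what lets ReplayBlock below roll back a partially replayed block. A self-contained sketch of that pattern over plain database/sql; the mattn/go-sqlite3 driver and the kv table are assumptions for illustration only, not the storage used by this package:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/mattn/go-sqlite3" // illustrative driver choice, not the project's sqlite build
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// One long-lived write transaction, analogous to State.unc.
	tx, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}
	if _, err = tx.Exec(`CREATE TABLE kv (k INT PRIMARY KEY, v TEXT)`); err != nil {
		log.Fatal(err)
	}

	// Mark a savepoint, attempt a batch, and roll back to the savepoint on failure.
	if _, err = tx.Exec(`SAVEPOINT "sp1"`); err != nil {
		log.Fatal(err)
	}
	if _, err = tx.Exec(`INSERT INTO kv VALUES (1, 'ok')`); err != nil {
		log.Fatal(err)
	}
	if _, err = tx.Exec(`INSERT INTO kv VALUES (1, 'dup')`); err != nil {
		// Constraint violation: undo the whole batch but keep the outer transaction alive.
		if _, rerr := tx.Exec(`ROLLBACK TO "sp1"`); rerr != nil {
			log.Fatal(rerr)
		}
	}

	var n int
	if err = tx.QueryRow(`SELECT COUNT(*) FROM kv`).Scan(&n); err != nil {
		log.Fatal(err)
	}
	fmt.Println("rows after rollback to savepoint:", n) // 0

	if err = tx.Commit(); err != nil {
		log.Fatal(err)
	}
}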
+func (s *State) ReplayBlock(block *types.Block) (err error) { + var ( + ierr error + lastsp uint64 // Last savepoint + ) + s.Lock() + defer s.Unlock() + for i, q := range block.QueryTxs { + var query = &QueryTracker{Req: q.Request, Resp: &types.Response{Header: *q.Response}} + lastsp = s.getID() + if q.Response.ResponseHeader.LogOffset > lastsp { + err = ErrMissingParent + return + } + // Match and skip already pooled query + if q.Response.ResponseHeader.LogOffset < lastsp { + if !s.pool.match(q.Response.ResponseHeader.LogOffset, q.Request) { + err = ErrQueryConflict + return + } + continue + } + // Replay query + for j, v := range q.Request.Payload.Queries { + if q.Request.Header.QueryType == types.ReadQuery { + continue + } + if q.Request.Header.QueryType != types.WriteQuery { + err = errors.Wrapf(ErrInvalidRequest, "replay block at %d:%d", i, j) + s.rollbackTo(lastsp) + return + } + if _, ierr = s.writeSingle(&v); ierr != nil { + err = errors.Wrapf(ierr, "execute at %d:%d failed", i, j) + s.rollbackTo(lastsp) + return + } + } + s.setSavepoint() + s.pool.enqueue(lastsp, query) + } + // Remove duplicate failed queries from local pool + for _, r := range block.FailedReqs { + s.pool.removeFailed(r) + } + // Check if the current transaction is ok to commit + if s.pool.matchLast(lastsp) { + if err = s.uncCommit(); err != nil { + // FATAL ERROR + return + } + if s.unc, err = s.strg.Writer().Begin(); err != nil { + // FATAL ERROR + return + } + s.setNextTxID() + } else { + // Set commit point only, transaction is not actually committed. This commit point will be + // used on exiting. + s.setCommitPoint() + } + s.setSavepoint() + // Truncate pooled queries + s.pool.truncate(lastsp) + return +} + +func (s *State) commit() (err error) { + s.Lock() + defer s.Unlock() + if err = s.uncCommit(); err != nil { + return + } + if s.unc, err = s.strg.Writer().Begin(); err != nil { + return + } + s.setNextTxID() + s.setSavepoint() + _ = s.pool.queries + s.pool = newPool() + return +} + +// CommitEx commits the current transaction and returns all the pooled queries. +func (s *State) CommitEx() (failed []*types.Request, queries []*QueryTracker, err error) { + s.Lock() + defer s.Unlock() + if err = s.uncCommit(); err != nil { + // FATAL ERROR + return + } + if s.unc, err = s.strg.Writer().Begin(); err != nil { + // FATAL ERROR + return + } + s.setNextTxID() + s.setSavepoint() + // Return pooled items and reset + failed = s.pool.failedList() + queries = s.pool.queries + s.pool = newPool() + return +} + +func (s *State) uncCommit() (err error) { + if err = s.unc.Commit(); err != nil { + return + } + + // reset schema change flag + atomic.StoreUint32(&s.hasSchemaChange, 0) + + return +} + +func (s *State) rollback() (err error) { + s.Lock() + defer s.Unlock() + s.rollbackTo(s.cmpoint) + s.current = s.cmpoint + return +} + +func (s *State) getLocalTime() time.Time { + return time.Now().UTC() +} + +// Query does the query(ies) in req, pools the request and persists any change to +// the underlying storage. +func (s *State) Query(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) { + switch req.Header.QueryType { + case types.ReadQuery: + return s.readTx(req) + case types.WriteQuery: + return s.write(req) + default: + err = ErrInvalidRequest + } + return +} + +// Replay replays a write log from other peer to replicate storage state. 
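ReplayBlock above resolves each replayed query by comparing its LogOffset with the local savepoint id: a larger offset means a preceding write is still missing, a smaller one must match an already pooled query, and an equal one is applied as the next write; Replay below relies on the same check per request. A compact sketch of that three-way decision, where the function and variable names are hypothetical stand-ins for the pool and write path:

package main

import (
	"errors"
	"fmt"
)

var (
	errMissingParent = errors.New("missing parent")
	errQueryConflict = errors.New("query conflict")
)

// replayOne mirrors the offset check: local is the current savepoint id,
// remote is the LogOffset carried by the replayed response.
func replayOne(local uint64, remote uint64, matchPooled func(uint64) bool, apply func() error) error {
	switch {
	case remote > local:
		return errMissingParent // a preceding write has not arrived yet
	case remote < local:
		if !matchPooled(remote) {
			return errQueryConflict // already-applied history diverges
		}
		return nil // skip: this query is already in the local pool
	default:
		return apply() // exactly the next expected write
	}
}

func main() {
	pooled := map[uint64]bool{3: true}
	local := uint64(4)
	for _, remote := range []uint64{3, 4, 6} {
		err := replayOne(local, remote,
			func(id uint64) bool { return pooled[id] },
			func() error { local++; return nil })
		fmt.Printf("remote=%d -> err=%v local=%d\n", remote, err, local)
	}
}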
+func (s *State) Replay(req *types.Request, resp *types.Response) (err error) { + // NOTE(leventeliu): in the current implementation, failed requests are not tracked in remote + // nodes (while replaying via Replay calls). Because we don't want to actually replay read + // queries in all synchronized nodes, meanwhile, whether a request will fail or not + // remains unknown until we actually replay it -- a dead end here. + // So we just keep failed requests in local pool and report them in the next local block + // producing. + switch req.Header.QueryType { + case types.ReadQuery: + return + case types.WriteQuery: + return s.replay(req, resp) + default: + err = ErrInvalidRequest + } + return +} + +// Stat prints the statistic message of the State object. +func (s *State) Stat(id proto.DatabaseID) { + var ( + p = func() *pool { + s.RLock() + defer s.RUnlock() + return s.pool + }() + fc = atomic.LoadInt32(&p.failedRequestCount) + tc = atomic.LoadInt32(&p.trackerCount) + ) + log.WithFields(log.Fields{ + "database_id": id, + "pooled_fail_request_count": fc, + "pooled_query_tracker": tc, + }).Info("Xeno pool stats") +} diff --git a/xenomint/state_test.go b/xenomint/state_test.go new file mode 100644 index 000000000..783142a15 --- /dev/null +++ b/xenomint/state_test.go @@ -0,0 +1,521 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "database/sql" + "fmt" + "os" + "path" + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" + xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces" + xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite" + "github.com/pkg/errors" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestState(t *testing.T) { + Convey("Given a chain state object", t, func() { + var ( + id1 = proto.DatabaseID("db-x1") + fl1 = path.Join(testingDataDir, fmt.Sprint(t.Name(), "x1")) + fl2 = path.Join(testingDataDir, fmt.Sprint(t.Name(), "x2")) + st1, st2 *State + strg1, strg2 xi.Storage + err error + ) + nodeID := proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000") + strg1, err = xs.NewSqlite(fmt.Sprint("file:", fl1)) + So(err, ShouldBeNil) + So(strg1, ShouldNotBeNil) + st1, err = NewState(nodeID, strg1) + So(err, ShouldBeNil) + So(st1, ShouldNotBeNil) + Reset(func() { + // Clean database file after each pass + err = st1.Close(true) + So(err, ShouldBeNil) + err = os.Remove(fl1) + So(err, ShouldBeNil) + err = os.Remove(fmt.Sprint(fl1, "-shm")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + err = os.Remove(fmt.Sprint(fl1, "-wal")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + }) + strg2, err = xs.NewSqlite(fmt.Sprint("file:", fl2)) + So(err, ShouldBeNil) + So(strg1, ShouldNotBeNil) + st2, err = NewState(nodeID, strg2) + So(err, ShouldBeNil) + So(st1, ShouldNotBeNil) + Reset(func() { + // Clean database file after each pass + err = st2.Close(true) + So(err, ShouldBeNil) + err = os.Remove(fl2) + So(err, ShouldBeNil) + err = os.Remove(fmt.Sprint(fl2, "-shm")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + err = os.Remove(fmt.Sprint(fl2, "-wal")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + }) + Convey("When storage is closed", func() { + err = st1.Close(false) + So(err, ShouldBeNil) + Convey("The storage should report error for any incoming query", func() { + var req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), + }) + _, _, err = st1.Query(req) + So(err, ShouldNotBeNil) + err = errors.Cause(err) + So(err, ShouldNotBeNil) + So(err, ShouldEqual, sql.ErrTxDone) + }) + }) + Convey("The state will report error on read with uncommitted schema change", func() { + var ( + req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), + }) + resp *types.Response + ) + _, resp, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT * FROM t1`), + })) + // any schema change query will trigger performance degradation mode in current block + So(err, ShouldBeNil) + }) + Convey("When a basic KV table is created", func() { + var ( + values = [][]interface{}{ + {int64(1), []byte("v1")}, + {int64(2), []byte("v2")}, + {int64(3), []byte("v3")}, + {int64(4), []byte("v4")}, + } + req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), + }) + resp *types.Response + ) + _, resp, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + err = st1.commit() + So(err, ShouldBeNil) + _, resp, err = st2.Query(req) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + err = st2.commit() + Convey("The state should not change after attempted writing in read query", func() { + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, 1, "v1"), + buildQuery(`SELECT v FROM t1 WHERE k=?`, 1), + })) + // The use of Query instead of Exec won't produce an "attempt to write" error + // like Exec, but it should still keep it readonly -- which means writes 
will + // be ignored in this case. + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 0) + }) + Convey("The state should report invalid request with unknown query type", func() { + req = buildRequest(types.QueryType(0xff), []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }) + _, resp, err = st1.Query(req) + So(err, ShouldEqual, ErrInvalidRequest) + So(resp, ShouldBeNil) + err = st1.Replay(req, nil) + So(err, ShouldEqual, ErrInvalidRequest) + }) + Convey("The state should report error on malformed queries", func() { + _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`XXXXXX INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + err = st1.Replay(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`XXXXXX INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }), &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + LogOffset: st1.getID(), + }, + }, + }) + So(err, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t2 (k, v) VALUES (?, ?)`, values[0]...), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + err = st1.Replay(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t2 (k, v) VALUES (?, ?)`, values[0]...), + }), &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + LogOffset: st1.getID(), + }, + }, + }) + So(err, ShouldNotBeNil) + st1.Stat(id1) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`XXXXXX v FROM t1`), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t2`), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + _, resp, err = st1.read(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t2`), + })) + So(err, ShouldNotBeNil) + So(resp, ShouldBeNil) + st1.Stat(id1) + }) + Convey("The state should work properly with reading/writing queries", func() { + _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + })) + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 0) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1 WHERE k=?`, values[0][0]), + })) + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 1) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"v"}, + DeclTypes: []string{"TEXT"}, + Rows: []types.ResponseRow{{Values: values[0][1:]}}, + }) + st1.Stat(id1) + + _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[1]...), + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?); +INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), + })) + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 0) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1`), + })) + So(err, ShouldBeNil) + So(resp.Header.RowCount, ShouldEqual, 4) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"v"}, + DeclTypes: []string{"TEXT"}, + Rows: []types.ResponseRow{ + {Values: values[0][1:]}, + {Values: values[1][1:]}, + {Values: 
values[2][1:]}, + {Values: values[3][1:]}, + }, + }) + st1.Stat(id1) + + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT * FROM t1`), + })) + So(err, ShouldBeNil) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"k", "v"}, + DeclTypes: []string{"INT", "TEXT"}, + Rows: []types.ResponseRow{ + {Values: values[0][:]}, + {Values: values[1][:]}, + {Values: values[2][:]}, + {Values: values[3][:]}, + }, + }) + st1.Stat(id1) + + // Test show statements + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SHOW TABLE t1`), + })) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SHOW CREATE TABLE t1`), + })) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SHOW INDEX FROM TABLE t1`), + })) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SHOW TABLES`), + })) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + st1.Stat(id1) + + // Also test a non-transaction read implementation + _, resp, err = st1.read(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT * FROM t1`), + })) + So(err, ShouldBeNil) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"k", "v"}, + DeclTypes: []string{"INT", "TEXT"}, + Rows: []types.ResponseRow{ + {Values: values[0][:]}, + {Values: values[1][:]}, + {Values: values[2][:]}, + {Values: values[3][:]}, + }, + }) + st1.Stat(id1) + }) + Convey("The state should skip read query while replaying", func() { + err = st1.Replay(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT * FROM t1`), + }), nil) + So(err, ShouldBeNil) + }) + Convey("The state should report conflict state while replaying bad request", func() { + err = st1.Replay(buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }), &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + LogOffset: uint64(0xff), + }, + }, + }) + err = errors.Cause(err) + So(err, ShouldEqual, ErrQueryConflict) + }) + Convey("The state should be reproducible in another instance", func() { + var ( + qt *QueryTracker + reqs = []*types.Request{ + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }), + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[1]...), + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?); +INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), + }), + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`DELETE FROM t1 WHERE k=?`, values[2][0]), + }), + } + ) + for i := range reqs { + qt, resp, err = st1.Query(reqs[i]) + So(err, ShouldBeNil) + So(qt, ShouldNotBeNil) + So(resp, ShouldNotBeNil) + qt.UpdateResp(resp) + // Replay to st2 + err = st2.Replay(reqs[i], resp) + So(err, ShouldBeNil) + } + // Should be in same state + for i := range values { + var resp1, resp2 *types.Response + req = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), + }) + _, resp1, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp1, ShouldNotBeNil) + _, resp2, err = st2.Query(req) + So(err, ShouldBeNil) + So(resp2, ShouldNotBeNil) + So(resp1.Payload, 
ShouldResemble, resp2.Payload) + } + }) + Convey("When queries are committed to blocks on state instance #1", func() { + var ( + qt *QueryTracker + reqs = []*types.Request{ + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), + }), + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[1]...), + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?); +INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), + }), + buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`DELETE FROM t1 WHERE k=?`, values[2][0]), + }), + } + + cmtpos = 0 + cmtps = []int{1, len(reqs) - 1} + blocks = make([]*types.Block, len(cmtps)) + ) + for i := range reqs { + var resp *types.Response + qt, resp, err = st1.Query(reqs[i]) + So(err, ShouldBeNil) + So(qt, ShouldNotBeNil) + So(resp, ShouldNotBeNil) + qt.UpdateResp(resp) + // Commit block if matches the next commit point + if cmtpos < len(cmtps) && i == cmtps[cmtpos] { + var qts []*QueryTracker + _, qts, err = st1.CommitEx() + So(err, ShouldBeNil) + So(qts, ShouldNotBeNil) + blocks[cmtpos] = &types.Block{ + QueryTxs: make([]*types.QueryAsTx, len(qts)), + } + for i, v := range qts { + blocks[cmtpos].QueryTxs[i] = &types.QueryAsTx{ + Request: v.Req, + Response: &v.Resp.Header, + } + } + cmtpos++ + } + } + Convey( + "The state should report missing parent while replaying later block first", + func() { + err = st2.ReplayBlock(blocks[len(blocks)-1]) + So(err, ShouldEqual, ErrMissingParent) + }, + ) + Convey( + "The state should report conflict error while replaying modified query", + func() { + // Replay by request to st2 first + for _, v := range blocks { + for _, w := range v.QueryTxs { + err = st2.Replay(w.Request, &types.Response{ + Header: *w.Response, + }) + So(err, ShouldBeNil) + } + } + // Try to replay modified block #0 + var blockx = &types.Block{ + QueryTxs: []*types.QueryAsTx{ + &types.QueryAsTx{ + Request: &types.Request{ + Header: types.SignedRequestHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: [32]byte{ + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 1, + }, + }, + }, + }, + Response: &types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + LogOffset: blocks[0].QueryTxs[0].Response.LogOffset, + }, + }, + }, + }, + } + blockx.QueryTxs[0].Request.Header.DataHash = hash.Hash{0x0, 0x0, 0x0, 0x1} + err = st2.ReplayBlock(blockx) + So(err, ShouldEqual, ErrQueryConflict) + }, + ) + Convey( + "The state should be reproducible with block replaying in empty instance #2", + func() { + // Block replaying + for i := range blocks { + err = st2.ReplayBlock(blocks[i]) + So(err, ShouldBeNil) + } + // Should be in same state + for i := range values { + var resp1, resp2 *types.Response + req = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), + }) + _, resp1, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp1, ShouldNotBeNil) + _, resp2, err = st2.Query(req) + So(err, ShouldBeNil) + So(resp2, ShouldNotBeNil) + So(resp1.Payload, ShouldResemble, resp2.Payload) + } + }, + ) + Convey( + "The state should be reproducible with block replaying in synchronized"+ + " instance #2", + func() { + // Replay by request to st2 first + for _, v := range blocks { + for _, w := range v.QueryTxs { + err = st2.Replay(w.Request, &types.Response{ + Header: *w.Response, + }) + So(err, ShouldBeNil) + } + } + // Block 
replaying + for i := range blocks { + err = st2.ReplayBlock(blocks[i]) + So(err, ShouldBeNil) + } + // Should be in same state + for i := range values { + var resp1, resp2 *types.Response + req = buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), + }) + _, resp1, err = st1.Query(req) + So(err, ShouldBeNil) + So(resp1, ShouldNotBeNil) + _, resp2, err = st2.Query(req) + So(err, ShouldBeNil) + So(resp2, ShouldNotBeNil) + So(resp1.Payload, ShouldResemble, resp2.Payload) + } + }, + ) + }) + }) + }) +} diff --git a/xenomint/types/block.go b/xenomint/types/block.go new file mode 100644 index 000000000..2621786ab --- /dev/null +++ b/xenomint/types/block.go @@ -0,0 +1,102 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/merkle" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" +) + +//go:generate hsp + +// BlockHeader defines a block header. +type BlockHeader struct { + Version int32 + Producer proto.NodeID + GenesisHash hash.Hash + ParentHash hash.Hash + MerkleRoot hash.Hash + Timestamp time.Time +} + +// SignedBlockHeader defines a block along with its hasher, signer and verifier. +type SignedBlockHeader struct { + BlockHeader + DefaultHashSignVerifierImpl +} + +// Sign signs the block header. +func (h *SignedBlockHeader) Sign(signer *asymmetric.PrivateKey) error { + return h.DefaultHashSignVerifierImpl.Sign(&h.BlockHeader, signer) +} + +// Verify verifies the block header. +func (h *SignedBlockHeader) Verify() error { + return h.DefaultHashSignVerifierImpl.Verify(&h.BlockHeader) +} + +// Block defines a block including a signed block header and its query list. +type Block struct { + SignedBlockHeader + ReadQueries []*types.Ack + WriteQueries []*types.Ack +} + +// Sign signs the block. +func (b *Block) Sign(signer *asymmetric.PrivateKey) (err error) { + // Update header fields: generate merkle root from queries + var hashes []*hash.Hash + for _, v := range b.ReadQueries { + h := v.Header.Hash() + hashes = append(hashes, &h) + } + for _, v := range b.WriteQueries { + h := v.Header.Hash() + hashes = append(hashes, &h) + } + if err = b.MerkleRoot.SetBytes(merkle.NewMerkle(hashes).GetRoot()[:]); err != nil { + return + } + // Sign block header + return b.SignedBlockHeader.Sign(signer) +} + +// Verify verifies the block. 
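Block.Sign above hashes every read and write ack into a Merkle root, stores the root in the header, and signs the header, so the Verify method below can detect any change to the query lists by recomputing the root. A toy illustration of that property using a simple pairwise SHA-256 tree from the standard library; this is not the project's merkle package, only the idea behind it:

package main

import (
	"crypto/sha256"
	"fmt"
)

// merkleRoot folds a list of leaf hashes pairwise until one root remains.
// An odd leaf is paired with itself, a common convention.
func merkleRoot(leaves [][32]byte) [32]byte {
	if len(leaves) == 0 {
		return sha256.Sum256(nil)
	}
	level := leaves
	for len(level) > 1 {
		var next [][32]byte
		for i := 0; i < len(level); i += 2 {
			j := i + 1
			if j == len(level) {
				j = i
			}
			next = append(next, sha256.Sum256(append(level[i][:], level[j][:]...)))
		}
		level = next
	}
	return level[0]
}

func main() {
	queries := [][]byte{[]byte("q1"), []byte("q2"), []byte("q3")}
	var leaves [][32]byte
	for _, q := range queries {
		leaves = append(leaves, sha256.Sum256(q))
	}
	root := merkleRoot(leaves)

	// Tampering with any query changes the root, so a signature over a header
	// that embeds the root covers the whole query list.
	leaves[1] = sha256.Sum256([]byte("q2-modified"))
	fmt.Println("root changed after tamper:", root != merkleRoot(leaves)) // true
}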
+func (b *Block) Verify() error { + // Verify header fields: compare merkle root from queries + var hashes []*hash.Hash + for _, v := range b.ReadQueries { + h := v.Header.Hash() + hashes = append(hashes, &h) + } + for _, v := range b.WriteQueries { + h := v.Header.Hash() + hashes = append(hashes, &h) + } + if mroot := merkle.NewMerkle(hashes).GetRoot(); !mroot.IsEqual( + &b.SignedBlockHeader.MerkleRoot, + ) { + return ErrMerkleRootNotMatch + } + // Verify block header signature + return b.SignedBlockHeader.Verify() +} diff --git a/xenomint/types/block_gen.go b/xenomint/types/block_gen.go new file mode 100644 index 000000000..4c19a8bf2 --- /dev/null +++ b/xenomint/types/block_gen.go @@ -0,0 +1,143 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + hsp "github.com/CovenantSQL/HashStablePack/marshalhash" +) + +// MarshalHash marshals for hash +func (z *Block) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 3 + // map header, size 2 + o = append(o, 0x83, 0x83, 0x82, 0x82) + if oTemp, err := z.SignedBlockHeader.BlockHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.SignedBlockHeader.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.ReadQueries))) + for za0001 := range z.ReadQueries { + if z.ReadQueries[za0001] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.ReadQueries[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.WriteQueries))) + for za0002 := range z.WriteQueries { + if z.WriteQueries[za0002] == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.WriteQueries[za0002].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *Block) Msgsize() (s int) { + s = 1 + 18 + 1 + 12 + z.SignedBlockHeader.BlockHeader.Msgsize() + 28 + z.SignedBlockHeader.DefaultHashSignVerifierImpl.Msgsize() + 12 + hsp.ArrayHeaderSize + for za0001 := range z.ReadQueries { + if z.ReadQueries[za0001] == nil { + s += hsp.NilSize + } else { + s += z.ReadQueries[za0001].Msgsize() + } + } + s += 13 + hsp.ArrayHeaderSize + for za0002 := range z.WriteQueries { + if z.WriteQueries[za0002] == nil { + s += hsp.NilSize + } else { + s += z.WriteQueries[za0002].Msgsize() + } + } + return +} + +// MarshalHash marshals for hash +func (z *BlockHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 6 + o = append(o, 0x86, 0x86) + if oTemp, err := z.GenesisHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + if oTemp, err := z.ParentHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + o = hsp.AppendInt32(o, z.Version) + o = append(o, 0x86) + if oTemp, err := z.Producer.MarshalHash(); err != nil { + return nil, err + } else { + o = 
hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x86) + o = hsp.AppendTime(o, z.Timestamp) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *BlockHeader) Msgsize() (s int) { + s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.ParentHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + return +} + +// MarshalHash marshals for hash +func (z *SignedBlockHeader) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.BlockHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SignedBlockHeader) Msgsize() (s int) { + s = 1 + 12 + z.BlockHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + return +} diff --git a/xenomint/types/block_gen_test.go b/xenomint/types/block_gen_test.go new file mode 100644 index 000000000..9d948edad --- /dev/null +++ b/xenomint/types/block_gen_test.go @@ -0,0 +1,121 @@ +package types + +// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "testing" +) + +func TestMarshalHashBlock(t *testing.T) { + v := Block{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashBlock(b *testing.B) { + v := Block{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgBlock(b *testing.B) { + v := Block{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashBlockHeader(t *testing.T) { + v := BlockHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashBlockHeader(b *testing.B) { + v := BlockHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgBlockHeader(b *testing.B) { + v := BlockHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + +func TestMarshalHashSignedBlockHeader(t *testing.T) { + v := SignedBlockHeader{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashSignedBlockHeader(b *testing.B) { + v := SignedBlockHeader{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + 
+func BenchmarkAppendMsgSignedBlockHeader(b *testing.B) { + v := SignedBlockHeader{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/xenomint/types/block_test.go b/xenomint/types/block_test.go new file mode 100644 index 000000000..e971bfffe --- /dev/null +++ b/xenomint/types/block_test.go @@ -0,0 +1,87 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" + "github.com/CovenantSQL/CovenantSQL/types" + . "github.com/smartystreets/goconvey/convey" +) + +func TestBlock(t *testing.T) { + Convey("Given a block and a pair of keys", t, func() { + var ( + block = &Block{ + SignedBlockHeader: SignedBlockHeader{ + BlockHeader: BlockHeader{}, + }, + ReadQueries: []*types.Ack{ + { + Header: types.SignedAckHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x0, 0x0, 0x0, 0x1}, + }, + }, + }, + }, + WriteQueries: []*types.Ack{ + { + Header: types.SignedAckHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x0, 0x0, 0x0, 0x2}, + }, + }, + }, + }, + } + priv, _, err = asymmetric.GenSecp256k1KeyPair() + ) + So(err, ShouldBeNil) + So(priv, ShouldNotBeNil) + Convey("When the block is signed by the key pair", func() { + err = block.Sign(priv) + So(err, ShouldBeNil) + Convey("The block should be verifiable", func() { + err = block.Verify() + So(err, ShouldBeNil) + }) + Convey("The object should have data hash", func() { + var enc, err = block.BlockHeader.MarshalHash() + So(err, ShouldBeNil) + So(enc, ShouldNotBeNil) + So(block.SignedBlockHeader.Hash(), ShouldEqual, hash.THashH(enc)) + }) + Convey("When the queries is modified", func() { + block.ReadQueries = append(block.ReadQueries, &types.Ack{ + Header: types.SignedAckHeader{ + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x0, 0x0, 0x0, 0x3}, + }, + }, + }) + Convey("The verifier should return merkle root not match error", func() { + err = block.Verify() + So(err, ShouldEqual, ErrMerkleRootNotMatch) + }) + }) + }) + }) +} diff --git a/blockproducer/types/common.go b/xenomint/types/common.go similarity index 81% rename from blockproducer/types/common.go rename to xenomint/types/common.go index 024cc9646..cd4f48a1f 100644 --- a/blockproducer/types/common.go +++ b/xenomint/types/common.go @@ -21,20 +21,24 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" ) -type marshalHasher interface { - MarshalHash() ([]byte, error) -} +//go:generate hsp // DefaultHashSignVerifierImpl defines a default implementation of hashSignVerifier. 
type DefaultHashSignVerifierImpl struct { - Hash hash.Hash + DataHash hash.Hash Signee *asymmetric.PublicKey Signature *asymmetric.Signature } -// GetHash implements hashSignVerifier.GetHash. -func (i *DefaultHashSignVerifierImpl) GetHash() hash.Hash { - return i.Hash +// marshalHasher is the interface implemented by an object that can be stably +// marshalled and hashed. +type marshalHasher interface { + MarshalHash() ([]byte, error) +} + +// Hash implements hashSignVerifier.Hash. +func (i *DefaultHashSignVerifierImpl) Hash() hash.Hash { + return i.DataHash } // Sign implements hashSignVerifier.Sign. @@ -49,7 +53,7 @@ func (i *DefaultHashSignVerifierImpl) Sign( if i.Signature, err = signer.Sign(h[:]); err != nil { return } - i.Hash = h + i.DataHash = h i.Signee = signer.PubKey() return } @@ -61,12 +65,12 @@ func (i *DefaultHashSignVerifierImpl) Verify(obj marshalHasher) (err error) { return } var h = hash.THashH(enc) - if !i.Hash.IsEqual(&h) { - err = ErrSignVerification + if !i.DataHash.IsEqual(&h) { + err = ErrHashValueNotMatch return } if !i.Signature.Verify(h[:], i.Signee) { - err = ErrSignVerification + err = ErrSignatureNotMatch return } return diff --git a/blockproducer/types/common_gen.go b/xenomint/types/common_gen.go similarity index 93% rename from blockproducer/types/common_gen.go rename to xenomint/types/common_gen.go index 9f8cefc8a..8fdb3ee08 100644 --- a/blockproducer/types/common_gen.go +++ b/xenomint/types/common_gen.go @@ -32,7 +32,7 @@ func (z *DefaultHashSignVerifierImpl) MarshalHash() (o []byte, err error) { } } o = append(o, 0x83) - if oTemp, err := z.Hash.MarshalHash(); err != nil { + if oTemp, err := z.DataHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -54,6 +54,6 @@ func (z *DefaultHashSignVerifierImpl) Msgsize() (s int) { } else { s += z.Signature.Msgsize() } - s += 5 + z.Hash.Msgsize() + s += 9 + z.DataHash.Msgsize() return } diff --git a/blockproducer/types/common_gen_test.go b/xenomint/types/common_gen_test.go similarity index 100% rename from blockproducer/types/common_gen_test.go rename to xenomint/types/common_gen_test.go diff --git a/xenomint/types/common_test.go b/xenomint/types/common_test.go new file mode 100644 index 000000000..76f6c43b7 --- /dev/null +++ b/xenomint/types/common_test.go @@ -0,0 +1,100 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "math/big" + "testing" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + . 
"github.com/smartystreets/goconvey/convey" +) + +var ( + dummyHash = []byte{ + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + } +) + +type DummyHeader struct{} + +func (h *DummyHeader) MarshalHash() ([]byte, error) { + return dummyHash, nil +} + +type DummyObject struct { + DummyHeader + DefaultHashSignVerifierImpl +} + +func (o *DummyObject) Sign(signer *asymmetric.PrivateKey) error { + return o.DefaultHashSignVerifierImpl.Sign(&o.DummyHeader, signer) +} + +func (o *DummyObject) Verify() error { + return o.DefaultHashSignVerifierImpl.Verify(&o.DummyHeader) +} + +func TestDefaultHashSignVerifierImpl(t *testing.T) { + Convey("Given a dummy object and a pair of keys", t, func() { + var ( + obj = &DummyObject{} + priv, _, err = asymmetric.GenSecp256k1KeyPair() + ) + So(err, ShouldBeNil) + So(priv, ShouldNotBeNil) + Convey("When the object is signed by the key pair", func() { + err = obj.Sign(priv) + So(err, ShouldBeNil) + Convey("The object should be verifiable", func() { + err = obj.Verify() + So(err, ShouldBeNil) + }) + Convey("The object should have data hash", func() { + So(obj.Hash(), ShouldEqual, hash.THashH(dummyHash)) + }) + Convey("When the hash is modified", func() { + obj.DefaultHashSignVerifierImpl.DataHash = hash.Hash{0x0, 0x0, 0x0, 0x1} + Convey("The verifier should return hash value not match error", func() { + err = obj.Verify() + So(err, ShouldEqual, ErrHashValueNotMatch) + }) + }) + Convey("When the signee is modified", func() { + var _, pub, err = asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + obj.DefaultHashSignVerifierImpl.Signee = pub + Convey("The verifier should return signature not match error", func() { + err = obj.Verify() + So(err, ShouldEqual, ErrSignatureNotMatch) + }) + }) + Convey("When the signature is modified", func() { + var val = obj.DefaultHashSignVerifierImpl.Signature.R + val.Add(val, big.NewInt(1)) + Convey("The verifier should return signature not match error", func() { + err = obj.Verify() + So(err, ShouldEqual, ErrSignatureNotMatch) + }) + }) + }) + }) +} diff --git a/worker/types/doc.go b/xenomint/types/doc.go similarity index 92% rename from worker/types/doc.go rename to xenomint/types/doc.go index cf6b420cc..28c02e239 100644 --- a/worker/types/doc.go +++ b/xenomint/types/doc.go @@ -14,7 +14,4 @@ * limitations under the License. */ -/* -Package types defines miner node export types. -*/ package types diff --git a/xenomint/types/errors.go b/xenomint/types/errors.go new file mode 100644 index 000000000..10bb609f8 --- /dev/null +++ b/xenomint/types/errors.go @@ -0,0 +1,30 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "errors" +) + +var ( + // ErrMerkleRootNotMatch indicates the merkle root not match error from verifier. 
+ ErrMerkleRootNotMatch = errors.New("merkle root not match") + // ErrHashValueNotMatch indicates the hash value not match error from verifier. + ErrHashValueNotMatch = errors.New("hash value not match") + // ErrSignatureNotMatch indicates the signature not match error from verifier. + ErrSignatureNotMatch = errors.New("signature not match") +) diff --git a/xenomint/types/xxx_test.go b/xenomint/types/xxx_test.go new file mode 100644 index 000000000..04c291621 --- /dev/null +++ b/xenomint/types/xxx_test.go @@ -0,0 +1,43 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "math/rand" + "os" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func setup() { + rand.Seed(time.Now().UnixNano()) + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) +} + +func teardown() { +} + +func TestMain(m *testing.M) { + os.Exit(func() int { + setup() + defer teardown() + return m.Run() + }()) +} diff --git a/xenomint/xxx_test.go b/xenomint/xxx_test.go new file mode 100644 index 000000000..aabe8fe80 --- /dev/null +++ b/xenomint/xxx_test.go @@ -0,0 +1,221 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xenomint + +import ( + "io/ioutil" + "math/rand" + "os" + "path" + //"runtime/trace" + "sync" + "syscall" + "testing" + "time" + + ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + pc "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +const ( + benchmarkQueriesPerBlock = 100 + + benchmarkRPCName = "BENCH" + benchmarkDatabaseID = "0x0" + + benchmarkVNum = 3 + benchmarkVLen = 333 + // benchmarkKeySpace defines the key space for benchmarking. + // + // We will have `benchmarkKeySpace` preserved records in the generated testing table and + // another `benchmarkKeySpace` constructed incoming records returned from the setup function. 
+ benchmarkKeySpace = 100000 +) + +var ( + testingDataDir string + testingTraceFile *os.File + testingPrivateKeyFile string + testingPublicKeyStoreFile string + testingNonceDifficulty int + + testingPrivateKey *ca.PrivateKey + testingPublicKey *ca.PublicKey + + testingMasterKey = []byte(`?08Rl%WUih4V0H+c`) +) + +func buildQuery(query string, args ...interface{}) types.Query { + var nargs = make([]types.NamedArg, len(args)) + for i := range args { + nargs[i] = types.NamedArg{ + Name: "", + Value: args[i], + } + } + return types.Query{ + Pattern: query, + Args: nargs, + } +} + +func buildRequest(qt types.QueryType, qs []types.Query) *types.Request { + var ( + id proto.NodeID + err error + ) + if id, err = kms.GetLocalNodeID(); err != nil { + id = proto.NodeID("00000000000000000000000000000000") + } + return &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + NodeID: id, + Timestamp: time.Now().UTC(), + QueryType: qt, + }, + }, + Payload: types.RequestPayload{Queries: qs}, + } +} + +func concat(args [][]interface{}) (ret []interface{}) { + var ( + tlen int + ) + for _, v := range args { + tlen += len(v) + } + ret = make([]interface{}, 0, tlen) + for _, v := range args { + ret = append(ret, v...) + } + return +} + +func createNodesWithPublicKey( + pub *ca.PublicKey, diff int, num int) (nis []proto.Node, err error, +) { + var ( + nic = make(chan pc.NonceInfo) + block = pc.MiningBlock{Data: pub.Serialize(), NonceChan: nic, Stop: nil} + miner = pc.NewCPUMiner(nil) + wg = &sync.WaitGroup{} + + next pc.Uint256 + ni pc.NonceInfo + ) + + defer func() { + wg.Wait() + close(nic) + }() + + nis = make([]proto.Node, num) + for i := range nis { + wg.Add(1) + go func() { + defer wg.Done() + miner.ComputeBlockNonce(block, next, diff) + }() + ni = <-nic + nis[i] = proto.Node{ + ID: proto.NodeID(ni.Hash.String()), + Nonce: ni.Nonce, + PublicKey: pub, + } + next = ni.Nonce + next.Inc() + } + + return +} + +func setup() { + const minNoFile uint64 = 4096 + var ( + err error + lmt syscall.Rlimit + ) + + if testingDataDir, err = ioutil.TempDir("", "CovenantSQL"); err != nil { + panic(err) + } + + rand.Seed(time.Now().UnixNano()) + + // Set NOFILE limit + if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { + panic(err) + } + if lmt.Max < minNoFile { + panic("insufficient max RLIMIT_NOFILE") + } + lmt.Cur = lmt.Max + if err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { + panic(err) + } + + // Initialze kms + testingNonceDifficulty = 2 + testingPrivateKeyFile = path.Join(testingDataDir, "private.key") + testingPublicKeyStoreFile = path.Join(testingDataDir, "public.keystore") + if testingPrivateKey, testingPublicKey, err = ca.GenSecp256k1KeyPair(); err != nil { + panic(err) + } + kms.Unittest = true + kms.SetLocalKeyPair(testingPrivateKey, testingPublicKey) + if err = kms.SavePrivateKey( + testingPrivateKeyFile, testingPrivateKey, testingMasterKey, + ); err != nil { + panic(err) + } + + // Setup runtime trace for testing + //if testingTraceFile, err = ioutil.TempFile("", "CovenantSQL.trace."); err != nil { + // panic(err) + //} + //if err = trace.Start(testingTraceFile); err != nil { + // panic(err) + //} + + log.SetOutput(os.Stdout) + log.SetLevel(log.DebugLevel) +} + +func teardown() { + //trace.Stop() + var err error + //if err = testingTraceFile.Close(); err != nil { + // panic(err) + //} + if err = os.RemoveAll(testingDataDir); err != nil { + panic(err) + } +} + +func TestMain(m *testing.M) { + os.Exit(func() int { + setup() + defer 
teardown() + return m.Run() + }()) +}
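
For reference, a minimal sketch of the sign/verify round trip added in xenomint/types — this is not part of the diff itself; it reuses only identifiers that appear in the patch above (Block, types.Ack, verifier.DefaultHashSignVerifierImpl, asymmetric.GenSecp256k1KeyPair, ErrMerkleRootNotMatch) and assumes Block.Sign fills the header merkle root before signing, as xenomint/types/block_test.go implies:

package main

import (
	"fmt"

	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
	"github.com/CovenantSQL/CovenantSQL/crypto/verifier"
	"github.com/CovenantSQL/CovenantSQL/types"
	xt "github.com/CovenantSQL/CovenantSQL/xenomint/types"
)

func main() {
	// Build a block carrying one write query, mirroring block_test.go.
	block := &xt.Block{
		WriteQueries: []*types.Ack{{
			Header: types.SignedAckHeader{
				DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{
					DataHash: hash.Hash{0x0, 0x0, 0x0, 0x2},
				},
			},
		}},
	}

	// Sign with a fresh key pair; the signed header is then checked by Verify,
	// which recomputes the merkle root of the query hashes (see Block.Verify).
	priv, _, err := asymmetric.GenSecp256k1KeyPair()
	if err != nil {
		panic(err)
	}
	if err = block.Sign(priv); err != nil {
		panic(err)
	}
	fmt.Println(block.Verify()) // <nil>

	// Tampering with the query set breaks the merkle root check.
	block.ReadQueries = append(block.ReadQueries, &types.Ack{
		Header: types.SignedAckHeader{
			DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{
				DataHash: hash.Hash{0x0, 0x0, 0x0, 0x3},
			},
		},
	})
	fmt.Println(block.Verify()) // merkle root not match (ErrMerkleRootNotMatch)
}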